summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity-as39358
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-swap10
-rw-r--r--Documentation/ABI/testing/sysfs-power2
-rw-r--r--Documentation/core-api/kernel-api.rst14
-rw-r--r--Documentation/core-api/workqueue.rst12
-rw-r--r--Documentation/cpu-freq/index.txt2
-rw-r--r--Documentation/device-mapper/dm-raid.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/as3935.txt5
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt6
-rw-r--r--Documentation/devicetree/bindings/leds/ams,as3645a.txt28
-rw-r--r--Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt12
-rw-r--r--Documentation/devicetree/bindings/net/marvell-pp2.txt10
-rw-r--r--Documentation/devicetree/bindings/net/rockchip-dwmac.txt1
-rw-r--r--Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt28
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt2
-rw-r--r--Documentation/driver-model/driver.txt7
-rw-r--r--Documentation/filesystems/overlayfs.txt5
-rw-r--r--Documentation/filesystems/sysfs.txt3
-rw-r--r--Documentation/i2c/busses/i2c-i8011
-rw-r--r--Documentation/kbuild/makefiles.txt31
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/kernel-enforcement-statement.rst147
-rw-r--r--MAINTAINERS45
-rw-r--r--Makefile22
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/mmu_context.h1
-rw-r--r--arch/alpha/kernel/sys_alcor.c4
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c12
-rw-r--r--arch/alpha/kernel/sys_dp264.c20
-rw-r--r--arch/alpha/kernel/sys_eb64p.c4
-rw-r--r--arch/alpha/kernel/sys_eiger.c4
-rw-r--r--arch/alpha/kernel/sys_miata.c6
-rw-r--r--arch/alpha/kernel/sys_mikasa.c4
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/kernel/sys_noritake.c6
-rw-r--r--arch/alpha/kernel/sys_rawhide.c4
-rw-r--r--arch/alpha/kernel/sys_ruffian.c6
-rw-r--r--arch/alpha/kernel/sys_rx164.c4
-rw-r--r--arch/alpha/kernel/sys_sable.c10
-rw-r--r--arch/alpha/kernel/sys_sio.c8
-rw-r--r--arch/alpha/kernel/sys_sx164.c4
-rw-r--r--arch/alpha/kernel/sys_takara.c6
-rw-r--r--arch/alpha/kernel/sys_wildfire.c4
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/Makefile2
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi9
-rw-r--r--arch/arc/boot/dts/hsdk.dts33
-rw-r--r--arch/arc/configs/axs101_defconfig2
-rw-r--r--arch/arc/configs/axs103_defconfig2
-rw-r--r--arch/arc/configs/axs103_smp_defconfig2
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/hsdk_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/arcregs.h11
-rw-r--r--arch/arc/kernel/setup.c32
-rw-r--r--arch/arc/kernel/smp.c5
-rw-r--r--arch/arc/plat-axs10x/axs10x.c7
-rw-r--r--arch/arc/plat-hsdk/Kconfig4
-rw-r--r--arch/arc/plat-hsdk/platform.c52
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/debug.S4
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts6
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1_ek.dts19
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts16
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts9
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b.dts5
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi7
-rw-r--r--arch/arm/boot/dts/da850-evm.dts7
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/gemini.dtsi3
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi8
-rw-r--r--arch/arm/boot/dts/moxart.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts11
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi10
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi1
-rw-r--r--arch/arm/boot/dts/stm32429i-eval.dts5
-rw-r--r--arch/arm/boot/dts/stm32f4-pinctrl.dtsi343
-rw-r--r--arch/arm/boot/dts/stm32f429-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32f429-pinctrl.dtsi95
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi297
-rw-r--r--arch/arm/boot/dts/stm32f469-disco.dts1
-rw-r--r--arch/arm/boot/dts/stm32f469-pinctrl.dtsi96
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi16
-rw-r--r--arch/arm/configs/gemini_defconfig3
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/configs/viper_defconfig2
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/kernel/debug.S8
-rw-r--r--arch/arm/mach-at91/pm.c4
-rw-r--r--arch/arm/mach-omap2/hsmmc.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c1
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-ux500/pm.c4
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts9
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts19
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts9
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi13
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi39
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts13
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi39
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi74
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi6
-rw-r--r--arch/arm64/include/asm/memory.h9
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/head.S1
-rw-r--r--arch/arm64/mm/fault.c4
-rw-r--r--arch/m32r/Kconfig4
-rw-r--r--arch/m32r/kernel/traps.c9
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/kernel/dma.c2
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/loongson32/common/platform.c38
-rw-r--r--arch/mips/math-emu/cp1emu.c2
-rw-r--r--arch/mips/net/ebpf_jit.c2
-rwxr-xr-xarch/mips/tools/generic-board-config.sh6
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/kernel/time.c5
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S24
-rw-r--r--arch/powerpc/kernel/mce_power.c13
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c13
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64_mprofile.S45
-rw-r--r--arch/powerpc/kernel/watchdog.c30
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c23
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S27
-rw-r--r--arch/powerpc/kvm/book3s_xive.c5
-rw-r--r--arch/powerpc/kvm/book3s_xive.h1
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/powerpc/lib/sstep.c6
-rw-r--r--arch/powerpc/mm/numa.c1
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/perf/imc-pmu.c39
-rw-r--r--arch/powerpc/platforms/powernv/setup.c10
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c17
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c8
-rw-r--r--arch/powerpc/sysdev/xive/common.c8
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c4
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/kernel/entry.S7
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/sh7264.h4
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/sh7269.h4
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7722.h2
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7757.h8
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/um/kernel/time.c4
-rw-r--r--arch/x86/crypto/chacha20-avx2-x86_64.S4
-rw-r--r--arch/x86/crypto/chacha20-ssse3-x86_64.S4
-rw-r--r--arch/x86/entry/entry_32.S4
-rw-r--r--arch/x86/entry/entry_64.S2
-rw-r--r--arch/x86/events/intel/bts.c6
-rw-r--r--arch/x86/events/intel/core.c11
-rw-r--r--arch/x86/events/intel/cstate.c4
-rw-r--r--arch/x86/events/intel/rapl.c3
-rw-r--r--arch/x86/events/intel/uncore.c12
-rw-r--r--arch/x86/events/intel/uncore_snbep.c4
-rw-r--r--arch/x86/events/msr.c8
-rw-r--r--arch/x86/hyperv/hv_init.c5
-rw-r--r--arch/x86/hyperv/mmu.c57
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/include/asm/alternative-asm.h4
-rw-r--r--arch/x86/include/asm/alternative.h6
-rw-r--r--arch/x86/include/asm/asm.h8
-rw-r--r--arch/x86/include/asm/fpu/internal.h90
-rw-r--r--arch/x86/include/asm/fpu/types.h32
-rw-r--r--arch/x86/include/asm/fpu/xstate.h12
-rw-r--r--arch/x86/include/asm/kvm_para.h4
-rw-r--r--arch/x86/include/asm/mce.h1
-rw-r--r--arch/x86/include/asm/mmu_context.h8
-rw-r--r--arch/x86/include/asm/mshyperv.h1
-rw-r--r--arch/x86/include/asm/thread_info.h11
-rw-r--r--arch/x86/include/asm/tlbflush.h33
-rw-r--r--arch/x86/include/asm/trace/fpu.h11
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/kernel/amd_nb.c41
-rw-r--r--arch/x86/kernel/apic/apic.c15
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c1
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c27
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c19
-rw-r--r--arch/x86/kernel/fpu/core.c155
-rw-r--r--arch/x86/kernel/fpu/init.c2
-rw-r--r--arch/x86/kernel/fpu/regset.c48
-rw-r--r--arch/x86/kernel/fpu/signal.c37
-rw-r--r--arch/x86/kernel/fpu/xstate.c264
-rw-r--r--arch/x86/kernel/head32.c5
-rw-r--r--arch/x86/kernel/irq_32.c6
-rw-r--r--arch/x86/kernel/kprobes/common.h13
-rw-r--r--arch/x86/kernel/kprobes/core.c2
-rw-r--r--arch/x86/kernel/ksysfs.c2
-rw-r--r--arch/x86/kernel/kvm.c13
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/signal.c6
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kernel/unwind_frame.c38
-rw-r--r--arch/x86/kernel/unwind_orc.c29
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/emulate.c6
-rw-r--r--arch/x86/kvm/mmu.c17
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/vmx.c208
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/math-emu/fpu_entry.c2
-rw-r--r--arch/x86/mm/Makefile11
-rw-r--r--arch/x86/mm/extable.c24
-rw-r--r--arch/x86/mm/fault.c56
-rw-r--r--arch/x86/mm/mem_encrypt.c2
-rw-r--r--arch/x86/mm/pkeys.c3
-rw-r--r--arch/x86/mm/tlb.c107
-rw-r--r--arch/x86/net/bpf_jit_comp.c4
-rw-r--r--arch/x86/xen/enlighten.c4
-rw-r--r--arch/x86/xen/mmu_pv.c13
-rw-r--r--block/bio.c26
-rw-r--r--block/blk-core.c3
-rw-r--r--block/blk-mq-debugfs.c6
-rw-r--r--block/blk-throttle.c4
-rw-r--r--block/bsg-lib.c28
-rw-r--r--block/partition-generic.c2
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c4
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c3
-rw-r--r--crypto/shash.c10
-rw-r--r--crypto/skcipher.c17
-rw-r--r--crypto/xts.c6
-rw-r--r--drivers/acpi/apei/ghes.c16
-rw-r--r--drivers/acpi/arm64/iort.c35
-rw-r--r--drivers/acpi/property.c29
-rw-r--r--drivers/android/binder.c112
-rw-r--r--drivers/android/binder_alloc.c42
-rw-r--r--drivers/android/binder_alloc.h1
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/ata_piix.c1
-rw-r--r--drivers/ata/libata-core.c20
-rw-r--r--drivers/auxdisplay/charlcd.c11
-rw-r--r--drivers/auxdisplay/panel.c11
-rw-r--r--drivers/base/arch_topology.c12
-rw-r--r--drivers/base/node.c12
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/base/power/opp/core.c7
-rw-r--r--drivers/base/property.c19
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/loop.h6
-rw-r--r--drivers/block/nbd.c27
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/block/virtio_blk.c12
-rw-r--r--drivers/block/zram/zram_drv.c36
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/clk/clk-bulk.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c12
-rw-r--r--drivers/clk/samsung/clk-exynos4.c15
-rw-r--r--drivers/clocksource/cs5535-clockevt.c3
-rw-r--r--drivers/clocksource/numachip.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c4
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c4
-rw-r--r--drivers/crypto/stm32/stm32-hash.c15
-rw-r--r--drivers/dma-buf/sync_file.c17
-rw-r--r--drivers/dma/altera-msgdma.c41
-rw-r--r--drivers/dma/edma.c19
-rw-r--r--drivers/dma/ti-dma-crossbar.c3
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/test/efi_test.c3
-rw-r--r--drivers/fpga/altera-cvp.c6
-rw-r--r--drivers/gpio/Kconfig3
-rw-r--r--drivers/gpio/gpio-omap.c24
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c189
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c70
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c63
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c9
-rw-r--r--drivers/gpu/drm/i915/intel_color.c16
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c16
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c20
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c9
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c17
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c19
-rw-r--r--drivers/gpu/drm/tegra/trace.h2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c7
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-elecom.c13
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-rmi.c13
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c3
-rw-r--r--drivers/hid/usbhid/hid-core.c12
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/wacom_sys.c7
-rw-r--r--drivers/hid/wacom_wac.c110
-rw-r--r--drivers/hv/channel.c6
-rw-r--r--drivers/hv/channel_mgmt.c46
-rw-r--r--drivers/hv/hv_fcopy.c4
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/da9052-hwmon.c5
-rw-r--r--drivers/hwmon/tmp102.c13
-rw-r--r--drivers/hwmon/xgene-hwmon.c19
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c162
-rw-r--r--drivers/i2c/busses/i2c-sprd.c1
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c17
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-probe.c1
-rw-r--r--drivers/ide/ide-scan-pci.c13
-rw-r--r--drivers/ide/setup-pci.c63
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7793.c4
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c28
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c45
-rw-r--r--drivers/iio/adc/mcp320x.c25
-rw-r--r--drivers/iio/adc/stm32-adc.c2
-rw-r--r--drivers/iio/adc/ti-ads1015.c8
-rw-r--r--drivers/iio/adc/twl4030-madc.c14
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c11
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c1
-rw-r--r--drivers/iio/industrialio-core.c4
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c2
-rw-r--r--drivers/iio/pressure/zpa2326.c10
-rw-r--r--drivers/iio/proximity/as3935.c43
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c4
-rw-r--r--drivers/infiniband/core/iwpm_msg.c8
-rw-r--r--drivers/infiniband/core/iwpm_util.c5
-rw-r--r--drivers/infiniband/core/netlink.c13
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/security.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c14
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c101
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c20
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c41
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c50
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c11
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c14
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c47
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c27
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c30
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c2
-rw-r--r--drivers/input/input.c84
-rw-r--r--drivers/input/joydev.c70
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c29
-rw-r--r--drivers/input/misc/axp20x-pek.c2
-rw-r--r--drivers/input/misc/ims-pcu.c16
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/input/rmi4/rmi_f30.c5
-rw-r--r--drivers/input/tablet/gtco.c17
-rw-r--r--drivers/input/touchscreen/goodix.c67
-rw-r--r--drivers/input/touchscreen/stmfts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/iommu/amd_iommu.c11
-rw-r--r--drivers/iommu/amd_iommu_init.c8
-rw-r--r--drivers/iommu/exynos-iommu.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c2
-rw-r--r--drivers/iommu/mtk_iommu.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c43
-rw-r--r--drivers/irqchip/irq-mips-gic.c13
-rw-r--r--drivers/irqchip/irq-tango.c2
-rw-r--r--drivers/leds/leds-as3645a.c29
-rw-r--r--drivers/md/bcache/closure.c4
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-crypt.c5
-rw-r--r--drivers/md/dm-ioctl.c37
-rw-r--r--drivers/md/dm-raid.c13
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md.c72
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/cec/cec-adap.c13
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c50
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c1
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c3
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c11
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h2
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c2
-rw-r--r--drivers/media/tuners/mt2060.c59
-rw-r--r--drivers/misc/cxl/cxllib.c13
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c23
-rw-r--r--drivers/misc/mei/pci-txe.c30
-rw-r--r--drivers/mmc/core/block.c3
-rw-r--r--drivers/mmc/core/mmc.c36
-rw-r--r--drivers/mmc/core/queue.c125
-rw-r--r--drivers/mmc/core/queue.h6
-rw-r--r--drivers/mmc/host/cavium.c2
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c26
-rw-r--r--drivers/mmc/host/pxamci.c6
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c17
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c17
-rw-r--r--drivers/mmc/host/sdhci-xenon.c24
-rw-r--r--drivers/mmc/host/sdhci-xenon.h1
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c60
-rw-r--r--drivers/mtd/mtdpart.c8
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c2
-rw-r--r--drivers/net/can/flexcan.c91
-rw-r--r--drivers/net/can/sun4i_can.c3
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/can/usb/kvaser_usb.c9
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c157
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c182
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c53
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c89
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c72
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c11
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c66
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c24
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c180
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.h48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c112
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/marvell.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c22
-rw-r--r--drivers/net/tap.c25
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/cdc_ether.c48
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/wan/lapbether.c1
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c122
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c10
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvme/host/core.c12
-rw-r--r--drivers/nvme/host/fabrics.c18
-rw-r--r--drivers/nvme/host/fc.c58
-rw-r--r--drivers/nvme/host/pci.c48
-rw-r--r--drivers/nvme/host/rdma.c36
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/fabrics-cmd.c9
-rw-r--r--drivers/nvme/target/fc.c24
-rw-r--r--drivers/nvme/target/fcloop.c104
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvmem/core.c3
-rw-r--r--drivers/of/base.c8
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/pci/host/pci-aardvark.c2
-rw-r--r--drivers/pci/host/pci-tegra.c22
-rw-r--r--drivers/pci/pci-sysfs.c11
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c18
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c82
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/pinctrl/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c14
-rw-r--r--drivers/pinctrl/pinctrl-amd.c10
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c6
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c10
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c115
-rw-r--r--drivers/rapidio/devices/tsi721.c7
-rw-r--r--drivers/rapidio/rio-access.c40
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/axp20x-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/imx_rproc.c9
-rw-r--r--drivers/reset/Kconfig9
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/reset-hsdk.c (renamed from drivers/reset/reset-hsdk-v1.c)44
-rw-r--r--drivers/reset/reset-socfpga.c17
-rw-r--r--drivers/rpmsg/qcom_glink_native.c14
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/scsi/aacraid/aachba.c12
-rw-r--r--drivers/scsi/aacraid/aacraid.h5
-rw-r--r--drivers/scsi/aacraid/comminit.c8
-rw-r--r--drivers/scsi/aacraid/linit.c27
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/scsi/scsi_scan.c3
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c17
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c18
-rw-r--r--drivers/scsi/sd.c35
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/spi/spi-armada-3700.c145
-rw-r--r--drivers/spi/spi-bcm-qspi.c9
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--drivers/staging/iio/adc/ad7192.c4
-rw-r--r--drivers/staging/iio/meter/ade7759.c2
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c8
-rw-r--r--drivers/staging/pi433/rf69.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/rtw_proc.c2
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c4
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c13
-rw-r--r--drivers/staging/speakup/main.c15
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c19
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/tty/mxser.c16
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c5
-rw-r--r--drivers/tty/serial/fsl_lpuart.c40
-rw-r--r--drivers/tty/serial/sccnxp.c13
-rw-r--r--drivers/tty/tty_ldisc.c11
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/config.c22
-rw-r--r--drivers/usb/core/devio.c5
-rw-r--r--drivers/usb/core/hub.c13
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c1
-rw-r--r--drivers/usb/dwc3/ep0.c7
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/configfs.c15
-rw-r--r--drivers/usb/gadget/configfs.h11
-rw-r--r--drivers/usb/gadget/function/f_fs.c17
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c27
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.h14
-rw-r--r--drivers/usb/gadget/function/f_printer.c7
-rw-r--r--drivers/usb/gadget/function/f_rndis.c12
-rw-r--r--drivers/usb/gadget/function/u_fs.h1
-rw-r--r--drivers/usb/gadget/function/u_rndis.h1
-rw-r--r--drivers/usb/gadget/legacy/inode.c46
-rw-r--r--drivers/usb/gadget/legacy/mass_storage.c26
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/core.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c74
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c19
-rw-r--r--drivers/usb/host/pci-quirks.c10
-rw-r--r--drivers/usb/host/xhci-hub.c37
-rw-r--r--drivers/usb/host/xhci-pci.c12
-rw-r--r--drivers/usb/host/xhci-plat.c16
-rw-r--r--drivers/usb/host/xhci-ring.c21
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h10
-rw-r--r--drivers/usb/misc/usbtest.c10
-rw-r--r--drivers/usb/musb/musb_core.c21
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c94
-rw-r--r--drivers/usb/musb/sunxi.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c17
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c23
-rw-r--r--drivers/usb/serial/console.c3
-rw-r--r--drivers/usb/serial/cp210x.c13
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/qcserial.c4
-rw-r--r--drivers/usb/storage/transport.c14
-rw-r--r--drivers/usb/storage/uas-detect.h15
-rw-r--r--drivers/usb/storage/uas.c10
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/uwb/hwa-rc.c2
-rw-r--r--drivers/uwb/uwbd.c12
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xen-balloon.c19
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c11
-rw-r--r--fs/9p/vfs_addr.c10
-rw-r--r--fs/binfmt_misc.c56
-rw-r--r--fs/binfmt_script.c17
-rw-r--r--fs/block_dev.c6
-rw-r--r--fs/btrfs/compression.c18
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c9
-rw-r--r--fs/btrfs/extent_io.c10
-rw-r--r--fs/btrfs/inode.c27
-rw-r--r--fs/btrfs/ioctl.c12
-rw-r--r--fs/btrfs/qgroup.c6
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/send.c2
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/tree-log.c12
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/caps.c5
-rw-r--r--fs/ceph/mds_client.c11
-rw-r--r--fs/ceph/snap.c8
-rw-r--r--fs/cifs/Kconfig5
-rw-r--r--fs/cifs/cifsglob.h8
-rw-r--r--fs/cifs/dir.c5
-rw-r--r--fs/cifs/smb2maperror.c2
-rw-r--r--fs/cifs/smb2ops.c31
-rw-r--r--fs/cifs/smb2pdu.c33
-rw-r--r--fs/cifs/smb2pdu.h5
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smb2transport.c26
-rw-r--r--fs/crypto/keyinfo.c5
-rw-r--r--fs/direct-io.c75
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h24
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/exec.c3
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/segment.c6
-rw-r--r--fs/f2fs/super.c2
-rw-r--r--fs/fscache/object-list.c7
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/glock.c14
-rw-r--r--fs/iomap.c54
-rw-r--r--fs/isofs/inode.c2
-rw-r--r--fs/mpage.c14
-rw-r--r--fs/namespace.c7
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/filelayout/filelayout.c3
-rw-r--r--fs/nfs/nfs4idmap.c2
-rw-r--r--fs/nfs/nfs4proc.c3
-rw-r--r--fs/nfs/nfs4xdr.c4
-rw-r--r--fs/nfsd/nfs4proc.c9
-rw-r--r--fs/overlayfs/copy_up.c6
-rw-r--r--fs/overlayfs/dir.c20
-rw-r--r--fs/overlayfs/inode.c20
-rw-r--r--fs/overlayfs/namei.c33
-rw-r--r--fs/overlayfs/overlayfs.h4
-rw-r--r--fs/overlayfs/ovl_entry.h3
-rw-r--r--fs/overlayfs/readdir.c17
-rw-r--r--fs/overlayfs/super.c30
-rw-r--r--fs/overlayfs/util.c24
-rw-r--r--fs/proc/array.c35
-rw-r--r--fs/quota/dquot.c29
-rw-r--r--fs/quota/quota_v2.c4
-rw-r--r--fs/read_write.c4
-rw-r--r--fs/userfaultfd.c66
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c12
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c32
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h1
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h27
-rw-r--r--fs/xfs/xfs_acl.c22
-rw-r--r--fs/xfs/xfs_aops.c50
-rw-r--r--fs/xfs/xfs_attr_inactive.c2
-rw-r--r--fs/xfs/xfs_bmap_util.c40
-rw-r--r--fs/xfs/xfs_bmap_util.h13
-rw-r--r--fs/xfs/xfs_buf.c2
-rw-r--r--fs/xfs/xfs_error.c2
-rw-r--r--fs/xfs/xfs_file.c64
-rw-r--r--fs/xfs/xfs_fsmap.c58
-rw-r--r--fs/xfs/xfs_inode.c8
-rw-r--r--fs/xfs/xfs_inode_item.c81
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_iomap.c7
-rw-r--r--fs/xfs/xfs_iomap.h2
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_mount.c2
-rw-r--r--fs/xfs/xfs_ondisk.h2
-rw-r--r--fs/xfs/xfs_pnfs.c2
-rw-r--r--fs/xfs/xfs_reflink.c9
-rw-r--r--fs/xfs/xfs_super.c12
-rw-r--r--include/asm-generic/percpu.h24
-rw-r--r--include/dt-bindings/reset/snps,hsdk-reset.h17
-rw-r--r--include/dt-bindings/reset/snps,hsdk-v1-reset.h17
-rw-r--r--include/linux/binfmts.h2
-rw-r--r--include/linux/bitfield.h2
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bpf.h5
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cpuhotplug.h21
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/if_tap.h4
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h3
-rw-r--r--include/linux/input.h7
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/kernel.h90
-rw-r--r--include/linux/key.h47
-rw-r--r--include/linux/mbus.h4
-rw-r--r--include/linux/mlx5/device.h5
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h3
-rw-r--r--include/linux/mlx5/port.h2
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmu_notifier.h5
-rw-r--r--include/linux/mmzone.h10
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netfilter_bridge/ebtables.h7
-rw-r--r--include/linux/nmi.h121
-rw-r--r--include/linux/nvme-fc-driver.h13
-rw-r--r--include/linux/nvme.h19
-rw-r--r--include/linux/of.h10
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/rculist.h2
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/sched.h64
-rw-r--r--include/linux/sched/mm.h22
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/sctp.h34
-rw-r--r--include/linux/smpboot.h4
-rw-r--r--include/linux/srcu.h1
-rw-r--r--include/linux/swait.h27
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/timer.h14
-rw-r--r--include/net/fq_impl.h9
-rw-r--r--include/net/inet_sock.h8
-rw-r--r--include/net/netlink.h73
-rw-r--r--include/net/pkt_cls.h3
-rw-r--r--include/net/protocol.h4
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/ulpevent.h2
-rw-r--r--include/net/strparser.h3
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/net/udp.h2
-rw-r--r--include/rdma/ib_verbs.h6
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/sound/control.h3
-rw-r--r--include/sound/hda_verbs.h1
-rw-r--r--include/sound/seq_virmidi.h1
-rw-r--r--include/trace/events/sched.h19
-rw-r--r--include/uapi/linux/bpf.h9
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/membarrier.h23
-rw-r--r--include/uapi/linux/netfilter/xt_bpf.h1
-rw-r--r--include/uapi/linux/sctp.h2
-rw-r--r--include/uapi/linux/spi/spidev.h1
-rw-r--r--include/uapi/linux/usb/ch9.h1
-rw-r--r--include/uapi/rdma/ib_user_verbs.h2
-rw-r--r--init/Kconfig2
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/bpf/devmap.c10
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/inode.c1
-rw-r--r--kernel/bpf/sockmap.c57
-rw-r--r--kernel/bpf/verifier.c70
-rw-r--r--kernel/cgroup/cgroup.c8
-rw-r--r--kernel/cpu.c513
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/events/ring_buffer.c20
-rw-r--r--kernel/exit.c29
-rw-r--r--kernel/extable.c45
-rw-r--r--kernel/fork.c22
-rw-r--r--kernel/futex.c33
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/cpuhotplug.c28
-rw-r--r--kernel/irq/generic-chip.c16
-rw-r--r--kernel/irq/irqdomain.c4
-rw-r--r--kernel/irq/manage.c21
-rw-r--r--kernel/kcmp.c2
-rw-r--r--kernel/livepatch/core.c60
-rw-r--r--kernel/locking/lockdep.c48
-rw-r--r--kernel/locking/rwsem-xadd.c27
-rw-r--r--kernel/memremap.c4
-rw-r--r--kernel/params.c35
-rw-r--r--kernel/power/suspend.c18
-rw-r--r--kernel/rcu/srcutree.c2
-rw-r--r--kernel/rcu/sync.c9
-rw-r--r--kernel/rcu/tree.c28
-rw-r--r--kernel/sched/core.c24
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/fair.c140
-rw-r--r--kernel/sched/features.h3
-rw-r--r--kernel/sched/membarrier.c34
-rw-r--r--kernel/seccomp.c23
-rw-r--r--kernel/smpboot.c25
-rw-r--r--kernel/sysctl.c27
-rw-r--r--kernel/trace/blktrace.c18
-rw-r--r--kernel/trace/ftrace.c14
-rw-r--r--kernel/trace/trace_output.c21
-rw-r--r--kernel/trace/trace_sched_wakeup.c8
-rw-r--r--kernel/trace/trace_stack.c15
-rw-r--r--kernel/watchdog.c643
-rw-r--r--kernel/watchdog_hld.c196
-rw-r--r--kernel/workqueue.c37
-rw-r--r--lib/Kconfig.debug147
-rw-r--r--lib/assoc_array.c51
-rw-r--r--lib/digsig.c6
-rw-r--r--lib/idr.c4
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/kobject_uevent.c49
-rw-r--r--lib/locking-selftest.c2
-rw-r--r--lib/lz4/lz4_decompress.c4
-rw-r--r--lib/ratelimit.c4
-rw-r--r--lib/ts_fsm.c2
-rw-r--r--lib/ts_kmp.c2
-rw-r--r--mm/cma.c2
-rw-r--r--mm/compaction.c13
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/ksm.c5
-rw-r--r--mm/list_lru.c12
-rw-r--r--mm/madvise.c19
-rw-r--r--mm/memcontrol.c38
-rw-r--r--mm/memory.c2
-rw-r--r--mm/memory_hotplug.c7
-rw-r--r--mm/mempolicy.c7
-rw-r--r--mm/migrate.c3
-rw-r--r--mm/oom_kill.c16
-rw-r--r--mm/page_alloc.c3
-rw-r--r--mm/page_vma_mapped.c28
-rw-r--r--mm/percpu-stats.c2
-rw-r--r--mm/percpu.c19
-rw-r--r--mm/rodata_test.c2
-rw-r--r--mm/slab_common.c22
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_state.c52
-rw-r--r--mm/vmalloc.c6
-rw-r--r--mm/z3fold.c10
-rw-r--r--net/8021q/vlan_core.c6
-rw-r--r--net/bluetooth/Kconfig10
-rw-r--r--net/bluetooth/hci_sock.c6
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c4
-rw-r--r--net/bridge/netfilter/ebtable_filter.c4
-rw-r--r--net/bridge/netfilter/ebtable_nat.c4
-rw-r--r--net/bridge/netfilter/ebtables.c17
-rw-r--r--net/can/af_can.c20
-rw-r--r--net/can/bcm.c5
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/dev_ioctl.c13
-rw-r--r--net/core/ethtool.c5
-rw-r--r--net/core/filter.c75
-rw-r--r--net/core/rtnetlink.c16
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/core/sock.c15
-rw-r--r--net/core/sock_reuseport.c12
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/dsa/dsa2.c7
-rw-r--r--net/dsa/slave.c31
-rw-r--r--net/ife/ife.c2
-rw-r--r--net/ipv4/Kconfig8
-rw-r--r--net/ipv4/cipso_ipv4.c24
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/inet_connection_sock.c10
-rw-r--r--net/ipv4/inet_hashtables.c5
-rw-r--r--net/ipv4/inetpeer.c4
-rw-r--r--net/ipv4/ip_gre.c12
-rw-r--r--net/ipv4/ip_input.c25
-rw-r--r--net/ipv4/ip_vti.c3
-rw-r--r--net/ipv4/ipip.c59
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/route.c48
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c31
-rw-r--r--net/ipv4/tcp_output.c13
-rw-r--r--net/ipv4/udp.c45
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/addrconf.c5
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c21
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_tunnel.c5
-rw-r--r--net/ipv6/ip6_vti.c3
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c2
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/l2tp/l2tp_core.c16
-rw-r--r--net/l2tp/l2tp_core.h6
-rw-r--r--net/l2tp/l2tp_eth.c51
-rw-r--r--net/l2tp/l2tp_ppp.c18
-rw-r--r--net/mac80211/cfg.c12
-rw-r--r--net/mac80211/key.c54
-rw-r--r--net/ncsi/internal.h1
-rw-r--r--net/ncsi/ncsi-aen.c2
-rw-r--r--net/ncsi/ncsi-manage.c52
-rw-r--r--net/ncsi/ncsi-rsp.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c29
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c22
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmark.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c4
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c8
-rw-r--r--net/netfilter/nf_tables_api.c10
-rw-r--r--net/netfilter/x_tables.c4
-rw-r--r--net/netfilter/xt_bpf.c22
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/netlink/af_netlink.c18
-rw-r--r--net/packet/af_packet.c36
-rw-r--r--net/psample/psample.c2
-rw-r--r--net/rds/ib_send.c16
-rw-r--r--net/rxrpc/af_rxrpc.c5
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/cls_api.c60
-rw-r--r--net/sched/cls_basic.c20
-rw-r--r--net/sched/cls_bpf.c19
-rw-r--r--net/sched/cls_cgroup.c22
-rw-r--r--net/sched/cls_flow.c19
-rw-r--r--net/sched/cls_flower.c21
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_matchall.c19
-rw-r--r--net/sched/cls_route.c19
-rw-r--r--net/sched/cls_rsvp.h19
-rw-r--r--net/sched/cls_tcindex.c38
-rw-r--r--net/sched/cls_u32.c29
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sctp/input.c24
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/sctp_diag.c4
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/sctp/stream.c26
-rw-r--r--net/sctp/ulpevent.c2
-rw-r--r--net/strparser/strparser.c17
-rw-r--r--net/sunrpc/xprt.c36
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c2
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/msg.c10
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/vmw_vsock/hyperv_transport.c22
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/wireless/sme.c50
-rw-r--r--net/xfrm/xfrm_device.c1
-rw-r--r--net/xfrm/xfrm_input.c6
-rw-r--r--net/xfrm/xfrm_output.c4
-rw-r--r--net/xfrm/xfrm_policy.c17
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c26
-rw-r--r--samples/sockmap/sockmap_kern.c2
-rw-r--r--samples/trace_events/trace-events-sample.c14
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.modpost1
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/faddr2line5
-rw-r--r--scripts/kallsyms.c2
-rw-r--r--scripts/spelling.txt33
-rw-r--r--security/apparmor/.gitignore1
-rw-r--r--security/apparmor/Makefile43
-rw-r--r--security/apparmor/apparmorfs.c1
-rw-r--r--security/apparmor/file.c30
-rw-r--r--security/apparmor/include/audit.h26
-rw-r--r--security/apparmor/include/net.h114
-rw-r--r--security/apparmor/include/perms.h5
-rw-r--r--security/apparmor/include/policy.h13
-rw-r--r--security/apparmor/lib.c5
-rw-r--r--security/apparmor/lsm.c387
-rw-r--r--security/apparmor/net.c184
-rw-r--r--security/apparmor/policy_unpack.c47
-rw-r--r--security/commoncap.c3
-rw-r--r--security/keys/Kconfig5
-rw-r--r--security/keys/big_key.c143
-rw-r--r--security/keys/encrypted-keys/encrypted.c9
-rw-r--r--security/keys/gc.c8
-rw-r--r--security/keys/internal.h2
-rw-r--r--security/keys/key.c47
-rw-r--r--security/keys/keyctl.c14
-rw-r--r--security/keys/keyring.c51
-rw-r--r--security/keys/permission.c7
-rw-r--r--security/keys/proc.c39
-rw-r--r--security/keys/process_keys.c8
-rw-r--r--security/keys/request_key.c7
-rw-r--r--security/keys/request_key_auth.c76
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/keys/user_defined.c4
-rw-r--r--security/smack/smack_lsm.c55
-rw-r--r--sound/core/compress_offload.c3
-rw-r--r--sound/core/pcm_compat.c1
-rw-r--r--sound/core/seq/seq_clientmgr.c6
-rw-r--r--sound/core/seq/seq_lock.c4
-rw-r--r--sound/core/seq/seq_lock.h12
-rw-r--r--sound/core/seq/seq_ports.c7
-rw-r--r--sound/core/seq/seq_virmidi.c27
-rw-r--r--sound/core/vmaster.c31
-rw-r--r--sound/hda/hdac_controller.c5
-rw-r--r--sound/pci/asihpi/hpioctl.c12
-rw-r--r--sound/pci/echoaudio/echoaudio.c6
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/pci/hda/patch_hdmi.c21
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--sound/usb/caiaq/device.c12
-rw-r--r--sound/usb/card.c20
-rw-r--r--sound/usb/line6/driver.c7
-rw-r--r--sound/usb/line6/podhd.c8
-rw-r--r--sound/usb/mixer.c12
-rw-r--r--sound/usb/mixer.h2
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/usx2y/usb_stream.c6
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h6
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h4
-rw-r--r--tools/include/asm-generic/hugetlb_encode.h34
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h14
-rw-r--r--tools/include/uapi/drm/drm.h22
-rw-r--r--tools/include/uapi/drm/i915_drm.h51
-rw-r--r--tools/include/uapi/linux/bpf.h44
-rw-r--r--tools/include/uapi/linux/kvm.h3
-rw-r--r--tools/include/uapi/linux/mman.h24
-rw-r--r--tools/objtool/arch/x86/decode.c11
-rw-r--r--tools/objtool/check.c9
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/MANIFEST87
-rw-r--r--tools/perf/arch/s390/util/Build1
-rw-r--r--tools/perf/arch/s390/util/sym-handling.c29
-rw-r--r--tools/perf/builtin-script.c4
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh9
-rw-r--r--tools/perf/ui/hist.c9
-rw-r--r--tools/perf/util/callchain.c41
-rw-r--r--tools/perf/util/evsel.c7
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/parse-events.l17
-rw-r--r--tools/perf/util/pmu.c56
-rw-r--r--tools/perf/util/pmu.h1
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/symbol-elf.c8
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/syscalltbl.c2
-rw-r--r--tools/perf/util/xyarray.h4
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.c10
-rw-r--r--tools/scripts/Makefile.include6
-rw-r--r--tools/testing/selftests/Makefile18
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h17
-rw-r--r--tools/testing/selftests/bpf/sockmap_verdict_prog.c4
-rw-r--r--tools/testing/selftests/bpf/test_maps.c12
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c510
-rw-r--r--tools/testing/selftests/breakpoints/Makefile8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc2
-rw-r--r--tools/testing/selftests/futex/Makefile9
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile2
-rwxr-xr-xtools/testing/selftests/intel_pstate/run.sh11
-rw-r--r--tools/testing/selftests/lib.mk48
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/memfd/run_tests.sh0
-rw-r--r--tools/testing/selftests/mqueue/Makefile4
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile6
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c2
-rwxr-xr-xtools/testing/selftests/net/netdevice.sh2
-rw-r--r--tools/testing/selftests/net/reuseaddr_conflict.c114
-rw-r--r--tools/testing/selftests/networking/timestamping/rxtimestamp.c2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c18
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c4
-rw-r--r--tools/testing/selftests/sync/Makefile24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json23
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py22
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py62
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py2
-rw-r--r--tools/testing/selftests/timers/set-timer-lat.c13
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c25
-rw-r--r--tools/testing/selftests/watchdog/Makefile7
-rw-r--r--tools/testing/selftests/x86/Makefile2
1256 files changed, 14318 insertions, 8674 deletions
diff --git a/.mailmap b/.mailmap
index 5273cfd70ad6..c7b10caecc4e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,8 @@ Jacob Shin <Jacob.Shin@amd.com>
68James Bottomley <jejb@mulgrave.(none)> 68James Bottomley <jejb@mulgrave.(none)>
69James Bottomley <jejb@titanic.il.steeleye.com> 69James Bottomley <jejb@titanic.il.steeleye.com>
70James E Wilson <wilson@specifix.com> 70James E Wilson <wilson@specifix.com>
71James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
72James Hogan <jhogan@kernel.org> <james@albanarts.com>
71James Ketrenos <jketreno@io.(none)> 73James Ketrenos <jketreno@io.(none)>
72Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> 74Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
73<javier@osg.samsung.com> <javier.martinez@collabora.co.uk> 75<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 33e96f740639..147d4e8a1403 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -14,3 +14,11 @@ Description:
14 Show or set the gain boost of the amp, from 0-31 range. 14 Show or set the gain boost of the amp, from 0-31 range.
15 18 = indoors (default) 15 18 = indoors (default)
16 14 = outdoors 16 14 = outdoors
17
18What /sys/bus/iio/devices/iio:deviceX/noise_level_tripped
19Date: May 2017
20KernelVersion: 4.13
21Contact: Matt Ranostay <matt.ranostay@konsulko.com>
22Description:
23 When 1 the noise level is over the trip level and not reporting
24 valid data
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7..94672016c268 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
14 still used for tmpfs etc. other users. If set to 14 still used for tmpfs etc. other users. If set to
15 false, the global swap readahead algorithm will be 15 false, the global swap readahead algorithm will be
16 used for all swappable pages. 16 used for all swappable pages.
17
18What: /sys/kernel/mm/swap/vma_ra_max_order
19Date: August 2017
20Contact: Linux memory management mailing list <linux-mm@kvack.org>
21Description: The max readahead size in order for VMA based swap readahead
22
23 VMA based swap readahead algorithm will readahead at
24 most 1 << max_order pages for each readahead. The
25 real readahead size for each readahead will be scaled
26 according to the estimation algorithm.
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 713cab1d5f12..a1d1612f3651 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -127,7 +127,7 @@ Description:
127 127
128What; /sys/power/pm_trace_dev_match 128What; /sys/power/pm_trace_dev_match
129Date: October 2010 129Date: October 2010
130Contact: James Hogan <james@albanarts.com> 130Contact: James Hogan <jhogan@kernel.org>
131Description: 131Description:
132 The /sys/power/pm_trace_dev_match file contains the name of the 132 The /sys/power/pm_trace_dev_match file contains the name of the
133 device associated with the last PM event point saved in the RTC 133 device associated with the last PM event point saved in the RTC
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8282099e0cbf..5da10184d908 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
352---------------------- 352----------------------
353 353
354.. kernel-doc:: include/linux/rcupdate.h 354.. kernel-doc:: include/linux/rcupdate.h
355 :external:
356 355
357.. kernel-doc:: include/linux/rcupdate_wait.h 356.. kernel-doc:: include/linux/rcupdate_wait.h
358 :external:
359 357
360.. kernel-doc:: include/linux/rcutree.h 358.. kernel-doc:: include/linux/rcutree.h
361 :external:
362 359
363.. kernel-doc:: kernel/rcu/tree.c 360.. kernel-doc:: kernel/rcu/tree.c
364 :external:
365 361
366.. kernel-doc:: kernel/rcu/tree_plugin.h 362.. kernel-doc:: kernel/rcu/tree_plugin.h
367 :external:
368 363
369.. kernel-doc:: kernel/rcu/tree_exp.h 364.. kernel-doc:: kernel/rcu/tree_exp.h
370 :external:
371 365
372.. kernel-doc:: kernel/rcu/update.c 366.. kernel-doc:: kernel/rcu/update.c
373 :external:
374 367
375.. kernel-doc:: include/linux/srcu.h 368.. kernel-doc:: include/linux/srcu.h
376 :external:
377 369
378.. kernel-doc:: kernel/rcu/srcutree.c 370.. kernel-doc:: kernel/rcu/srcutree.c
379 :external:
380 371
381.. kernel-doc:: include/linux/rculist_bl.h 372.. kernel-doc:: include/linux/rculist_bl.h
382 :external:
383 373
384.. kernel-doc:: include/linux/rculist.h 374.. kernel-doc:: include/linux/rculist.h
385 :external:
386 375
387.. kernel-doc:: include/linux/rculist_nulls.h 376.. kernel-doc:: include/linux/rculist_nulls.h
388 :external:
389 377
390.. kernel-doc:: include/linux/rcu_sync.h 378.. kernel-doc:: include/linux/rcu_sync.h
391 :external:
392 379
393.. kernel-doc:: kernel/rcu/sync.c 380.. kernel-doc:: kernel/rcu/sync.c
394 :external:
395 381
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index 3943b5bfa8cf..00a5ba51e63f 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -39,8 +39,8 @@ up.
39Although MT wq wasted a lot of resource, the level of concurrency 39Although MT wq wasted a lot of resource, the level of concurrency
40provided was unsatisfactory. The limitation was common to both ST and 40provided was unsatisfactory. The limitation was common to both ST and
41MT wq albeit less severe on MT. Each wq maintained its own separate 41MT wq albeit less severe on MT. Each wq maintained its own separate
42worker pool. A MT wq could provide only one execution context per CPU 42worker pool. An MT wq could provide only one execution context per CPU
43while a ST wq one for the whole system. Work items had to compete for 43while an ST wq one for the whole system. Work items had to compete for
44those very limited execution contexts leading to various problems 44those very limited execution contexts leading to various problems
45including proneness to deadlocks around the single execution context. 45including proneness to deadlocks around the single execution context.
46 46
@@ -151,7 +151,7 @@ Application Programming Interface (API)
151 151
152``alloc_workqueue()`` allocates a wq. The original 152``alloc_workqueue()`` allocates a wq. The original
153``create_*workqueue()`` functions are deprecated and scheduled for 153``create_*workqueue()`` functions are deprecated and scheduled for
154removal. ``alloc_workqueue()`` takes three arguments - @``name``, 154removal. ``alloc_workqueue()`` takes three arguments - ``@name``,
155``@flags`` and ``@max_active``. ``@name`` is the name of the wq and 155``@flags`` and ``@max_active``. ``@name`` is the name of the wq and
156also used as the name of the rescuer thread if there is one. 156also used as the name of the rescuer thread if there is one.
157 157
@@ -197,7 +197,7 @@ resources, scheduled and executed.
197 served by worker threads with elevated nice level. 197 served by worker threads with elevated nice level.
198 198
199 Note that normal and highpri worker-pools don't interact with 199 Note that normal and highpri worker-pools don't interact with
200 each other. Each maintain its separate pool of workers and 200 each other. Each maintains its separate pool of workers and
201 implements concurrency management among its workers. 201 implements concurrency management among its workers.
202 202
203``WQ_CPU_INTENSIVE`` 203``WQ_CPU_INTENSIVE``
@@ -249,8 +249,8 @@ unbound worker-pools and only one work item could be active at any given
249time thus achieving the same ordering property as ST wq. 249time thus achieving the same ordering property as ST wq.
250 250
251In the current implementation the above configuration only guarantees 251In the current implementation the above configuration only guarantees
252ST behavior within a given NUMA node. Instead alloc_ordered_queue should 252ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
253be used to achieve system wide ST behavior. 253be used to achieve system-wide ST behavior.
254 254
255 255
256Example Execution Scenarios 256Example Execution Scenarios
diff --git a/Documentation/cpu-freq/index.txt b/Documentation/cpu-freq/index.txt
index 03a7cee6ac73..c15e75386a05 100644
--- a/Documentation/cpu-freq/index.txt
+++ b/Documentation/cpu-freq/index.txt
@@ -32,8 +32,6 @@ cpufreq-stats.txt - General description of sysfs cpufreq stats.
32 32
33index.txt - File index, Mailing list and Links (this document) 33index.txt - File index, Mailing list and Links (this document)
34 34
35intel-pstate.txt - Intel pstate cpufreq driver specific file.
36
37pcc-cpufreq.txt - PCC cpufreq driver specific file. 35pcc-cpufreq.txt - PCC cpufreq driver specific file.
38 36
39 37
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 4a0a7469fdd7..32df07e29f68 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -344,3 +344,4 @@ Version History
344 (wrong raid10_copies/raid10_format sequence) 344 (wrong raid10_copies/raid10_format sequence)
3451.11.1 Add raid4/5/6 journal write-back support via journal_mode option 3451.11.1 Add raid4/5/6 journal write-back support via journal_mode option
3461.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available 3461.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
3471.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A')
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index 38d74314b7ab..b6c1afa6f02d 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -16,6 +16,10 @@ Optional properties:
16 - ams,tuning-capacitor-pf: Calibration tuning capacitor stepping 16 - ams,tuning-capacitor-pf: Calibration tuning capacitor stepping
17 value 0 - 120pF. This will require using the calibration data from 17 value 0 - 120pF. This will require using the calibration data from
18 the manufacturer. 18 the manufacturer.
19 - ams,nflwdth: Set the noise and watchdog threshold register on
20 startup. This will need to set according to the noise from the
21 MCU board, and possibly the local environment. Refer to the
22 datasheet for the threshold settings.
19 23
20Example: 24Example:
21 25
@@ -27,4 +31,5 @@ as3935@0 {
27 interrupt-parent = <&gpio1>; 31 interrupt-parent = <&gpio1>;
28 interrupts = <16 1>; 32 interrupts = <16 1>;
29 ams,tuning-capacitor-pf = <80>; 33 ams,tuning-capacitor-pf = <80>;
34 ams,nflwdth = <0x44>;
30}; 35};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cdab0ea5..5eb108e180fa 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -99,7 +99,7 @@ Examples:
99 compatible = "arm,gic-v3-its"; 99 compatible = "arm,gic-v3-its";
100 msi-controller; 100 msi-controller;
101 #msi-cells = <1>; 101 #msi-cells = <1>;
102 reg = <0x0 0x2c200000 0 0x200000>; 102 reg = <0x0 0x2c200000 0 0x20000>;
103 }; 103 };
104 }; 104 };
105 105
@@ -124,14 +124,14 @@ Examples:
124 compatible = "arm,gic-v3-its"; 124 compatible = "arm,gic-v3-its";
125 msi-controller; 125 msi-controller;
126 #msi-cells = <1>; 126 #msi-cells = <1>;
127 reg = <0x0 0x2c200000 0 0x200000>; 127 reg = <0x0 0x2c200000 0 0x20000>;
128 }; 128 };
129 129
130 gic-its@2c400000 { 130 gic-its@2c400000 {
131 compatible = "arm,gic-v3-its"; 131 compatible = "arm,gic-v3-its";
132 msi-controller; 132 msi-controller;
133 #msi-cells = <1>; 133 #msi-cells = <1>;
134 reg = <0x0 0x2c400000 0 0x200000>; 134 reg = <0x0 0x2c400000 0 0x20000>;
135 }; 135 };
136 136
137 ppi-partitions { 137 ppi-partitions {
diff --git a/Documentation/devicetree/bindings/leds/ams,as3645a.txt b/Documentation/devicetree/bindings/leds/ams,as3645a.txt
index 12c5ef26ec73..fdc40e354a64 100644
--- a/Documentation/devicetree/bindings/leds/ams,as3645a.txt
+++ b/Documentation/devicetree/bindings/leds/ams,as3645a.txt
@@ -15,11 +15,14 @@ Required properties
15 15
16compatible : Must be "ams,as3645a". 16compatible : Must be "ams,as3645a".
17reg : The I2C address of the device. Typically 0x30. 17reg : The I2C address of the device. Typically 0x30.
18#address-cells : 1
19#size-cells : 0
18 20
19 21
20Required properties of the "flash" child node 22Required properties of the flash child node (0)
21============================================= 23===============================================
22 24
25reg: 0
23flash-timeout-us: Flash timeout in microseconds. The value must be in 26flash-timeout-us: Flash timeout in microseconds. The value must be in
24 the range [100000, 850000] and divisible by 50000. 27 the range [100000, 850000] and divisible by 50000.
25flash-max-microamp: Maximum flash current in microamperes. Has to be 28flash-max-microamp: Maximum flash current in microamperes. Has to be
@@ -33,20 +36,21 @@ ams,input-max-microamp: Maximum flash controller input current. The
33 and divisible by 50000. 36 and divisible by 50000.
34 37
35 38
36Optional properties of the "flash" child node 39Optional properties of the flash child node
37============================================= 40===========================================
38 41
39label : The label of the flash LED. 42label : The label of the flash LED.
40 43
41 44
42Required properties of the "indicator" child node 45Required properties of the indicator child node (1)
43================================================= 46===================================================
44 47
48reg: 1
45led-max-microamp: Maximum indicator current. The allowed values are 49led-max-microamp: Maximum indicator current. The allowed values are
46 2500, 5000, 7500 and 10000. 50 2500, 5000, 7500 and 10000.
47 51
48Optional properties of the "indicator" child node 52Optional properties of the indicator child node
49================================================= 53===============================================
50 54
51label : The label of the indicator LED. 55label : The label of the indicator LED.
52 56
@@ -55,16 +59,20 @@ Example
55======= 59=======
56 60
57 as3645a@30 { 61 as3645a@30 {
62 #address-cells: 1
63 #size-cells: 0
58 reg = <0x30>; 64 reg = <0x30>;
59 compatible = "ams,as3645a"; 65 compatible = "ams,as3645a";
60 flash { 66 flash@0 {
67 reg = <0x0>;
61 flash-timeout-us = <150000>; 68 flash-timeout-us = <150000>;
62 flash-max-microamp = <320000>; 69 flash-max-microamp = <320000>;
63 led-max-microamp = <60000>; 70 led-max-microamp = <60000>;
64 ams,input-max-microamp = <1750000>; 71 ams,input-max-microamp = <1750000>;
65 label = "as3645a:flash"; 72 label = "as3645a:flash";
66 }; 73 };
67 indicator { 74 indicator@1 {
75 reg = <0x1>;
68 led-max-microamp = <10000>; 76 led-max-microamp = <10000>;
69 label = "as3645a:indicator"; 77 label = "as3645a:indicator";
70 }; 78 };
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
index b878a1e305af..ed1456f5c94d 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
@@ -16,11 +16,13 @@ Required Properties:
16 16
17- clocks: 17- clocks:
18 Array of clocks required for SDHC. 18 Array of clocks required for SDHC.
19 Require at least input clock for Xenon IP core. 19 Require at least input clock for Xenon IP core. For Armada AP806 and
20 CP110, the AXI clock is also mandatory.
20 21
21- clock-names: 22- clock-names:
22 Array of names corresponding to clocks property. 23 Array of names corresponding to clocks property.
23 The input clock for Xenon IP core should be named as "core". 24 The input clock for Xenon IP core should be named as "core".
25 The input clock for the AXI bus must be named as "axi".
24 26
25- reg: 27- reg:
26 * For "marvell,armada-3700-sdhci", two register areas. 28 * For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
106 compatible = "marvell,armada-ap806-sdhci"; 108 compatible = "marvell,armada-ap806-sdhci";
107 reg = <0xaa0000 0x1000>; 109 reg = <0xaa0000 0x1000>;
108 interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH> 110 interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
109 clocks = <&emmc_clk>; 111 clocks = <&emmc_clk>,<&axi_clk>;
110 clock-names = "core"; 112 clock-names = "core", "axi";
111 bus-width = <4>; 113 bus-width = <4>;
112 marvell,xenon-phy-slow-mode; 114 marvell,xenon-phy-slow-mode;
113 marvell,xenon-tun-count = <11>; 115 marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
126 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH> 128 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
127 vqmmc-supply = <&sd_vqmmc_regulator>; 129 vqmmc-supply = <&sd_vqmmc_regulator>;
128 vmmc-supply = <&sd_vmmc_regulator>; 130 vmmc-supply = <&sd_vmmc_regulator>;
129 clocks = <&sdclk>; 131 clocks = <&sdclk>, <&axi_clk>;
130 clock-names = "core"; 132 clock-names = "core", "axi";
131 bus-width = <4>; 133 bus-width = <4>;
132 marvell,xenon-tun-count = <9>; 134 marvell,xenon-tun-count = <9>;
133 }; 135 };
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 7e2dad08a12e..1814fa13f6ab 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -21,8 +21,9 @@ Required properties:
21 - main controller clock (for both armada-375-pp2 and armada-7k-pp2) 21 - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
22 - GOP clock (for both armada-375-pp2 and armada-7k-pp2) 22 - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
23 - MG clock (only for armada-7k-pp2) 23 - MG clock (only for armada-7k-pp2)
24- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and 24 - AXI clock (only for armada-7k-pp2)
25 "mg_clk" (the latter only for armada-7k-pp2). 25- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
26 and "axi_clk" (the 2 latter only for armada-7k-pp2).
26 27
27The ethernet ports are represented by subnodes. At least one port is 28The ethernet ports are represented by subnodes. At least one port is
28required. 29required.
@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2:
78cpm_ethernet: ethernet@0 { 79cpm_ethernet: ethernet@0 {
79 compatible = "marvell,armada-7k-pp22"; 80 compatible = "marvell,armada-7k-pp22";
80 reg = <0x0 0x100000>, <0x129000 0xb000>; 81 reg = <0x0 0x100000>, <0x129000 0xb000>;
81 clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>; 82 clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
82 clock-names = "pp_clk", "gop_clk", "gp_clk"; 83 <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
84 clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
83 85
84 eth0: eth0 { 86 eth0: eth0 {
85 interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>, 87 interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 6af8eed1adeb..9c16ee2965a2 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -4,6 +4,7 @@ The device node has following properties.
4 4
5Required properties: 5Required properties:
6 - compatible: should be "rockchip,<name>-gamc" 6 - compatible: should be "rockchip,<name>-gamc"
7 "rockchip,rk3128-gmac": found on RK312x SoCs
7 "rockchip,rk3228-gmac": found on RK322x SoCs 8 "rockchip,rk3228-gmac": found on RK322x SoCs
8 "rockchip,rk3288-gmac": found on RK3288 SoCs 9 "rockchip,rk3288-gmac": found on RK3288 SoCs
9 "rockchip,rk3328-gmac": found on RK3328 SoCs 10 "rockchip,rk3328-gmac": found on RK3328 SoCs
diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
new file mode 100644
index 000000000000..830069b1c37c
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
@@ -0,0 +1,28 @@
1Binding for the Synopsys HSDK reset controller
2
3This binding uses the common reset binding[1].
4
5[1] Documentation/devicetree/bindings/reset/reset.txt
6
7Required properties:
8- compatible: should be "snps,hsdk-reset".
9- reg: should always contain 2 pairs address - length: first for reset
10 configuration register and second for corresponding SW reset and status bits
11 register.
12- #reset-cells: from common reset binding; Should always be set to 1.
13
14Example:
15 reset: reset@880 {
16 compatible = "snps,hsdk-reset";
17 #reset-cells = <1>;
18 reg = <0x8A0 0x4>, <0xFF0 0x4>;
19 };
20
21Specifying reset lines connected to IP modules:
22 ethernet@.... {
23 ....
24 resets = <&reset HSDK_V1_ETH_RESET>;
25 ....
26 };
27
 28The index can be found in <dt-bindings/reset/snps,hsdk-reset.h>
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 4fc96946f81d..cf504d0380ae 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -41,6 +41,8 @@ Required properties:
41 - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART. 41 - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
42 - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART. 42 - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
43 - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART. 43 - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
44 - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
45 - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
44 - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART. 46 - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
45 - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART. 47 - "renesas,hscif-r8a77995" for R8A77995 (R-Car D3) HSCIF compatible UART.
46 - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART. 48 - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
diff --git a/Documentation/driver-model/driver.txt b/Documentation/driver-model/driver.txt
index 4421135826a2..d661e6f7e6a0 100644
--- a/Documentation/driver-model/driver.txt
+++ b/Documentation/driver-model/driver.txt
@@ -196,12 +196,13 @@ struct driver_attribute {
196}; 196};
197 197
198Device drivers can export attributes via their sysfs directories. 198Device drivers can export attributes via their sysfs directories.
199Drivers can declare attributes using a DRIVER_ATTR macro that works 199Drivers can declare attributes using a DRIVER_ATTR_RW and DRIVER_ATTR_RO
200identically to the DEVICE_ATTR macro. 200macro that works identically to the DEVICE_ATTR_RW and DEVICE_ATTR_RO
201macros.
201 202
202Example: 203Example:
203 204
204DRIVER_ATTR(debug,0644,show_debug,store_debug); 205DRIVER_ATTR_RW(debug);
205 206
206This is equivalent to declaring: 207This is equivalent to declaring:
207 208
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 36f528a7fdd6..8caa60734647 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
210beneath or above the path of another overlay lower layer path. 210beneath or above the path of another overlay lower layer path.
211 211
212Using an upper layer path and/or a workdir path that are already used by 212Using an upper layer path and/or a workdir path that are already used by
213another overlay mount is not allowed and will fail with EBUSY. Using 213another overlay mount is not allowed and may fail with EBUSY. Using
214partially overlapping paths is not allowed but will not fail with EBUSY. 214partially overlapping paths is not allowed but will not fail with EBUSY.
215If files are accessed from two overlayfs mounts which share or overlap the
216upper layer and/or workdir path the behavior of the overlay is undefined,
217though it will not result in a crash or deadlock.
215 218
216Mounting an overlay using an upper layer path, where the upper layer path 219Mounting an overlay using an upper layer path, where the upper layer path
217was previously used by another mounted overlay in combination with a 220was previously used by another mounted overlay in combination with a
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index 24da7b32c489..9a3658cc399e 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -366,7 +366,8 @@ struct driver_attribute {
366 366
367Declaring: 367Declaring:
368 368
369DRIVER_ATTR(_name, _mode, _show, _store) 369DRIVER_ATTR_RO(_name)
370DRIVER_ATTR_RW(_name)
370 371
371Creation/Removal: 372Creation/Removal:
372 373
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 0500193434cb..d47702456926 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
36 * Intel Gemini Lake (SOC) 36 * Intel Gemini Lake (SOC)
37 * Intel Cannon Lake-H (PCH) 37 * Intel Cannon Lake-H (PCH)
38 * Intel Cannon Lake-LP (PCH) 38 * Intel Cannon Lake-LP (PCH)
39 * Intel Cedar Fork (PCH)
39 Datasheets: Publicly available at the Intel website 40 Datasheets: Publicly available at the Intel website
40 41
41On Intel Patsburg and later chipsets, both the normal host SMBus controller 42On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 329e740adea7..f6f80380dff2 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
1108 ld 1108 ld
1109 Link target. Often, LDFLAGS_$@ is used to set specific options to ld. 1109 Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
1110 1110
1111 objcopy
1112 Copy binary. Uses OBJCOPYFLAGS usually specified in
1113 arch/$(ARCH)/Makefile.
1114 OBJCOPYFLAGS_$@ may be used to set additional options.
1115
1116 gzip
1117 Compress target. Use maximum compression to compress target.
1118
1119 Example: 1111 Example:
1120 #arch/x86/boot/Makefile 1112 #arch/x86/boot/Makefile
1121 LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary 1113 LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
1139 resulting in the target file being recompiled for no 1131 resulting in the target file being recompiled for no
1140 obvious reason. 1132 obvious reason.
1141 1133
1134 objcopy
1135 Copy binary. Uses OBJCOPYFLAGS usually specified in
1136 arch/$(ARCH)/Makefile.
1137 OBJCOPYFLAGS_$@ may be used to set additional options.
1138
1139 gzip
1140 Compress target. Use maximum compression to compress target.
1141
1142 Example:
1143 #arch/x86/boot/compressed/Makefile
1144 $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
1145 $(call if_changed,gzip)
1146
1142 dtc 1147 dtc
1143 Create flattened device tree blob object suitable for linking 1148 Create flattened device tree blob object suitable for linking
1144 into vmlinux. Device tree blobs linked into vmlinux are placed 1149 into vmlinux. Device tree blobs linked into vmlinux are placed
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
1219 that may be shared between individual architectures. 1224 that may be shared between individual architectures.
1220 The recommended approach how to use a generic header file is 1225 The recommended approach how to use a generic header file is
1221 to list the file in the Kbuild file. 1226 to list the file in the Kbuild file.
1222 See "7.3 generic-y" for further info on syntax etc. 1227 See "7.2 generic-y" for further info on syntax etc.
1223 1228
1224--- 6.11 Post-link pass 1229--- 6.11 Post-link pass
1225 1230
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
1254arch/<arch>/include/asm/ to list asm files coming from asm-generic. 1259arch/<arch>/include/asm/ to list asm files coming from asm-generic.
1255See subsequent chapter for the syntax of the Kbuild file. 1260See subsequent chapter for the syntax of the Kbuild file.
1256 1261
1257 --- 7.1 no-export-headers 1262--- 7.1 no-export-headers
1258 1263
1259 no-export-headers is essentially used by include/uapi/linux/Kbuild to 1264 no-export-headers is essentially used by include/uapi/linux/Kbuild to
1260 avoid exporting specific headers (e.g. kvm.h) on architectures that do 1265 avoid exporting specific headers (e.g. kvm.h) on architectures that do
1261 not support it. It should be avoided as much as possible. 1266 not support it. It should be avoided as much as possible.
1262 1267
1263 --- 7.2 generic-y 1268--- 7.2 generic-y
1264 1269
1265 If an architecture uses a verbatim copy of a header from 1270 If an architecture uses a verbatim copy of a header from
1266 include/asm-generic then this is listed in the file 1271 include/asm-generic then this is listed in the file
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
1287 Example: termios.h 1292 Example: termios.h
1288 #include <asm-generic/termios.h> 1293 #include <asm-generic/termios.h>
1289 1294
1290 --- 7.3 generated-y 1295--- 7.3 generated-y
1291 1296
1292 If an architecture generates other header files alongside generic-y 1297 If an architecture generates other header files alongside generic-y
1293 wrappers, generated-y specifies them. 1298 wrappers, generated-y specifies them.
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
1299 #arch/x86/include/asm/Kbuild 1304 #arch/x86/include/asm/Kbuild
1300 generated-y += syscalls_32.h 1305 generated-y += syscalls_32.h
1301 1306
1302 --- 7.5 mandatory-y 1307--- 7.4 mandatory-y
1303 1308
1304 mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm 1309 mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
1305 to define the minimum set of headers that must be exported in 1310 to define the minimum set of headers that must be exported in
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 57f52cdce32e..9ba04c0bab8d 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
2387 and packet type ID), so in a "gatewayed" configuration, all 2387 and packet type ID), so in a "gatewayed" configuration, all
2388 outgoing traffic will generally use the same device. Incoming 2388 outgoing traffic will generally use the same device. Incoming
2389 traffic may also end up on a single device, but that is 2389 traffic may also end up on a single device, but that is
2390 dependent upon the balancing policy of the peer's 8023.ad 2390 dependent upon the balancing policy of the peer's 802.3ad
2391 implementation. In a "local" configuration, traffic will be 2391 implementation. In a "local" configuration, traffic will be
2392 distributed across the devices in the bond. 2392 distributed across the devices in the bond.
2393 2393
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 82fc399fcd33..61e43cc3ed17 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
25 submitting-patches 25 submitting-patches
26 coding-style 26 coding-style
27 email-clients 27 email-clients
28 kernel-enforcement-statement
28 29
29Other guides to the community that are of interest to most developers are: 30Other guides to the community that are of interest to most developers are:
30 31
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644
index 000000000000..1e23d4227337
--- /dev/null
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -0,0 +1,147 @@
1Linux Kernel Enforcement Statement
2----------------------------------
3
4As developers of the Linux kernel, we have a keen interest in how our software
5is used and how the license for our software is enforced. Compliance with the
6reciprocal sharing obligations of GPL-2.0 is critical to the long-term
7sustainability of our software and community.
8
9Although there is a right to enforce the separate copyright interests in the
10contributions made to our community, we share an interest in ensuring that
11individual enforcement actions are conducted in a manner that benefits our
12community and do not have an unintended negative impact on the health and
13growth of our software ecosystem. In order to deter unhelpful enforcement
14actions, we agree that it is in the best interests of our development
15community to undertake the following commitment to users of the Linux kernel
16on behalf of ourselves and any successors to our copyright interests:
17
18 Notwithstanding the termination provisions of the GPL-2.0, we agree that
19 it is in the best interests of our development community to adopt the
20 following provisions of GPL-3.0 as additional permissions under our
21 license with respect to any non-defensive assertion of rights under the
22 license.
23
24 However, if you cease all violation of this License, then your license
25 from a particular copyright holder is reinstated (a) provisionally,
26 unless and until the copyright holder explicitly and finally
27 terminates your license, and (b) permanently, if the copyright holder
28 fails to notify you of the violation by some reasonable means prior to
29 60 days after the cessation.
30
31 Moreover, your license from a particular copyright holder is
32 reinstated permanently if the copyright holder notifies you of the
33 violation by some reasonable means, this is the first time you have
34 received notice of violation of this License (for any work) from that
35 copyright holder, and you cure the violation prior to 30 days after
36 your receipt of the notice.
37
38Our intent in providing these assurances is to encourage more use of the
39software. We want companies and individuals to use, modify and distribute
40this software. We want to work with users in an open and transparent way to
41eliminate any uncertainty about our expectations regarding compliance or
42enforcement that might limit adoption of our software. We view legal action
43as a last resort, to be initiated only when other community efforts have
44failed to resolve the problem.
45
46Finally, once a non-compliance issue is resolved, we hope the user will feel
47welcome to join us in our efforts on this project. Working together, we will
48be stronger.
49
50Except where noted below, we speak only for ourselves, and not for any company
51we might work for today, have in the past, or will in the future.
52
53 - Bjorn Andersson (Linaro)
54 - Andrea Arcangeli (Red Hat)
55 - Neil Armstrong
56 - Jens Axboe
57 - Pablo Neira Ayuso
58 - Khalid Aziz
59 - Ralf Baechle
60 - Felipe Balbi
61 - Arnd Bergmann
62 - Ard Biesheuvel
63 - Paolo Bonzini (Red Hat)
64 - Christian Borntraeger
65 - Mark Brown (Linaro)
66 - Paul Burton
67 - Javier Martinez Canillas
68 - Rob Clark
69 - Jonathan Corbet
70 - Vivien Didelot (Savoir-faire Linux)
71 - Hans de Goede (Red Hat)
72 - Mel Gorman (SUSE)
73 - Sven Eckelmann
74 - Alex Elder (Linaro)
75 - Fabio Estevam
76 - Larry Finger
77 - Bhumika Goyal
78 - Andy Gross
79 - Juergen Gross
80 - Shawn Guo
81 - Ulf Hansson
82 - Tejun Heo
83 - Rob Herring
84 - Masami Hiramatsu
85 - Michal Hocko
86 - Simon Horman
87 - Johan Hovold (Hovold Consulting AB)
88 - Christophe JAILLET
89 - Olof Johansson
90 - Lee Jones (Linaro)
91 - Heiner Kallweit
92 - Srinivas Kandagatla
93 - Jan Kara
94 - Shuah Khan (Samsung)
95 - David Kershner
96 - Jaegeuk Kim
97 - Namhyung Kim
98 - Colin Ian King
99 - Jeff Kirsher
100 - Greg Kroah-Hartman (Linux Foundation)
101 - Christian König
102 - Vinod Koul
103 - Krzysztof Kozlowski
104 - Viresh Kumar
105 - Aneesh Kumar K.V
106 - Julia Lawall
107 - Doug Ledford (Red Hat)
108 - Chuck Lever (Oracle)
109 - Daniel Lezcano
110 - Shaohua Li
111 - Xin Long (Red Hat)
112 - Tony Luck
113 - Mike Marshall
114 - Chris Mason
115 - Paul E. McKenney
116 - David S. Miller
117 - Ingo Molnar
118 - Kuninori Morimoto
119 - Borislav Petkov
120 - Jiri Pirko
121 - Josh Poimboeuf
122 - Sebastian Reichel (Collabora)
123 - Guenter Roeck
124 - Joerg Roedel
125 - Leon Romanovsky
126 - Steven Rostedt (VMware)
127 - Ivan Safonov
128 - Ivan Safonov
129 - Anna Schumaker
130 - Jes Sorensen
131 - K.Y. Srinivasan
132 - Heiko Stuebner
133 - Jiri Kosina (SUSE)
134 - Dmitry Torokhov
135 - Linus Torvalds
136 - Thierry Reding
137 - Rik van Riel
138 - Geert Uytterhoeven (Glider bvba)
139 - Daniel Vetter
140 - Linus Walleij
141 - Richard Weinberger
142 - Dan Williams
143 - Rafael J. Wysocki
144 - Arvind Yadav
145 - Masahiro Yamada
146 - Wei Yongjun
147 - Lv Zheng
diff --git a/MAINTAINERS b/MAINTAINERS
index 6671f375f7fc..bf1d20695cbf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5259,7 +5259,8 @@ S: Maintained
5259F: drivers/iommu/exynos-iommu.c 5259F: drivers/iommu/exynos-iommu.c
5260 5260
5261EZchip NPS platform support 5261EZchip NPS platform support
5262M: Noam Camus <noamc@ezchip.com> 5262M: Elad Kanfi <eladkan@mellanox.com>
5263M: Vineet Gupta <vgupta@synopsys.com>
5263S: Supported 5264S: Supported
5264F: arch/arc/plat-eznps 5265F: arch/arc/plat-eznps
5265F: arch/arc/boot/dts/eznps.dts 5266F: arch/arc/boot/dts/eznps.dts
@@ -5345,9 +5346,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org>
5345L: linux-fsdevel@vger.kernel.org 5346L: linux-fsdevel@vger.kernel.org
5346S: Maintained 5347S: Maintained
5347F: include/linux/fcntl.h 5348F: include/linux/fcntl.h
5348F: include/linux/fs.h
5349F: include/uapi/linux/fcntl.h 5349F: include/uapi/linux/fcntl.h
5350F: include/uapi/linux/fs.h
5351F: fs/fcntl.c 5350F: fs/fcntl.c
5352F: fs/locks.c 5351F: fs/locks.c
5353 5352
@@ -5356,6 +5355,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk>
5356L: linux-fsdevel@vger.kernel.org 5355L: linux-fsdevel@vger.kernel.org
5357S: Maintained 5356S: Maintained
5358F: fs/* 5357F: fs/*
5358F: include/linux/fs.h
5359F: include/uapi/linux/fs.h
5359 5360
5360FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER 5361FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
5361M: Riku Voipio <riku.voipio@iki.fi> 5362M: Riku Voipio <riku.voipio@iki.fi>
@@ -6670,7 +6671,7 @@ F: include/net/ieee802154_netdev.h
6670F: Documentation/networking/ieee802154.txt 6671F: Documentation/networking/ieee802154.txt
6671 6672
6672IFE PROTOCOL 6673IFE PROTOCOL
6673M: Yotam Gigi <yotamg@mellanox.com> 6674M: Yotam Gigi <yotam.gi@gmail.com>
6674M: Jamal Hadi Salim <jhs@mojatatu.com> 6675M: Jamal Hadi Salim <jhs@mojatatu.com>
6675F: net/ife 6676F: net/ife
6676F: include/net/ife.h 6677F: include/net/ife.h
@@ -6738,7 +6739,7 @@ F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
6738F: drivers/auxdisplay/img-ascii-lcd.c 6739F: drivers/auxdisplay/img-ascii-lcd.c
6739 6740
6740IMGTEC IR DECODER DRIVER 6741IMGTEC IR DECODER DRIVER
6741M: James Hogan <james.hogan@imgtec.com> 6742M: James Hogan <jhogan@kernel.org>
6742S: Maintained 6743S: Maintained
6743F: drivers/media/rc/img-ir/ 6744F: drivers/media/rc/img-ir/
6744 6745
@@ -7562,7 +7563,7 @@ F: arch/arm64/include/asm/kvm*
7562F: arch/arm64/kvm/ 7563F: arch/arm64/kvm/
7563 7564
7564KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) 7565KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
7565M: James Hogan <james.hogan@imgtec.com> 7566M: James Hogan <jhogan@kernel.org>
7566L: linux-mips@linux-mips.org 7567L: linux-mips@linux-mips.org
7567S: Supported 7568S: Supported
7568F: arch/mips/include/uapi/asm/kvm* 7569F: arch/mips/include/uapi/asm/kvm*
@@ -7570,7 +7571,7 @@ F: arch/mips/include/asm/kvm*
7570F: arch/mips/kvm/ 7571F: arch/mips/kvm/
7571 7572
7572KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc) 7573KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
7573M: Alexander Graf <agraf@suse.com> 7574M: Paul Mackerras <paulus@ozlabs.org>
7574L: kvm-ppc@vger.kernel.org 7575L: kvm-ppc@vger.kernel.org
7575W: http://www.linux-kvm.org/ 7576W: http://www.linux-kvm.org/
7576T: git git://github.com/agraf/linux-2.6.git 7577T: git git://github.com/agraf/linux-2.6.git
@@ -8264,6 +8265,12 @@ L: libertas-dev@lists.infradead.org
8264S: Orphan 8265S: Orphan
8265F: drivers/net/wireless/marvell/libertas/ 8266F: drivers/net/wireless/marvell/libertas/
8266 8267
8268MARVELL MACCHIATOBIN SUPPORT
8269M: Russell King <rmk@armlinux.org.uk>
8270L: linux-arm-kernel@lists.infradead.org
8271S: Maintained
8272F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
8273
8267MARVELL MV643XX ETHERNET DRIVER 8274MARVELL MV643XX ETHERNET DRIVER
8268M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 8275M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
8269L: netdev@vger.kernel.org 8276L: netdev@vger.kernel.org
@@ -8597,6 +8604,12 @@ M: Sean Wang <sean.wang@mediatek.com>
8597S: Maintained 8604S: Maintained
8598F: drivers/media/rc/mtk-cir.c 8605F: drivers/media/rc/mtk-cir.c
8599 8606
8607MEDIATEK PMIC LED DRIVER
8608M: Sean Wang <sean.wang@mediatek.com>
8609S: Maintained
8610F: drivers/leds/leds-mt6323.c
8611F: Documentation/devicetree/bindings/leds/leds-mt6323.txt
8612
8600MEDIATEK ETHERNET DRIVER 8613MEDIATEK ETHERNET DRIVER
8601M: Felix Fietkau <nbd@openwrt.org> 8614M: Felix Fietkau <nbd@openwrt.org>
8602M: John Crispin <john@phrozen.org> 8615M: John Crispin <john@phrozen.org>
@@ -8730,7 +8743,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8730F: drivers/net/ethernet/mellanox/mlxsw/ 8743F: drivers/net/ethernet/mellanox/mlxsw/
8731 8744
8732MELLANOX FIRMWARE FLASH LIBRARY (mlxfw) 8745MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
8733M: Yotam Gigi <yotamg@mellanox.com> 8746M: mlxsw@mellanox.com
8734L: netdev@vger.kernel.org 8747L: netdev@vger.kernel.org
8735S: Supported 8748S: Supported
8736W: http://www.mellanox.com 8749W: http://www.mellanox.com
@@ -8879,7 +8892,7 @@ F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
8879T: git git://linuxtv.org/media_tree.git 8892T: git git://linuxtv.org/media_tree.git
8880 8893
8881METAG ARCHITECTURE 8894METAG ARCHITECTURE
8882M: James Hogan <james.hogan@imgtec.com> 8895M: James Hogan <jhogan@kernel.org>
8883L: linux-metag@vger.kernel.org 8896L: linux-metag@vger.kernel.org
8884T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git 8897T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
8885S: Odd Fixes 8898S: Odd Fixes
@@ -9200,7 +9213,6 @@ F: include/linux/isicom.h
9200MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER 9213MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
9201M: Bin Liu <b-liu@ti.com> 9214M: Bin Liu <b-liu@ti.com>
9202L: linux-usb@vger.kernel.org 9215L: linux-usb@vger.kernel.org
9203T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
9204S: Maintained 9216S: Maintained
9205F: drivers/usb/musb/ 9217F: drivers/usb/musb/
9206 9218
@@ -9348,7 +9360,7 @@ NETWORK BLOCK DEVICE (NBD)
9348M: Josef Bacik <jbacik@fb.com> 9360M: Josef Bacik <jbacik@fb.com>
9349S: Maintained 9361S: Maintained
9350L: linux-block@vger.kernel.org 9362L: linux-block@vger.kernel.org
9351L: nbd-general@lists.sourceforge.net 9363L: nbd@other.debian.org
9352F: Documentation/blockdev/nbd.txt 9364F: Documentation/blockdev/nbd.txt
9353F: drivers/block/nbd.c 9365F: drivers/block/nbd.c
9354F: include/uapi/linux/nbd.h 9366F: include/uapi/linux/nbd.h
@@ -10167,7 +10179,6 @@ F: Documentation/parport*.txt
10167 10179
10168PARAVIRT_OPS INTERFACE 10180PARAVIRT_OPS INTERFACE
10169M: Juergen Gross <jgross@suse.com> 10181M: Juergen Gross <jgross@suse.com>
10170M: Chris Wright <chrisw@sous-sol.org>
10171M: Alok Kataria <akataria@vmware.com> 10182M: Alok Kataria <akataria@vmware.com>
10172M: Rusty Russell <rusty@rustcorp.com.au> 10183M: Rusty Russell <rusty@rustcorp.com.au>
10173L: virtualization@lists.linux-foundation.org 10184L: virtualization@lists.linux-foundation.org
@@ -10547,6 +10558,8 @@ M: Peter Zijlstra <peterz@infradead.org>
10547M: Ingo Molnar <mingo@redhat.com> 10558M: Ingo Molnar <mingo@redhat.com>
10548M: Arnaldo Carvalho de Melo <acme@kernel.org> 10559M: Arnaldo Carvalho de Melo <acme@kernel.org>
10549R: Alexander Shishkin <alexander.shishkin@linux.intel.com> 10560R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
10561R: Jiri Olsa <jolsa@redhat.com>
10562R: Namhyung Kim <namhyung@kernel.org>
10550L: linux-kernel@vger.kernel.org 10563L: linux-kernel@vger.kernel.org
10551T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core 10564T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
10552S: Supported 10565S: Supported
@@ -10877,7 +10890,7 @@ S: Maintained
10877F: drivers/block/ps3vram.c 10890F: drivers/block/ps3vram.c
10878 10891
10879PSAMPLE PACKET SAMPLING SUPPORT: 10892PSAMPLE PACKET SAMPLING SUPPORT:
10880M: Yotam Gigi <yotamg@mellanox.com> 10893M: Yotam Gigi <yotam.gi@gmail.com>
10881S: Maintained 10894S: Maintained
10882F: net/psample 10895F: net/psample
10883F: include/net/psample.h 10896F: include/net/psample.h
@@ -12925,9 +12938,9 @@ F: drivers/mmc/host/dw_mmc*
12925SYNOPSYS HSDK RESET CONTROLLER DRIVER 12938SYNOPSYS HSDK RESET CONTROLLER DRIVER
12926M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> 12939M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
12927S: Supported 12940S: Supported
12928F: drivers/reset/reset-hsdk-v1.c 12941F: drivers/reset/reset-hsdk.c
12929F: include/dt-bindings/reset/snps,hsdk-v1-reset.h 12942F: include/dt-bindings/reset/snps,hsdk-reset.h
12930F: Documentation/devicetree/bindings/reset/snps,hsdk-v1-reset.txt 12943F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
12931 12944
12932SYSTEM CONFIGURATION (SYSCON) 12945SYSTEM CONFIGURATION (SYSCON)
12933M: Lee Jones <lee.jones@linaro.org> 12946M: Lee Jones <lee.jones@linaro.org>
diff --git a/Makefile b/Makefile
index d1119941261c..5f91a28a3cea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc7
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -130,8 +130,8 @@ endif
130ifneq ($(KBUILD_OUTPUT),) 130ifneq ($(KBUILD_OUTPUT),)
131# check that the output directory actually exists 131# check that the output directory actually exists
132saved-output := $(KBUILD_OUTPUT) 132saved-output := $(KBUILD_OUTPUT)
133$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT)) 133KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
134KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT)) 134 && /bin/pwd)
135$(if $(KBUILD_OUTPUT),, \ 135$(if $(KBUILD_OUTPUT),, \
136 $(error failed to create output directory "$(saved-output)")) 136 $(error failed to create output directory "$(saved-output)"))
137 137
@@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag)
697 697
698ifeq ($(cc-name),clang) 698ifeq ($(cc-name),clang)
699ifneq ($(CROSS_COMPILE),) 699ifneq ($(CROSS_COMPILE),)
700CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%)) 700CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
701GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) 701GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
702endif 702endif
703ifneq ($(GCC_TOOLCHAIN),) 703ifneq ($(GCC_TOOLCHAIN),)
704CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN) 704CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
705endif 705endif
706KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 706KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
707KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) 707KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
933 ifeq ($(has_libelf),1) 933 ifeq ($(has_libelf),1)
934 objtool_target := tools/objtool FORCE 934 objtool_target := tools/objtool FORCE
935 else 935 else
936 $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") 936 ifdef CONFIG_ORC_UNWINDER
937 $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
938 else
939 $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
940 endif
937 SKIP_STACK_VALIDATION := 1 941 SKIP_STACK_VALIDATION := 1
938 export SKIP_STACK_VALIDATION 942 export SKIP_STACK_VALIDATION
939 endif 943 endif
@@ -1172,11 +1176,11 @@ headers_check: headers_install
1172 1176
1173PHONY += kselftest 1177PHONY += kselftest
1174kselftest: 1178kselftest:
1175 $(Q)$(MAKE) -C tools/testing/selftests run_tests 1179 $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
1176 1180
1177PHONY += kselftest-clean 1181PHONY += kselftest-clean
1178kselftest-clean: 1182kselftest-clean:
1179 $(Q)$(MAKE) -C tools/testing/selftests clean 1183 $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
1180 1184
1181PHONY += kselftest-merge 1185PHONY += kselftest-merge
1182kselftest-merge: 1186kselftest-merge:
@@ -1395,7 +1399,7 @@ help:
1395 @echo ' Build, install, and boot kernel before' 1399 @echo ' Build, install, and boot kernel before'
1396 @echo ' running kselftest on it' 1400 @echo ' running kselftest on it'
1397 @echo ' kselftest-clean - Remove all generated kselftest files' 1401 @echo ' kselftest-clean - Remove all generated kselftest files'
1398 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed' 1402 @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
1399 @echo ' .config.' 1403 @echo ' .config.'
1400 @echo '' 1404 @echo ''
1401 @echo 'Userspace tools targets:' 1405 @echo 'Userspace tools targets:'
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4efbb51..d789a89cb32c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
937 and non-text memory will be made non-executable. This provides 937 and non-text memory will be made non-executable. This provides
938 protection against certain security exploits (e.g. writing to text) 938 protection against certain security exploits (e.g. writing to text)
939 939
940config ARCH_WANT_RELAX_ORDER
941 bool
942
943config ARCH_HAS_REFCOUNT 940config ARCH_HAS_REFCOUNT
944 bool 941 bool
945 help 942 help
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 384bd47b5187..45c020a0fe76 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/mm_types.h> 10#include <linux/mm_types.h>
11#include <linux/sched.h>
11 12
12#include <asm/machvec.h> 13#include <asm/machvec.h>
13#include <asm/compiler.h> 14#include <asm/compiler.h>
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 118dc6af1805..7ad074fd5ab5 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -181,10 +181,10 @@ alcor_init_irq(void)
181 * comes in on. This makes interrupt processing much easier. 181 * comes in on. This makes interrupt processing much easier.
182 */ 182 */
183 183
184static int __init 184static int
185alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 185alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
186{ 186{
187 static char irq_tab[7][5] __initdata = { 187 static char irq_tab[7][5] = {
188 /*INT INTA INTB INTC INTD */ 188 /*INT INTA INTB INTC INTD */
189 /* note: IDSEL 17 is XLT only */ 189 /* note: IDSEL 17 is XLT only */
190 {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ 190 {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 4c50f8f40cbb..c0fa1fe5ce77 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -173,10 +173,10 @@ pc164_init_irq(void)
173 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. 173 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
174 */ 174 */
175 175
176static inline int __init 176static inline int
177eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 177eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
178{ 178{
179 static char irq_tab[5][5] __initdata = { 179 static char irq_tab[5][5] = {
180 /*INT INTA INTB INTC INTD */ 180 /*INT INTA INTB INTC INTD */
181 {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ 181 {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
182 {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ 182 {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
@@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
203 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. 203 * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
204 */ 204 */
205 205
206static inline int __init 206static inline int
207cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 207cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
208{ 208{
209 static char irq_tab[5][5] __initdata = { 209 static char irq_tab[5][5] = {
210 /*INT INTA INTB INTC INTD */ 210 /*INT INTA INTB INTC INTD */
211 { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ 211 { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
212 { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ 212 { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
@@ -287,10 +287,10 @@ cia_cab_init_pci(void)
287 * 287 *
288 */ 288 */
289 289
290static inline int __init 290static inline int
291alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 291alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
292{ 292{
293 static char irq_tab[7][5] __initdata = { 293 static char irq_tab[7][5] = {
294 /*INT INTA INTB INTC INTD */ 294 /*INT INTA INTB INTC INTD */
295 { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ 295 { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
296 { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ 296 { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 6c35159bc00e..9e1e40ea1d14 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -356,7 +356,7 @@ clipper_init_irq(void)
356 * 10 64 bit PCI option slot 3 (not bus 0) 356 * 10 64 bit PCI option slot 3 (not bus 0)
357 */ 357 */
358 358
359static int __init 359static int
360isa_irq_fixup(const struct pci_dev *dev, int irq) 360isa_irq_fixup(const struct pci_dev *dev, int irq)
361{ 361{
362 u8 irq8; 362 u8 irq8;
@@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
372 return irq8 & 0xf; 372 return irq8 & 0xf;
373} 373}
374 374
375static int __init 375static int
376dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 376dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
377{ 377{
378 static char irq_tab[6][5] __initdata = { 378 static char irq_tab[6][5] = {
379 /*INT INTA INTB INTC INTD */ 379 /*INT INTA INTB INTC INTD */
380 { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ 380 { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
381 { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ 381 { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
394 return isa_irq_fixup(dev, irq); 394 return isa_irq_fixup(dev, irq);
395} 395}
396 396
397static int __init 397static int
398monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 398monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
399{ 399{
400 static char irq_tab[13][5] __initdata = { 400 static char irq_tab[13][5] = {
401 /*INT INTA INTB INTC INTD */ 401 /*INT INTA INTB INTC INTD */
402 { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ 402 { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
403 { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ 403 { -1, -1, -1, -1, -1}, /* IdSel 4 unused */
@@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
423 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); 423 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
424} 424}
425 425
426static u8 __init 426static u8
427monet_swizzle(struct pci_dev *dev, u8 *pinp) 427monet_swizzle(struct pci_dev *dev, u8 *pinp)
428{ 428{
429 struct pci_controller *hose = dev->sysdata; 429 struct pci_controller *hose = dev->sysdata;
@@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
456 return slot; 456 return slot;
457} 457}
458 458
459static int __init 459static int
460webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 460webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
461{ 461{
462 static char irq_tab[13][5] __initdata = { 462 static char irq_tab[13][5] = {
463 /*INT INTA INTB INTC INTD */ 463 /*INT INTA INTB INTC INTD */
464 { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ 464 { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
465 { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ 465 { -1, -1, -1, -1, -1}, /* IdSel 8 unused */
@@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
478 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); 478 return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
479} 479}
480 480
481static int __init 481static int
482clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 482clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
483{ 483{
484 static char irq_tab[7][5] __initdata = { 484 static char irq_tab[7][5] = {
485 /*INT INTA INTB INTC INTD */ 485 /*INT INTA INTB INTC INTD */
486 { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ 486 { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
487 { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ 487 { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ad40a425e841..372661c56537 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -167,10 +167,10 @@ eb64p_init_irq(void)
167 * comes in on. This makes interrupt processing much easier. 167 * comes in on. This makes interrupt processing much easier.
168 */ 168 */
169 169
170static int __init 170static int
171eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 171eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
172{ 172{
173 static char irq_tab[5][5] __initdata = { 173 static char irq_tab[5][5] = {
174 /*INT INTA INTB INTC INTD */ 174 /*INT INTA INTB INTC INTD */
175 {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ 175 {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
176 {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ 176 {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 15f42083bdb3..2731738b5872 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -141,7 +141,7 @@ eiger_init_irq(void)
141 } 141 }
142} 142}
143 143
144static int __init 144static int
145eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 145eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
146{ 146{
147 u8 irq_orig; 147 u8 irq_orig;
@@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
158 return irq_orig - 0x80; 158 return irq_orig - 0x80;
159} 159}
160 160
161static u8 __init 161static u8
162eiger_swizzle(struct pci_dev *dev, u8 *pinp) 162eiger_swizzle(struct pci_dev *dev, u8 *pinp)
163{ 163{
164 struct pci_controller *hose = dev->sysdata; 164 struct pci_controller *hose = dev->sysdata;
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index d5b9776a608d..731d693fa1f9 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -149,10 +149,10 @@ miata_init_irq(void)
149 * comes in on. This makes interrupt processing much easier. 149 * comes in on. This makes interrupt processing much easier.
150 */ 150 */
151 151
152static int __init 152static int
153miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 153miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
154{ 154{
155 static char irq_tab[18][5] __initdata = { 155 static char irq_tab[18][5] = {
156 /*INT INTA INTB INTC INTD */ 156 /*INT INTA INTB INTC INTD */
157 {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ 157 {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
158 { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ 158 { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
@@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
196 return COMMON_TABLE_LOOKUP; 196 return COMMON_TABLE_LOOKUP;
197} 197}
198 198
199static u8 __init 199static u8
200miata_swizzle(struct pci_dev *dev, u8 *pinp) 200miata_swizzle(struct pci_dev *dev, u8 *pinp)
201{ 201{
202 int slot, pin = *pinp; 202 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 5e82dc1ad6f2..350ec9c8335b 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -145,10 +145,10 @@ mikasa_init_irq(void)
145 * comes in on. This makes interrupt processing much easier. 145 * comes in on. This makes interrupt processing much easier.
146 */ 146 */
147 147
148static int __init 148static int
149mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 149mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
150{ 150{
151 static char irq_tab[8][5] __initdata = { 151 static char irq_tab[8][5] = {
152 /*INT INTA INTB INTC INTD */ 152 /*INT INTA INTB INTC INTD */
153 {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ 153 {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
154 { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ 154 { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 8ae04a121186..d019e4ce07bd 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -62,7 +62,7 @@ nautilus_init_irq(void)
62 common_init_isa_dma(); 62 common_init_isa_dma();
63} 63}
64 64
65static int __init 65static int
66nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 66nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
67{ 67{
68 /* Preserve the IRQ set up by the console. */ 68 /* Preserve the IRQ set up by the console. */
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 063e594fd969..2301678d9f9d 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -193,10 +193,10 @@ noritake_init_irq(void)
193 * comes in on. This makes interrupt processing much easier. 193 * comes in on. This makes interrupt processing much easier.
194 */ 194 */
195 195
196static int __init 196static int
197noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 197noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
198{ 198{
199 static char irq_tab[15][5] __initdata = { 199 static char irq_tab[15][5] = {
200 /*INT INTA INTB INTC INTD */ 200 /*INT INTA INTB INTC INTD */
201 /* note: IDSELs 16, 17, and 25 are CORELLE only */ 201 /* note: IDSELs 16, 17, and 25 are CORELLE only */
202 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ 202 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
@@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
221 return COMMON_TABLE_LOOKUP; 221 return COMMON_TABLE_LOOKUP;
222} 222}
223 223
224static u8 __init 224static u8
225noritake_swizzle(struct pci_dev *dev, u8 *pinp) 225noritake_swizzle(struct pci_dev *dev, u8 *pinp)
226{ 226{
227 int slot, pin = *pinp; 227 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index dfd510ae5d8c..546822d07dc7 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -221,10 +221,10 @@ rawhide_init_irq(void)
221 * 221 *
222 */ 222 */
223 223
224static int __init 224static int
225rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 225rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
226{ 226{
227 static char irq_tab[5][5] __initdata = { 227 static char irq_tab[5][5] = {
228 /*INT INTA INTB INTC INTD */ 228 /*INT INTA INTB INTC INTD */
229 { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ 229 { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
230 { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ 230 { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index a3f485257170..3b35e1913492 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -117,10 +117,10 @@ ruffian_kill_arch (int mode)
117 * 117 *
118 */ 118 */
119 119
120static int __init 120static int
121ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 121ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
122{ 122{
123 static char irq_tab[11][5] __initdata = { 123 static char irq_tab[11][5] = {
124 /*INT INTA INTB INTC INTD */ 124 /*INT INTA INTB INTC INTD */
125 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ 125 {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
126 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ 126 {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
@@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
139 return COMMON_TABLE_LOOKUP; 139 return COMMON_TABLE_LOOKUP;
140} 140}
141 141
142static u8 __init 142static u8
143ruffian_swizzle(struct pci_dev *dev, u8 *pinp) 143ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
144{ 144{
145 int slot, pin = *pinp; 145 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 08ee737d4fba..e178007107ef 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -142,7 +142,7 @@ rx164_init_irq(void)
142 * 142 *
143 */ 143 */
144 144
145static int __init 145static int
146rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 146rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
147{ 147{
148#if 0 148#if 0
@@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
156 { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */ 156 { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
157 }; 157 };
158#else 158#else
159 static char irq_tab[6][5] __initdata = { 159 static char irq_tab[6][5] = {
160 /*INT INTA INTB INTC INTD */ 160 /*INT INTA INTB INTC INTD */
161 { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */ 161 { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
162 { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */ 162 { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 8a0aa6d67b53..86d259c2612d 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -192,10 +192,10 @@ sable_init_irq(void)
192 * with the values in the irq swizzling tables above. 192 * with the values in the irq swizzling tables above.
193 */ 193 */
194 194
195static int __init 195static int
196sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 196sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
197{ 197{
198 static char irq_tab[9][5] __initdata = { 198 static char irq_tab[9][5] = {
199 /*INT INTA INTB INTC INTD */ 199 /*INT INTA INTB INTC INTD */
200 { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ 200 { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
201 { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ 201 { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
@@ -374,10 +374,10 @@ lynx_init_irq(void)
374 * with the values in the irq swizzling tables above. 374 * with the values in the irq swizzling tables above.
375 */ 375 */
376 376
377static int __init 377static int
378lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 378lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
379{ 379{
380 static char irq_tab[19][5] __initdata = { 380 static char irq_tab[19][5] = {
381 /*INT INTA INTB INTC INTD */ 381 /*INT INTA INTB INTC INTD */
382 { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ 382 { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
383 { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ 383 { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
@@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
404 return COMMON_TABLE_LOOKUP; 404 return COMMON_TABLE_LOOKUP;
405} 405}
406 406
407static u8 __init 407static u8
408lynx_swizzle(struct pci_dev *dev, u8 *pinp) 408lynx_swizzle(struct pci_dev *dev, u8 *pinp)
409{ 409{
410 int slot, pin = *pinp; 410 int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index febd24eba7a6..9fd2895639d5 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
144 outb((level_bits >> 8) & 0xff, 0x4d1); 144 outb((level_bits >> 8) & 0xff, 0x4d1);
145} 145}
146 146
147static inline int __init 147static inline int
148noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 148noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
149{ 149{
150 /* 150 /*
@@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
165 * that they use the default INTA line, if they are interrupt 165 * that they use the default INTA line, if they are interrupt
166 * driven at all). 166 * driven at all).
167 */ 167 */
168 static char irq_tab[][5] __initdata = { 168 static char irq_tab[][5] = {
169 /*INT A B C D */ 169 /*INT A B C D */
170 { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ 170 { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
171 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ 171 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
@@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
183 return irq >= 0 ? tmp : -1; 183 return irq >= 0 ? tmp : -1;
184} 184}
185 185
186static inline int __init 186static inline int
187p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 187p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
188{ 188{
189 static char irq_tab[][5] __initdata = { 189 static char irq_tab[][5] = {
190 /*INT A B C D */ 190 /*INT A B C D */
191 { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ 191 { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
192 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ 192 {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index d063b360efed..23eee54d714a 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -94,10 +94,10 @@ sx164_init_irq(void)
94 * 9 32 bit PCI option slot 3 94 * 9 32 bit PCI option slot 3
95 */ 95 */
96 96
97static int __init 97static int
98sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 98sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
99{ 99{
100 static char irq_tab[5][5] __initdata = { 100 static char irq_tab[5][5] = {
101 /*INT INTA INTB INTC INTD */ 101 /*INT INTA INTB INTC INTD */
102 { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ 102 { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
103 { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ 103 { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index dd0f1eae3c68..9101f2bb6176 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -155,10 +155,10 @@ takara_init_irq(void)
155 * assign it whatever the hell IRQ we like and it doesn't matter. 155 * assign it whatever the hell IRQ we like and it doesn't matter.
156 */ 156 */
157 157
158static int __init 158static int
159takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin) 159takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
160{ 160{
161 static char irq_tab[15][5] __initdata = { 161 static char irq_tab[15][5] = {
162 { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ 162 { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
163 { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ 163 { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
164 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ 164 { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
@@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
210 return COMMON_TABLE_LOOKUP; 210 return COMMON_TABLE_LOOKUP;
211} 211}
212 212
213static u8 __init 213static u8
214takara_swizzle(struct pci_dev *dev, u8 *pinp) 214takara_swizzle(struct pci_dev *dev, u8 *pinp)
215{ 215{
216 int slot = PCI_SLOT(dev->devfn); 216 int slot = PCI_SLOT(dev->devfn);
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index ee1874887776..c3f8b79fe214 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector)
288 * 7 64 bit PCI 1 option slot 7 288 * 7 64 bit PCI 1 option slot 7
289 */ 289 */
290 290
291static int __init 291static int
292wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 292wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
293{ 293{
294 static char irq_tab[8][5] __initdata = { 294 static char irq_tab[8][5] = {
295 /*INT INTA INTB INTC INTD */ 295 /*INT INTA INTB INTC INTD */
296 { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */ 296 { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
297 { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */ 297 { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a598641eed98..c84e67fdea09 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -24,7 +24,7 @@ config ARC
24 select GENERIC_SMP_IDLE_THREAD 24 select GENERIC_SMP_IDLE_THREAD
25 select HAVE_ARCH_KGDB 25 select HAVE_ARCH_KGDB
26 select HAVE_ARCH_TRACEHOOK 26 select HAVE_ARCH_TRACEHOOK
27 select HAVE_FUTEX_CMPXCHG 27 select HAVE_FUTEX_CMPXCHG if FUTEX
28 select HAVE_IOREMAP_PROT 28 select HAVE_IOREMAP_PROT
29 select HAVE_KPROBES 29 select HAVE_KPROBES
30 select HAVE_KRETPROBES 30 select HAVE_KRETPROBES
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 3a4b52b7e09d..d37f49d6a27f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,8 +6,6 @@
6# published by the Free Software Foundation. 6# published by the Free Software Foundation.
7# 7#
8 8
9UTS_MACHINE := arc
10
11ifeq ($(CROSS_COMPILE),) 9ifeq ($(CROSS_COMPILE),)
12ifndef CONFIG_CPU_BIG_ENDIAN 10ifndef CONFIG_CPU_BIG_ENDIAN
13CROSS_COMPILE := arc-linux- 11CROSS_COMPILE := arc-linux-
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 2367a67c5f10..e114000a84f5 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -44,7 +44,14 @@
44 44
45 mmcclk: mmcclk { 45 mmcclk: mmcclk {
46 compatible = "fixed-clock"; 46 compatible = "fixed-clock";
47 clock-frequency = <50000000>; 47 /*
48 * DW sdio controller has external ciu clock divider
49 * controlled via register in SDIO IP. It divides
50 * sdio_ref_clk (which comes from CGU) by 16 for
51 * default. So default mmcclk clock (which comes
52 * to sdk_in) is 25000000 Hz.
53 */
54 clock-frequency = <25000000>;
48 #clock-cells = <0>; 55 #clock-cells = <0>;
49 }; 56 };
50 57
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 229d13adbce4..8f627c200d60 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -12,6 +12,7 @@
12/dts-v1/; 12/dts-v1/;
13 13
14#include <dt-bindings/net/ti-dp83867.h> 14#include <dt-bindings/net/ti-dp83867.h>
15#include <dt-bindings/reset/snps,hsdk-reset.h>
15 16
16/ { 17/ {
17 model = "snps,hsdk"; 18 model = "snps,hsdk";
@@ -57,10 +58,10 @@
57 }; 58 };
58 }; 59 };
59 60
60 core_clk: core-clk { 61 input_clk: input-clk {
61 #clock-cells = <0>; 62 #clock-cells = <0>;
62 compatible = "fixed-clock"; 63 compatible = "fixed-clock";
63 clock-frequency = <500000000>; 64 clock-frequency = <33333333>;
64 }; 65 };
65 66
66 cpu_intc: cpu-interrupt-controller { 67 cpu_intc: cpu-interrupt-controller {
@@ -102,6 +103,19 @@
102 103
103 ranges = <0x00000000 0xf0000000 0x10000000>; 104 ranges = <0x00000000 0xf0000000 0x10000000>;
104 105
106 cgu_rst: reset-controller@8a0 {
107 compatible = "snps,hsdk-reset";
108 #reset-cells = <1>;
109 reg = <0x8A0 0x4>, <0xFF0 0x4>;
110 };
111
112 core_clk: core-clk@0 {
113 compatible = "snps,hsdk-core-pll-clock";
114 reg = <0x00 0x10>, <0x14B8 0x4>;
115 #clock-cells = <0>;
116 clocks = <&input_clk>;
117 };
118
105 serial: serial@5000 { 119 serial: serial@5000 {
106 compatible = "snps,dw-apb-uart"; 120 compatible = "snps,dw-apb-uart";
107 reg = <0x5000 0x100>; 121 reg = <0x5000 0x100>;
@@ -120,7 +134,18 @@
120 134
121 mmcclk_ciu: mmcclk-ciu { 135 mmcclk_ciu: mmcclk-ciu {
122 compatible = "fixed-clock"; 136 compatible = "fixed-clock";
123 clock-frequency = <100000000>; 137 /*
138 * DW sdio controller has external ciu clock divider
139 * controlled via register in SDIO IP. Due to its
140 * unexpected default value (it should divide by 1
141 * but it divides by 8) SDIO IP uses wrong clock and
142 * works unstable (see STAR 9001204800)
143 * We switched to the minimum possible value of the
144 * divisor (div-by-2) in HSDK platform code.
145 * So add temporary fix and change clock frequency
146 * to 50000000 Hz until we fix dw sdio driver itself.
147 */
148 clock-frequency = <50000000>;
124 #clock-cells = <0>; 149 #clock-cells = <0>;
125 }; 150 };
126 151
@@ -141,6 +166,8 @@
141 clocks = <&gmacclk>; 166 clocks = <&gmacclk>;
142 clock-names = "stmmaceth"; 167 clock-names = "stmmaceth";
143 phy-handle = <&phy0>; 168 phy-handle = <&phy0>;
169 resets = <&cgu_rst HSDK_ETH_RESET>;
170 reset-names = "stmmaceth";
144 171
145 mdio { 172 mdio {
146 #address-cells = <1>; 173 #address-cells = <1>;
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 6980b966a364..ec7c849a5c8e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
105# CONFIG_ENABLE_WARN_DEPRECATED is not set 105# CONFIG_ENABLE_WARN_DEPRECATED is not set
106# CONFIG_ENABLE_MUST_CHECK is not set 106# CONFIG_ENABLE_MUST_CHECK is not set
107CONFIG_STRIP_ASM_SYMS=y 107CONFIG_STRIP_ASM_SYMS=y
108CONFIG_LOCKUP_DETECTOR=y 108CONFIG_SOFTLOCKUP_DETECTOR=y
109CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 109CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
110# CONFIG_SCHED_DEBUG is not set 110# CONFIG_SCHED_DEBUG is not set
111# CONFIG_DEBUG_PREEMPT is not set 111# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 2233f5777a71..63d3cf69e0b0 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
104# CONFIG_ENABLE_WARN_DEPRECATED is not set 104# CONFIG_ENABLE_WARN_DEPRECATED is not set
105# CONFIG_ENABLE_MUST_CHECK is not set 105# CONFIG_ENABLE_MUST_CHECK is not set
106CONFIG_STRIP_ASM_SYMS=y 106CONFIG_STRIP_ASM_SYMS=y
107CONFIG_LOCKUP_DETECTOR=y 107CONFIG_SOFTLOCKUP_DETECTOR=y
108CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 108CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
109# CONFIG_SCHED_DEBUG is not set 109# CONFIG_SCHED_DEBUG is not set
110# CONFIG_DEBUG_PREEMPT is not set 110# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 30a3d4cf53d2..f613ecac14a7 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
107# CONFIG_ENABLE_WARN_DEPRECATED is not set 107# CONFIG_ENABLE_WARN_DEPRECATED is not set
108# CONFIG_ENABLE_MUST_CHECK is not set 108# CONFIG_ENABLE_MUST_CHECK is not set
109CONFIG_STRIP_ASM_SYMS=y 109CONFIG_STRIP_ASM_SYMS=y
110CONFIG_LOCKUP_DETECTOR=y 110CONFIG_SOFTLOCKUP_DETECTOR=y
111CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 111CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
112# CONFIG_SCHED_DEBUG is not set 112# CONFIG_SCHED_DEBUG is not set
113# CONFIG_DEBUG_PREEMPT is not set 113# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 821a2e562f3f..3507be2af6fe 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
84CONFIG_NFS_FS=y 84CONFIG_NFS_FS=y
85# CONFIG_ENABLE_WARN_DEPRECATED is not set 85# CONFIG_ENABLE_WARN_DEPRECATED is not set
86# CONFIG_ENABLE_MUST_CHECK is not set 86# CONFIG_ENABLE_MUST_CHECK is not set
87CONFIG_LOCKUP_DETECTOR=y 87CONFIG_SOFTLOCKUP_DETECTOR=y
88# CONFIG_DEBUG_PREEMPT is not set 88# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 9a3fcf446388..7b8f8faf8a24 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -72,7 +72,7 @@ CONFIG_NLS_ISO8859_1=y
72# CONFIG_ENABLE_WARN_DEPRECATED is not set 72# CONFIG_ENABLE_WARN_DEPRECATED is not set
73# CONFIG_ENABLE_MUST_CHECK is not set 73# CONFIG_ENABLE_MUST_CHECK is not set
74CONFIG_STRIP_ASM_SYMS=y 74CONFIG_STRIP_ASM_SYMS=y
75CONFIG_LOCKUP_DETECTOR=y 75CONFIG_SOFTLOCKUP_DETECTOR=y
76CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 76CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
77# CONFIG_SCHED_DEBUG is not set 77# CONFIG_SCHED_DEBUG is not set
78# CONFIG_DEBUG_PREEMPT is not set 78# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index c0d6a010751a..4fcf4f2503f6 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
94# CONFIG_ENABLE_MUST_CHECK is not set 94# CONFIG_ENABLE_MUST_CHECK is not set
95CONFIG_STRIP_ASM_SYMS=y 95CONFIG_STRIP_ASM_SYMS=y
96CONFIG_DEBUG_SHIRQ=y 96CONFIG_DEBUG_SHIRQ=y
97CONFIG_LOCKUP_DETECTOR=y 97CONFIG_SOFTLOCKUP_DETECTOR=y
98CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 98CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
99# CONFIG_SCHED_DEBUG is not set 99# CONFIG_SCHED_DEBUG is not set
100# CONFIG_DEBUG_PREEMPT is not set 100# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 5c0971787acf..7b71464f6c2f 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
98# CONFIG_ENABLE_MUST_CHECK is not set 98# CONFIG_ENABLE_MUST_CHECK is not set
99CONFIG_STRIP_ASM_SYMS=y 99CONFIG_STRIP_ASM_SYMS=y
100CONFIG_DEBUG_SHIRQ=y 100CONFIG_DEBUG_SHIRQ=y
101CONFIG_LOCKUP_DETECTOR=y 101CONFIG_SOFTLOCKUP_DETECTOR=y
102CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10 102CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
103# CONFIG_SCHED_DEBUG is not set 103# CONFIG_SCHED_DEBUG is not set
104# CONFIG_DEBUG_PREEMPT is not set 104# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index ba8e802dba80..b1c56d35f2a9 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -98,6 +98,7 @@
98 98
99/* Auxiliary registers */ 99/* Auxiliary registers */
100#define AUX_IDENTITY 4 100#define AUX_IDENTITY 4
101#define AUX_EXEC_CTRL 8
101#define AUX_INTR_VEC_BASE 0x25 102#define AUX_INTR_VEC_BASE 0x25
102#define AUX_VOL 0x5e 103#define AUX_VOL 0x5e
103 104
@@ -135,12 +136,12 @@ struct bcr_identity {
135#endif 136#endif
136}; 137};
137 138
138struct bcr_isa { 139struct bcr_isa_arcv2 {
139#ifdef CONFIG_CPU_BIG_ENDIAN 140#ifdef CONFIG_CPU_BIG_ENDIAN
140 unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1, 141 unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
141 pad1:11, atomic1:1, ver:8; 142 pad1:12, ver:8;
142#else 143#else
143 unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1, 144 unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
144 ldd:1, pad2:4, div_rem:4; 145 ldd:1, pad2:4, div_rem:4;
145#endif 146#endif
146}; 147};
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
263 struct cpuinfo_arc_mmu mmu; 264 struct cpuinfo_arc_mmu mmu;
264 struct cpuinfo_arc_bpu bpu; 265 struct cpuinfo_arc_bpu bpu;
265 struct bcr_identity core; 266 struct bcr_identity core;
266 struct bcr_isa isa; 267 struct bcr_isa_arcv2 isa;
267 const char *details, *name; 268 const char *details, *name;
268 unsigned int vec_base; 269 unsigned int vec_base;
269 struct cpuinfo_arc_ccm iccm, dccm; 270 struct cpuinfo_arc_ccm iccm, dccm;
270 struct { 271 struct {
271 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, 272 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
272 fpu_sp:1, fpu_dp:1, pad2:6, 273 fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
273 debug:1, ap:1, smart:1, rtt:1, pad3:4, 274 debug:1, ap:1, smart:1, rtt:1, pad3:4,
274 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; 275 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
275 } extn; 276 } extn;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 877cec8f5ea2..fb83844daeea 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
51 { 0x51, "R2.0" }, 51 { 0x51, "R2.0" },
52 { 0x52, "R2.1" }, 52 { 0x52, "R2.1" },
53 { 0x53, "R3.0" }, 53 { 0x53, "R3.0" },
54 { 0x54, "R4.0" },
54#endif 55#endif
55 { 0x00, NULL } 56 { 0x00, NULL }
56}; 57};
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
62#else 63#else
63 { 0x40, "ARC EM" }, 64 { 0x40, "ARC EM" },
64 { 0x50, "ARC HS38" }, 65 { 0x50, "ARC HS38" },
66 { 0x54, "ARC HS48" },
65#endif 67#endif
66 { 0x00, "Unknown" } 68 { 0x00, "Unknown" }
67}; 69};
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
119 struct bcr_generic bcr; 121 struct bcr_generic bcr;
120 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 122 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
121 const struct id_to_str *tbl; 123 const struct id_to_str *tbl;
124 struct bcr_isa_arcv2 isa;
122 125
123 FIX_PTR(cpu); 126 FIX_PTR(cpu);
124 127
125 READ_BCR(AUX_IDENTITY, cpu->core); 128 READ_BCR(AUX_IDENTITY, cpu->core);
126 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
127 129
128 for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) { 130 for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
129 if (cpu->core.family == tbl->id) { 131 if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
133 } 135 }
134 136
135 for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) { 137 for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
136 if ((cpu->core.family & 0xF0) == tbl->id) 138 if ((cpu->core.family & 0xF4) == tbl->id)
137 break; 139 break;
138 } 140 }
139 cpu->name = tbl->str; 141 cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
192 cpu->bpu.full = bpu.ft; 194 cpu->bpu.full = bpu.ft;
193 cpu->bpu.num_cache = 256 << bpu.bce; 195 cpu->bpu.num_cache = 256 << bpu.bce;
194 cpu->bpu.num_pred = 2048 << bpu.pte; 196 cpu->bpu.num_pred = 2048 << bpu.pte;
197
198 if (cpu->core.family >= 0x54) {
199 unsigned int exec_ctrl;
200
201 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
202 cpu->extn.dual_iss_exist = 1;
203 cpu->extn.dual_iss_enb = exec_ctrl & 1;
204 }
195 } 205 }
196 206
197 READ_BCR(ARC_REG_AP_BCR, bcr); 207 READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)
205 215
206 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt; 216 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
207 217
218 READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
219
208 /* some hacks for lack of feature BCR info in old ARC700 cores */ 220 /* some hacks for lack of feature BCR info in old ARC700 cores */
209 if (is_isa_arcompact()) { 221 if (is_isa_arcompact()) {
210 if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */ 222 if (!isa.ver) /* ISA BCR absent, use Kconfig info */
211 cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC); 223 cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
212 else 224 else {
213 cpu->isa.atomic = cpu->isa.atomic1; 225 /* ARC700_BUILD only has 2 bits of isa info */
226 struct bcr_generic bcr = *(struct bcr_generic *)&isa;
227 cpu->isa.atomic = bcr.info & 1;
228 }
214 229
215 cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); 230 cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
216 231
217 /* there's no direct way to distinguish 750 vs. 770 */ 232 /* there's no direct way to distinguish 750 vs. 770 */
218 if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3)) 233 if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
219 cpu->name = "ARC750"; 234 cpu->name = "ARC750";
235 } else {
236 cpu->isa = isa;
220 } 237 }
221} 238}
222 239
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
232 "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n", 249 "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
233 core->family, core->cpu_id, core->chip_id); 250 core->family, core->cpu_id, core->chip_id);
234 251
235 n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n", 252 n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
236 cpu_id, cpu->name, cpu->details, 253 cpu_id, cpu->name, cpu->details,
237 is_isa_arcompact() ? "ARCompact" : "ARCv2", 254 is_isa_arcompact() ? "ARCompact" : "ARCv2",
238 IS_AVAIL1(cpu->isa.be, "[Big-Endian]")); 255 IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
256 IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
239 257
240 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ", 258 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
241 IS_AVAIL1(cpu->extn.timer0, "Timer0 "), 259 IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f46267153ec2..6df9d94a9537 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -23,6 +23,8 @@
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/reboot.h> 24#include <linux/reboot.h>
25#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
26#include <linux/export.h>
27
26#include <asm/processor.h> 28#include <asm/processor.h>
27#include <asm/setup.h> 29#include <asm/setup.h>
28#include <asm/mach_desc.h> 30#include <asm/mach_desc.h>
@@ -30,6 +32,9 @@
30#ifndef CONFIG_ARC_HAS_LLSC 32#ifndef CONFIG_ARC_HAS_LLSC
31arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 33arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
32arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; 34arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
35
36EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
37EXPORT_SYMBOL_GPL(smp_bitops_lock);
33#endif 38#endif
34 39
35struct plat_smp_ops __weak plat_smp_ops; 40struct plat_smp_ops __weak plat_smp_ops;
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..cf14ebc36916 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)
111 111
112 axs10x_enable_gpio_intc_wire(); 112 axs10x_enable_gpio_intc_wire();
113 113
114 /*
115 * Reset ethernet IP core.
116 * TODO: get rid of this quirk after axs10x reset driver (or simple
117 * reset driver) will be available in upstream.
118 */
119 iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
120
114 scnprintf(mb, 32, "MainBoard v%d", mb_rev); 121 scnprintf(mb, 32, "MainBoard v%d", mb_rev);
115 axs10x_print_board_ver(CREG_MB_VER, mb); 122 axs10x_print_board_ver(CREG_MB_VER, mb);
116} 123}
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 5a6ed5afb009..19ab3cf98f0f 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -6,4 +6,6 @@
6# 6#
7 7
8menuconfig ARC_SOC_HSDK 8menuconfig ARC_SOC_HSDK
9 bool "ARC HS Development Kit SOC" 9 bool "ARC HS Development Kit SOC"
10 select CLK_HSDK
11 select RESET_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index a2e7fd17e36d..fd0ae5e38639 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,6 +38,46 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
38#define CREG_PAE (CREG_BASE + 0x180) 38#define CREG_PAE (CREG_BASE + 0x180)
39#define CREG_PAE_UPDATE (CREG_BASE + 0x194) 39#define CREG_PAE_UPDATE (CREG_BASE + 0x194)
40 40
41#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
42#define CREG_CORE_IF_CLK_DIV_2 0x1
43#define CGU_BASE ARC_PERIPHERAL_BASE
44#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
45#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
46#define CGU_PLL_STATUS_LOCK BIT(0)
47#define CGU_PLL_STATUS_ERR BIT(1)
48#define CGU_PLL_CTRL_1GHZ 0x3A10
49#define HSDK_PLL_LOCK_TIMEOUT 500
50
51#define HSDK_PLL_LOCKED() \
52 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
53
54#define HSDK_PLL_ERR() \
55 !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
56
57static void __init hsdk_set_cpu_freq_1ghz(void)
58{
59 u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
60
61 /*
62 * As we set cpu clock which exceeds 500MHz, the divider for the interface
63 * clock must be programmed to div-by-2.
64 */
65 iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
66
67 /* Set cpu clock to 1GHz */
68 iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
69
70 while (!HSDK_PLL_LOCKED() && timeout--)
71 cpu_relax();
72
73 if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
74 pr_err("Failed to setup CPU frequency to 1GHz!");
75}
76
77#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
78#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
79#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
80
41static void __init hsdk_init_early(void) 81static void __init hsdk_init_early(void)
42{ 82{
43 /* 83 /*
@@ -52,6 +92,18 @@ static void __init hsdk_init_early(void)
52 92
53 /* Really apply settings made above */ 93 /* Really apply settings made above */
54 writel(1, (void __iomem *) CREG_PAE_UPDATE); 94 writel(1, (void __iomem *) CREG_PAE_UPDATE);
95
96 /*
97 * Switch SDIO external ciu clock divider from default div-by-8 to
98 * minimum possible div-by-2.
99 */
100 iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
101
102 /*
103 * Setup CPU frequency to 1GHz.
104 * TODO: remove it after smart hsdk pll driver will be introduced.
105 */
106 hsdk_set_cpu_freq_1ghz();
55} 107}
56 108
57static const char *hsdk_compat[] __initconst = { 109static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47d3a1ab08d2..817e5cfef83a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -131,7 +131,7 @@ endif
131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm 131KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float 132KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
133 133
134CHECKFLAGS += -D__arm__ 134CHECKFLAGS += -D__arm__ -m32
135 135
136#Default value 136#Default value
137head-y := arch/arm/kernel/head$(MMUEXT).o 137head-y := arch/arm/kernel/head$(MMUEXT).o
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 5392ee63338f..8f6e37177de1 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -23,7 +23,11 @@ ENTRY(putc)
23 strb r0, [r1] 23 strb r0, [r1]
24 mov r0, #0x03 @ SYS_WRITEC 24 mov r0, #0x03 @ SYS_WRITEC
25 ARM( svc #0x123456 ) 25 ARM( svc #0x123456 )
26#ifdef CONFIG_CPU_V7M
27 THUMB( bkpt #0xab )
28#else
26 THUMB( svc #0xab ) 29 THUMB( svc #0xab )
30#endif
27 mov pc, lr 31 mov pc, lr
28 .align 2 32 .align 2
291: .word _GLOBAL_OFFSET_TABLE_ - . 331: .word _GLOBAL_OFFSET_TABLE_ - .
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7d7ca054c557..e58fab8aec5d 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -36,6 +36,8 @@
36 phy1 = &usb1_phy; 36 phy1 = &usb1_phy;
37 ethernet0 = &cpsw_emac0; 37 ethernet0 = &cpsw_emac0;
38 ethernet1 = &cpsw_emac1; 38 ethernet1 = &cpsw_emac1;
39 spi0 = &spi0;
40 spi1 = &spi1;
39 }; 41 };
40 42
41 cpus { 43 cpus {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9d276af7c539..081fa68b6f98 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -388,6 +388,7 @@
388 pinctrl-0 = <&cpsw_default>; 388 pinctrl-0 = <&cpsw_default>;
389 pinctrl-1 = <&cpsw_sleep>; 389 pinctrl-1 = <&cpsw_sleep>;
390 status = "okay"; 390 status = "okay";
391 slaves = <1>;
391}; 392};
392 393
393&davinci_mdio { 394&davinci_mdio {
@@ -402,11 +403,6 @@
402 phy-mode = "rmii"; 403 phy-mode = "rmii";
403}; 404};
404 405
405&cpsw_emac1 {
406 phy_id = <&davinci_mdio>, <1>;
407 phy-mode = "rmii";
408};
409
410&phy_sel { 406&phy_sel {
411 rmii-clock-ext; 407 rmii-clock-ext;
412}; 408};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 7ff0811e61db..4960722aab32 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -178,7 +178,7 @@
178 }; 178 };
179 179
180 i2c0: i2c@11000 { 180 i2c0: i2c@11000 {
181 compatible = "marvell,mv64xxx-i2c"; 181 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
182 reg = <0x11000 0x20>; 182 reg = <0x11000 0x20>;
183 #address-cells = <1>; 183 #address-cells = <1>;
184 #size-cells = <0>; 184 #size-cells = <0>;
@@ -189,7 +189,7 @@
189 }; 189 };
190 190
191 i2c1: i2c@11100 { 191 i2c1: i2c@11100 {
192 compatible = "marvell,mv64xxx-i2c"; 192 compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
193 reg = <0x11100 0x20>; 193 reg = <0x11100 0x20>;
194 #address-cells = <1>; 194 #address-cells = <1>;
195 #size-cells = <0>; 195 #size-cells = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 63a5af898165..cf0087b4c9e1 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -67,8 +67,8 @@
67 pinctrl-0 = <&pinctrl_macb0_default>; 67 pinctrl-0 = <&pinctrl_macb0_default>;
68 phy-mode = "rmii"; 68 phy-mode = "rmii";
69 69
70 ethernet-phy@1 { 70 ethernet-phy@0 {
71 reg = <0x1>; 71 reg = <0x0>;
72 interrupt-parent = <&pioA>; 72 interrupt-parent = <&pioA>;
73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; 73 interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
74 pinctrl-names = "default"; 74 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 9c9088c99cc4..60cb084a8d92 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -67,7 +67,10 @@
67 67
68 usb1: ohci@00400000 { 68 usb1: ohci@00400000 {
69 num-ports = <3>; 69 num-ports = <3>;
70 atmel,vbus-gpio = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>; 70 atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */
71 &pioA PIN_PA27 GPIO_ACTIVE_HIGH
72 0
73 >;
71 pinctrl-names = "default"; 74 pinctrl-names = "default";
72 pinctrl-0 = <&pinctrl_usb_default>; 75 pinctrl-0 = <&pinctrl_usb_default>;
73 status = "okay"; 76 status = "okay";
@@ -120,7 +123,7 @@
120 pinctrl-names = "default"; 123 pinctrl-names = "default";
121 pinctrl-0 = <&pinctrl_mikrobus2_uart>; 124 pinctrl-0 = <&pinctrl_mikrobus2_uart>;
122 atmel,use-dma-rx; 125 atmel,use-dma-rx;
123 atmel-use-dma-tx; 126 atmel,use-dma-tx;
124 status = "okay"; 127 status = "okay";
125 }; 128 };
126 129
@@ -178,7 +181,7 @@
178 uart4: serial@fc00c000 { 181 uart4: serial@fc00c000 {
179 atmel,use-dma-rx; 182 atmel,use-dma-rx;
180 atmel,use-dma-tx; 183 atmel,use-dma-tx;
181 pinctrl-name = "default"; 184 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_mikrobus1_uart>; 185 pinctrl-0 = <&pinctrl_mikrobus1_uart>;
183 status = "okay"; 186 status = "okay";
184 }; 187 };
@@ -330,7 +333,7 @@
330 }; 333 };
331 334
332 pinctrl_led_gpio_default: led_gpio_default { 335 pinctrl_led_gpio_default: led_gpio_default {
333 pinmux = <PIN_PA27__GPIO>, 336 pinmux = <PIN_PA10__GPIO>,
334 <PIN_PB1__GPIO>, 337 <PIN_PB1__GPIO>,
335 <PIN_PA31__GPIO>; 338 <PIN_PA31__GPIO>;
336 bias-pull-up; 339 bias-pull-up;
@@ -396,7 +399,7 @@
396 }; 399 };
397 400
398 pinctrl_usb_default: usb_default { 401 pinctrl_usb_default: usb_default {
399 pinmux = <PIN_PA10__GPIO>, 402 pinmux = <PIN_PA27__GPIO>,
400 <PIN_PD19__GPIO>; 403 <PIN_PD19__GPIO>;
401 bias-disable; 404 bias-disable;
402 }; 405 };
@@ -520,17 +523,17 @@
520 523
521 red { 524 red {
522 label = "red"; 525 label = "red";
523 gpios = <&pioA PIN_PA27 GPIO_ACTIVE_LOW>; 526 gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
524 }; 527 };
525 528
526 green { 529 green {
527 label = "green"; 530 label = "green";
528 gpios = <&pioA PIN_PB1 GPIO_ACTIVE_LOW>; 531 gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>;
529 }; 532 };
530 533
531 blue { 534 blue {
532 label = "blue"; 535 label = "blue";
533 gpios = <&pioA PIN_PA31 GPIO_ACTIVE_LOW>; 536 gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>;
534 linux,default-trigger = "heartbeat"; 537 linux,default-trigger = "heartbeat";
535 }; 538 };
536 }; 539 };
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index c7e9ccf2bc87..cbc26001247b 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -309,7 +309,7 @@
309 vddana-supply = <&vdd_3v3_lp_reg>; 309 vddana-supply = <&vdd_3v3_lp_reg>;
310 vref-supply = <&vdd_3v3_lp_reg>; 310 vref-supply = <&vdd_3v3_lp_reg>;
311 pinctrl-names = "default"; 311 pinctrl-names = "default";
312 pinctrl-0 = <&pinctrl_adc_default>; 312 pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
313 status = "okay"; 313 status = "okay";
314 }; 314 };
315 315
@@ -340,6 +340,20 @@
340 bias-disable; 340 bias-disable;
341 }; 341 };
342 342
343 /*
344 * The ADTRG pin can work on any edge type.
345 * In here it's being pulled up, so need to
346 * connect it to ground to get an edge e.g.
347 * Trigger can be configured on falling, rise
348 * or any edge, and the pull-up can be changed
349 * to pull-down or left floating according to
350 * needs.
351 */
352 pinctrl_adtrg_default: adtrg_default {
353 pinmux = <PIN_PD31__ADTRG>;
354 bias-pull-up;
355 };
356
343 pinctrl_charger_chglev: charger_chglev { 357 pinctrl_charger_chglev: charger_chglev {
344 pinmux = <PIN_PA12__GPIO>; 358 pinmux = <PIN_PA12__GPIO>;
345 bias-disable; 359 bias-disable;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 82651c3eb682..b8565fc33eea 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -18,12 +18,9 @@
18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; 18 compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
19 model = "Raspberry Pi Zero W"; 19 model = "Raspberry Pi Zero W";
20 20
21 /* Needed by firmware to properly init UARTs */ 21 chosen {
22 aliases { 22 /* 8250 auxiliary UART instead of pl011 */
23 uart0 = "/soc/serial@7e201000"; 23 stdout-path = "serial1:115200n8";
24 uart1 = "/soc/serial@7e215040";
25 serial0 = "/soc/serial@7e201000";
26 serial1 = "/soc/serial@7e215040";
27 }; 24 };
28 25
29 leds { 26 leds {
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 20725ca487f3..c71a0d73d2a2 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -8,6 +8,11 @@
8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; 8 compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
9 model = "Raspberry Pi 3 Model B"; 9 model = "Raspberry Pi 3 Model B";
10 10
11 chosen {
12 /* 8250 auxiliary UART instead of pl011 */
13 stdout-path = "serial1:115200n8";
14 };
15
11 memory { 16 memory {
12 reg = <0 0x40000000>; 17 reg = <0 0x40000000>;
13 }; 18 };
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 431dcfc900c0..013431e3d7c3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -20,8 +20,13 @@
20 #address-cells = <1>; 20 #address-cells = <1>;
21 #size-cells = <1>; 21 #size-cells = <1>;
22 22
23 aliases {
24 serial0 = &uart0;
25 serial1 = &uart1;
26 };
27
23 chosen { 28 chosen {
24 bootargs = "earlyprintk console=ttyAMA0"; 29 stdout-path = "serial0:115200n8";
25 }; 30 };
26 31
27 thermal-zones { 32 thermal-zones {
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 67e72bc72e80..c75507922f7d 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -15,6 +15,13 @@
15 compatible = "ti,da850-evm", "ti,da850"; 15 compatible = "ti,da850-evm", "ti,da850";
16 model = "DA850/AM1808/OMAP-L138 EVM"; 16 model = "DA850/AM1808/OMAP-L138 EVM";
17 17
18 aliases {
19 serial0 = &serial0;
20 serial1 = &serial1;
21 serial2 = &serial2;
22 ethernet0 = &eth0;
23 };
24
18 soc@1c00000 { 25 soc@1c00000 {
19 pmx_core: pinmux@14120 { 26 pmx_core: pinmux@14120 {
20 status = "okay"; 27 status = "okay";
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index cf229dfabf61..e62b62875cba 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1817,6 +1817,8 @@
1817 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; 1817 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
1818 ti,bit-shift = <24>; 1818 ti,bit-shift = <24>;
1819 reg = <0x1868>; 1819 reg = <0x1868>;
1820 assigned-clocks = <&mcasp3_ahclkx_mux>;
1821 assigned-clock-parents = <&abe_24m_fclk>;
1820 }; 1822 };
1821 1823
1822 mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 { 1824 mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 {
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index c68e8d430234..f0d178c77153 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -145,11 +145,12 @@
145 }; 145 };
146 146
147 watchdog@41000000 { 147 watchdog@41000000 {
148 compatible = "cortina,gemini-watchdog"; 148 compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
149 reg = <0x41000000 0x1000>; 149 reg = <0x41000000 0x1000>;
150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; 150 interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
151 resets = <&syscon GEMINI_RESET_WDOG>; 151 resets = <&syscon GEMINI_RESET_WDOG>;
152 clocks = <&syscon GEMINI_CLK_APB>; 152 clocks = <&syscon GEMINI_CLK_APB>;
153 clock-names = "PCLK";
153 }; 154 };
154 155
155 uart0: serial@42000000 { 156 uart0: serial@42000000 {
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index f46814a7ea44..4d308d17f040 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -144,10 +144,10 @@
144 interrupt-names = "msi"; 144 interrupt-names = "msi";
145 #interrupt-cells = <1>; 145 #interrupt-cells = <1>;
146 interrupt-map-mask = <0 0 0 0x7>; 146 interrupt-map-mask = <0 0 0 0x7>;
147 interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, 147 interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
148 <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, 148 <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
149 <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 149 <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
150 <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; 150 <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, 151 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, 152 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>; 153 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1f4c795d3f72..da7b3237bfe9 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -87,9 +87,10 @@
87 }; 87 };
88 88
89 watchdog: watchdog@98500000 { 89 watchdog: watchdog@98500000 {
90 compatible = "moxa,moxart-watchdog"; 90 compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
91 reg = <0x98500000 0x10>; 91 reg = <0x98500000 0x10>;
92 clocks = <&clk_apb>; 92 clocks = <&clk_apb>;
93 clock-names = "PCLK";
93 }; 94 };
94 95
95 sdhci: sdhci@98e00000 { 96 sdhci: sdhci@98e00000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 26c20e1167b9..4acd32a1c4ef 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -144,15 +144,6 @@
144 io-channel-names = "temp", "bsi", "vbat"; 144 io-channel-names = "temp", "bsi", "vbat";
145 }; 145 };
146 146
147 rear_camera: camera@0 {
148 compatible = "linux,camera";
149
150 module {
151 model = "TCM8341MD";
152 sensor = <&cam1>;
153 };
154 };
155
156 pwm9: dmtimer-pwm { 147 pwm9: dmtimer-pwm {
157 compatible = "ti,omap-dmtimer-pwm"; 148 compatible = "ti,omap-dmtimer-pwm";
158 #pwm-cells = <3>; 149 #pwm-cells = <3>;
@@ -189,10 +180,8 @@
189 clock-lanes = <1>; 180 clock-lanes = <1>;
190 data-lanes = <0>; 181 data-lanes = <0>;
191 lane-polarity = <0 0>; 182 lane-polarity = <0 0>;
192 clock-inv = <0>;
193 /* Select strobe = <1> for back camera, <0> for front camera */ 183 /* Select strobe = <1> for back camera, <0> for front camera */
194 strobe = <1>; 184 strobe = <1>;
195 crc = <0>;
196 }; 185 };
197 }; 186 };
198 }; 187 };
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index cb47ae79a5f9..1b0bd72945f2 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -267,15 +267,19 @@
267 clock-frequency = <400000>; 267 clock-frequency = <400000>;
268 268
269 as3645a@30 { 269 as3645a@30 {
270 #address-cells = <1>;
271 #size-cells = <0>;
270 reg = <0x30>; 272 reg = <0x30>;
271 compatible = "ams,as3645a"; 273 compatible = "ams,as3645a";
272 flash { 274 flash@0 {
275 reg = <0x0>;
273 flash-timeout-us = <150000>; 276 flash-timeout-us = <150000>;
274 flash-max-microamp = <320000>; 277 flash-max-microamp = <320000>;
275 led-max-microamp = <60000>; 278 led-max-microamp = <60000>;
276 peak-current-limit = <1750000>; 279 ams,input-max-microamp = <1750000>;
277 }; 280 };
278 indicator { 281 indicator@1 {
282 reg = <0x1>;
279 led-max-microamp = <10000>; 283 led-max-microamp = <10000>;
280 }; 284 };
281 }; 285 };
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 38d2216c7ead..b1a26b42d190 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1430,6 +1430,7 @@
1430 atmel,min-sample-rate-hz = <200000>; 1430 atmel,min-sample-rate-hz = <200000>;
1431 atmel,max-sample-rate-hz = <20000000>; 1431 atmel,max-sample-rate-hz = <20000000>;
1432 atmel,startup-time-ms = <4>; 1432 atmel,startup-time-ms = <4>;
1433 atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
1433 status = "disabled"; 1434 status = "disabled";
1434 }; 1435 };
1435 1436
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 97b1c2321ba9..293ecb957227 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -47,6 +47,7 @@
47 47
48/dts-v1/; 48/dts-v1/;
49#include "stm32f429.dtsi" 49#include "stm32f429.dtsi"
50#include "stm32f429-pinctrl.dtsi"
50#include <dt-bindings/input/input.h> 51#include <dt-bindings/input/input.h>
51#include <dt-bindings/gpio/gpio.h> 52#include <dt-bindings/gpio/gpio.h>
52 53
@@ -202,10 +203,8 @@
202 stmpe1600: stmpe1600@42 { 203 stmpe1600: stmpe1600@42 {
203 compatible = "st,stmpe1600"; 204 compatible = "st,stmpe1600";
204 reg = <0x42>; 205 reg = <0x42>;
205 irq-gpio = <&gpioi 8 0>;
206 irq-trigger = <3>;
207 interrupts = <8 3>; 206 interrupts = <8 3>;
208 interrupt-parent = <&exti>; 207 interrupt-parent = <&gpioi>;
209 interrupt-controller; 208 interrupt-controller;
210 wakeup-source; 209 wakeup-source;
211 210
diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
new file mode 100644
index 000000000000..7f3560c0211d
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
@@ -0,0 +1,343 @@
1/*
2 * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
3 *
4 * This file is dual-licensed: you can use it either under the terms
5 * of the GPL or the X11 license, at your option. Note that this dual
6 * licensing only applies to this file, and not this project as a
7 * whole.
8 *
9 * a) This file is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This file is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * Or, alternatively,
20 *
21 * b) Permission is hereby granted, free of charge, to any person
22 * obtaining a copy of this software and associated documentation
23 * files (the "Software"), to deal in the Software without
24 * restriction, including without limitation the rights to use,
25 * copy, modify, merge, publish, distribute, sublicense, and/or
26 * sell copies of the Software, and to permit persons to whom the
27 * Software is furnished to do so, subject to the following
28 * conditions:
29 *
30 * The above copyright notice and this permission notice shall be
31 * included in all copies or substantial portions of the Software.
32 *
33 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
35 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
36 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
37 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
38 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
39 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
40 * OTHER DEALINGS IN THE SOFTWARE.
41 */
42
43#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
44#include <dt-bindings/mfd/stm32f4-rcc.h>
45
46/ {
47 soc {
48 pinctrl: pin-controller {
49 #address-cells = <1>;
50 #size-cells = <1>;
51 ranges = <0 0x40020000 0x3000>;
52 interrupt-parent = <&exti>;
53 st,syscfg = <&syscfg 0x8>;
54 pins-are-numbered;
55
56 gpioa: gpio@40020000 {
57 gpio-controller;
58 #gpio-cells = <2>;
59 interrupt-controller;
60 #interrupt-cells = <2>;
61 reg = <0x0 0x400>;
62 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
63 st,bank-name = "GPIOA";
64 };
65
66 gpiob: gpio@40020400 {
67 gpio-controller;
68 #gpio-cells = <2>;
69 interrupt-controller;
70 #interrupt-cells = <2>;
71 reg = <0x400 0x400>;
72 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
73 st,bank-name = "GPIOB";
74 };
75
76 gpioc: gpio@40020800 {
77 gpio-controller;
78 #gpio-cells = <2>;
79 interrupt-controller;
80 #interrupt-cells = <2>;
81 reg = <0x800 0x400>;
82 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
83 st,bank-name = "GPIOC";
84 };
85
86 gpiod: gpio@40020c00 {
87 gpio-controller;
88 #gpio-cells = <2>;
89 interrupt-controller;
90 #interrupt-cells = <2>;
91 reg = <0xc00 0x400>;
92 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
93 st,bank-name = "GPIOD";
94 };
95
96 gpioe: gpio@40021000 {
97 gpio-controller;
98 #gpio-cells = <2>;
99 interrupt-controller;
100 #interrupt-cells = <2>;
101 reg = <0x1000 0x400>;
102 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
103 st,bank-name = "GPIOE";
104 };
105
106 gpiof: gpio@40021400 {
107 gpio-controller;
108 #gpio-cells = <2>;
109 interrupt-controller;
110 #interrupt-cells = <2>;
111 reg = <0x1400 0x400>;
112 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
113 st,bank-name = "GPIOF";
114 };
115
116 gpiog: gpio@40021800 {
117 gpio-controller;
118 #gpio-cells = <2>;
119 interrupt-controller;
120 #interrupt-cells = <2>;
121 reg = <0x1800 0x400>;
122 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
123 st,bank-name = "GPIOG";
124 };
125
126 gpioh: gpio@40021c00 {
127 gpio-controller;
128 #gpio-cells = <2>;
129 interrupt-controller;
130 #interrupt-cells = <2>;
131 reg = <0x1c00 0x400>;
132 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
133 st,bank-name = "GPIOH";
134 };
135
136 gpioi: gpio@40022000 {
137 gpio-controller;
138 #gpio-cells = <2>;
139 interrupt-controller;
140 #interrupt-cells = <2>;
141 reg = <0x2000 0x400>;
142 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
143 st,bank-name = "GPIOI";
144 };
145
146 gpioj: gpio@40022400 {
147 gpio-controller;
148 #gpio-cells = <2>;
149 interrupt-controller;
150 #interrupt-cells = <2>;
151 reg = <0x2400 0x400>;
152 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
153 st,bank-name = "GPIOJ";
154 };
155
156 gpiok: gpio@40022800 {
157 gpio-controller;
158 #gpio-cells = <2>;
159 interrupt-controller;
160 #interrupt-cells = <2>;
161 reg = <0x2800 0x400>;
162 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
163 st,bank-name = "GPIOK";
164 };
165
166 usart1_pins_a: usart1@0 {
167 pins1 {
168 pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
169 bias-disable;
170 drive-push-pull;
171 slew-rate = <0>;
172 };
173 pins2 {
174 pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
175 bias-disable;
176 };
177 };
178
179 usart3_pins_a: usart3@0 {
180 pins1 {
181 pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
182 bias-disable;
183 drive-push-pull;
184 slew-rate = <0>;
185 };
186 pins2 {
187 pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
188 bias-disable;
189 };
190 };
191
192 usbotg_fs_pins_a: usbotg_fs@0 {
193 pins {
194 pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
195 <STM32F429_PA11_FUNC_OTG_FS_DM>,
196 <STM32F429_PA12_FUNC_OTG_FS_DP>;
197 bias-disable;
198 drive-push-pull;
199 slew-rate = <2>;
200 };
201 };
202
203 usbotg_fs_pins_b: usbotg_fs@1 {
204 pins {
205 pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
206 <STM32F429_PB14_FUNC_OTG_HS_DM>,
207 <STM32F429_PB15_FUNC_OTG_HS_DP>;
208 bias-disable;
209 drive-push-pull;
210 slew-rate = <2>;
211 };
212 };
213
214 usbotg_hs_pins_a: usbotg_hs@0 {
215 pins {
216 pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
217 <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
218 <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
219 <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
220 <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
221 <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
222 <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
223 <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
224 <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
225 <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
226 <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
227 <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
228 bias-disable;
229 drive-push-pull;
230 slew-rate = <2>;
231 };
232 };
233
234 ethernet_mii: mii@0 {
235 pins {
236 pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
237 <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
238 <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
239 <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
240 <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
241 <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
242 <STM32F429_PA2_FUNC_ETH_MDIO>,
243 <STM32F429_PC1_FUNC_ETH_MDC>,
244 <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
245 <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
246 <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
247 <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
248 <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
249 <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
250 slew-rate = <2>;
251 };
252 };
253
254 adc3_in8_pin: adc@200 {
255 pins {
256 pinmux = <STM32F429_PF10_FUNC_ANALOG>;
257 };
258 };
259
260 pwm1_pins: pwm@1 {
261 pins {
262 pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
263 <STM32F429_PB13_FUNC_TIM1_CH1N>,
264 <STM32F429_PB12_FUNC_TIM1_BKIN>;
265 };
266 };
267
268 pwm3_pins: pwm@3 {
269 pins {
270 pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
271 <STM32F429_PB5_FUNC_TIM3_CH2>;
272 };
273 };
274
275 i2c1_pins: i2c1@0 {
276 pins {
277 pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
278 <STM32F429_PB6_FUNC_I2C1_SCL>;
279 bias-disable;
280 drive-open-drain;
281 slew-rate = <3>;
282 };
283 };
284
285 ltdc_pins: ltdc@0 {
286 pins {
287 pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
288 <STM32F429_PI13_FUNC_LCD_VSYNC>,
289 <STM32F429_PI14_FUNC_LCD_CLK>,
290 <STM32F429_PI15_FUNC_LCD_R0>,
291 <STM32F429_PJ0_FUNC_LCD_R1>,
292 <STM32F429_PJ1_FUNC_LCD_R2>,
293 <STM32F429_PJ2_FUNC_LCD_R3>,
294 <STM32F429_PJ3_FUNC_LCD_R4>,
295 <STM32F429_PJ4_FUNC_LCD_R5>,
296 <STM32F429_PJ5_FUNC_LCD_R6>,
297 <STM32F429_PJ6_FUNC_LCD_R7>,
298 <STM32F429_PJ7_FUNC_LCD_G0>,
299 <STM32F429_PJ8_FUNC_LCD_G1>,
300 <STM32F429_PJ9_FUNC_LCD_G2>,
301 <STM32F429_PJ10_FUNC_LCD_G3>,
302 <STM32F429_PJ11_FUNC_LCD_G4>,
303 <STM32F429_PJ12_FUNC_LCD_B0>,
304 <STM32F429_PJ13_FUNC_LCD_B1>,
305 <STM32F429_PJ14_FUNC_LCD_B2>,
306 <STM32F429_PJ15_FUNC_LCD_B3>,
307 <STM32F429_PK0_FUNC_LCD_G5>,
308 <STM32F429_PK1_FUNC_LCD_G6>,
309 <STM32F429_PK2_FUNC_LCD_G7>,
310 <STM32F429_PK3_FUNC_LCD_B4>,
311 <STM32F429_PK4_FUNC_LCD_B5>,
312 <STM32F429_PK5_FUNC_LCD_B6>,
313 <STM32F429_PK6_FUNC_LCD_B7>,
314 <STM32F429_PK7_FUNC_LCD_DE>;
315 slew-rate = <2>;
316 };
317 };
318
319 dcmi_pins: dcmi@0 {
320 pins {
321 pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
322 <STM32F429_PB7_FUNC_DCMI_VSYNC>,
323 <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
324 <STM32F429_PC6_FUNC_DCMI_D0>,
325 <STM32F429_PC7_FUNC_DCMI_D1>,
326 <STM32F429_PC8_FUNC_DCMI_D2>,
327 <STM32F429_PC9_FUNC_DCMI_D3>,
328 <STM32F429_PC11_FUNC_DCMI_D4>,
329 <STM32F429_PD3_FUNC_DCMI_D5>,
330 <STM32F429_PB8_FUNC_DCMI_D6>,
331 <STM32F429_PE6_FUNC_DCMI_D7>,
332 <STM32F429_PC10_FUNC_DCMI_D8>,
333 <STM32F429_PC12_FUNC_DCMI_D9>,
334 <STM32F429_PD6_FUNC_DCMI_D10>,
335 <STM32F429_PD2_FUNC_DCMI_D11>;
336 bias-disable;
337 drive-push-pull;
338 slew-rate = <3>;
339 };
340 };
341 };
342 };
343};
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index c66d617e4245..5ceb2cf3777f 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -47,6 +47,7 @@
47 47
48/dts-v1/; 48/dts-v1/;
49#include "stm32f429.dtsi" 49#include "stm32f429.dtsi"
50#include "stm32f429-pinctrl.dtsi"
50#include <dt-bindings/input/input.h> 51#include <dt-bindings/input/input.h>
51 52
52/ { 53/ {
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
new file mode 100644
index 000000000000..3e7a17d9112e
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
3 *
4 * This file is dual-licensed: you can use it either under the terms
5 * of the GPL or the X11 license, at your option. Note that this dual
6 * licensing only applies to this file, and not this project as a
7 * whole.
8 *
9 * a) This file is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This file is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * Or, alternatively,
20 *
21 * b) Permission is hereby granted, free of charge, to any person
22 * obtaining a copy of this software and associated documentation
23 * files (the "Software"), to deal in the Software without
24 * restriction, including without limitation the rights to use,
25 * copy, modify, merge, publish, distribute, sublicense, and/or
26 * sell copies of the Software, and to permit persons to whom the
27 * Software is furnished to do so, subject to the following
28 * conditions:
29 *
30 * The above copyright notice and this permission notice shall be
31 * included in all copies or substantial portions of the Software.
32 *
33 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
35 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
36 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
37 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
38 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
39 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
40 * OTHER DEALINGS IN THE SOFTWARE.
41 */
42
43#include "stm32f4-pinctrl.dtsi"
44
45/ {
46 soc {
47 pinctrl: pin-controller {
48 compatible = "st,stm32f429-pinctrl";
49
50 gpioa: gpio@40020000 {
51 gpio-ranges = <&pinctrl 0 0 16>;
52 };
53
54 gpiob: gpio@40020400 {
55 gpio-ranges = <&pinctrl 0 16 16>;
56 };
57
58 gpioc: gpio@40020800 {
59 gpio-ranges = <&pinctrl 0 32 16>;
60 };
61
62 gpiod: gpio@40020c00 {
63 gpio-ranges = <&pinctrl 0 48 16>;
64 };
65
66 gpioe: gpio@40021000 {
67 gpio-ranges = <&pinctrl 0 64 16>;
68 };
69
70 gpiof: gpio@40021400 {
71 gpio-ranges = <&pinctrl 0 80 16>;
72 };
73
74 gpiog: gpio@40021800 {
75 gpio-ranges = <&pinctrl 0 96 16>;
76 };
77
78 gpioh: gpio@40021c00 {
79 gpio-ranges = <&pinctrl 0 112 16>;
80 };
81
82 gpioi: gpio@40022000 {
83 gpio-ranges = <&pinctrl 0 128 16>;
84 };
85
86 gpioj: gpio@40022400 {
87 gpio-ranges = <&pinctrl 0 144 16>;
88 };
89
90 gpiok: gpio@40022800 {
91 gpio-ranges = <&pinctrl 0 160 8>;
92 };
93 };
94 };
95};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index dd7e99b1f43b..5b36eb114ddc 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -47,7 +47,6 @@
47 47
48#include "skeleton.dtsi" 48#include "skeleton.dtsi"
49#include "armv7-m.dtsi" 49#include "armv7-m.dtsi"
50#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
51#include <dt-bindings/clock/stm32fx-clock.h> 50#include <dt-bindings/clock/stm32fx-clock.h>
52#include <dt-bindings/mfd/stm32f4-rcc.h> 51#include <dt-bindings/mfd/stm32f4-rcc.h>
53 52
@@ -591,302 +590,6 @@
591 status = "disabled"; 590 status = "disabled";
592 }; 591 };
593 592
594 pinctrl: pin-controller {
595 #address-cells = <1>;
596 #size-cells = <1>;
597 compatible = "st,stm32f429-pinctrl";
598 ranges = <0 0x40020000 0x3000>;
599 interrupt-parent = <&exti>;
600 st,syscfg = <&syscfg 0x8>;
601 pins-are-numbered;
602
603 gpioa: gpio@40020000 {
604 gpio-controller;
605 #gpio-cells = <2>;
606 interrupt-controller;
607 #interrupt-cells = <2>;
608 reg = <0x0 0x400>;
609 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
610 st,bank-name = "GPIOA";
611 };
612
613 gpiob: gpio@40020400 {
614 gpio-controller;
615 #gpio-cells = <2>;
616 interrupt-controller;
617 #interrupt-cells = <2>;
618 reg = <0x400 0x400>;
619 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
620 st,bank-name = "GPIOB";
621 };
622
623 gpioc: gpio@40020800 {
624 gpio-controller;
625 #gpio-cells = <2>;
626 interrupt-controller;
627 #interrupt-cells = <2>;
628 reg = <0x800 0x400>;
629 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
630 st,bank-name = "GPIOC";
631 };
632
633 gpiod: gpio@40020c00 {
634 gpio-controller;
635 #gpio-cells = <2>;
636 interrupt-controller;
637 #interrupt-cells = <2>;
638 reg = <0xc00 0x400>;
639 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
640 st,bank-name = "GPIOD";
641 };
642
643 gpioe: gpio@40021000 {
644 gpio-controller;
645 #gpio-cells = <2>;
646 interrupt-controller;
647 #interrupt-cells = <2>;
648 reg = <0x1000 0x400>;
649 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
650 st,bank-name = "GPIOE";
651 };
652
653 gpiof: gpio@40021400 {
654 gpio-controller;
655 #gpio-cells = <2>;
656 interrupt-controller;
657 #interrupt-cells = <2>;
658 reg = <0x1400 0x400>;
659 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
660 st,bank-name = "GPIOF";
661 };
662
663 gpiog: gpio@40021800 {
664 gpio-controller;
665 #gpio-cells = <2>;
666 interrupt-controller;
667 #interrupt-cells = <2>;
668 reg = <0x1800 0x400>;
669 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>;
670 st,bank-name = "GPIOG";
671 };
672
673 gpioh: gpio@40021c00 {
674 gpio-controller;
675 #gpio-cells = <2>;
676 interrupt-controller;
677 #interrupt-cells = <2>;
678 reg = <0x1c00 0x400>;
679 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>;
680 st,bank-name = "GPIOH";
681 };
682
683 gpioi: gpio@40022000 {
684 gpio-controller;
685 #gpio-cells = <2>;
686 interrupt-controller;
687 #interrupt-cells = <2>;
688 reg = <0x2000 0x400>;
689 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>;
690 st,bank-name = "GPIOI";
691 };
692
693 gpioj: gpio@40022400 {
694 gpio-controller;
695 #gpio-cells = <2>;
696 interrupt-controller;
697 #interrupt-cells = <2>;
698 reg = <0x2400 0x400>;
699 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>;
700 st,bank-name = "GPIOJ";
701 };
702
703 gpiok: gpio@40022800 {
704 gpio-controller;
705 #gpio-cells = <2>;
706 interrupt-controller;
707 #interrupt-cells = <2>;
708 reg = <0x2800 0x400>;
709 clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>;
710 st,bank-name = "GPIOK";
711 };
712
713 usart1_pins_a: usart1@0 {
714 pins1 {
715 pinmux = <STM32F429_PA9_FUNC_USART1_TX>;
716 bias-disable;
717 drive-push-pull;
718 slew-rate = <0>;
719 };
720 pins2 {
721 pinmux = <STM32F429_PA10_FUNC_USART1_RX>;
722 bias-disable;
723 };
724 };
725
726 usart3_pins_a: usart3@0 {
727 pins1 {
728 pinmux = <STM32F429_PB10_FUNC_USART3_TX>;
729 bias-disable;
730 drive-push-pull;
731 slew-rate = <0>;
732 };
733 pins2 {
734 pinmux = <STM32F429_PB11_FUNC_USART3_RX>;
735 bias-disable;
736 };
737 };
738
739 usbotg_fs_pins_a: usbotg_fs@0 {
740 pins {
741 pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>,
742 <STM32F429_PA11_FUNC_OTG_FS_DM>,
743 <STM32F429_PA12_FUNC_OTG_FS_DP>;
744 bias-disable;
745 drive-push-pull;
746 slew-rate = <2>;
747 };
748 };
749
750 usbotg_fs_pins_b: usbotg_fs@1 {
751 pins {
752 pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>,
753 <STM32F429_PB14_FUNC_OTG_HS_DM>,
754 <STM32F429_PB15_FUNC_OTG_HS_DP>;
755 bias-disable;
756 drive-push-pull;
757 slew-rate = <2>;
758 };
759 };
760
761 usbotg_hs_pins_a: usbotg_hs@0 {
762 pins {
763 pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>,
764 <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>,
765 <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>,
766 <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>,
767 <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>,
768 <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>,
769 <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>,
770 <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>,
771 <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>,
772 <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>,
773 <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>,
774 <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>;
775 bias-disable;
776 drive-push-pull;
777 slew-rate = <2>;
778 };
779 };
780
781 ethernet_mii: mii@0 {
782 pins {
783 pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
784 <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
785 <STM32F429_PC2_FUNC_ETH_MII_TXD2>,
786 <STM32F429_PB8_FUNC_ETH_MII_TXD3>,
787 <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>,
788 <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>,
789 <STM32F429_PA2_FUNC_ETH_MDIO>,
790 <STM32F429_PC1_FUNC_ETH_MDC>,
791 <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>,
792 <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>,
793 <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>,
794 <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>,
795 <STM32F429_PH6_FUNC_ETH_MII_RXD2>,
796 <STM32F429_PH7_FUNC_ETH_MII_RXD3>;
797 slew-rate = <2>;
798 };
799 };
800
801 adc3_in8_pin: adc@200 {
802 pins {
803 pinmux = <STM32F429_PF10_FUNC_ANALOG>;
804 };
805 };
806
807 pwm1_pins: pwm@1 {
808 pins {
809 pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>,
810 <STM32F429_PB13_FUNC_TIM1_CH1N>,
811 <STM32F429_PB12_FUNC_TIM1_BKIN>;
812 };
813 };
814
815 pwm3_pins: pwm@3 {
816 pins {
817 pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>,
818 <STM32F429_PB5_FUNC_TIM3_CH2>;
819 };
820 };
821
822 i2c1_pins: i2c1@0 {
823 pins {
824 pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>,
825 <STM32F429_PB6_FUNC_I2C1_SCL>;
826 bias-disable;
827 drive-open-drain;
828 slew-rate = <3>;
829 };
830 };
831
832 ltdc_pins: ltdc@0 {
833 pins {
834 pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>,
835 <STM32F429_PI13_FUNC_LCD_VSYNC>,
836 <STM32F429_PI14_FUNC_LCD_CLK>,
837 <STM32F429_PI15_FUNC_LCD_R0>,
838 <STM32F429_PJ0_FUNC_LCD_R1>,
839 <STM32F429_PJ1_FUNC_LCD_R2>,
840 <STM32F429_PJ2_FUNC_LCD_R3>,
841 <STM32F429_PJ3_FUNC_LCD_R4>,
842 <STM32F429_PJ4_FUNC_LCD_R5>,
843 <STM32F429_PJ5_FUNC_LCD_R6>,
844 <STM32F429_PJ6_FUNC_LCD_R7>,
845 <STM32F429_PJ7_FUNC_LCD_G0>,
846 <STM32F429_PJ8_FUNC_LCD_G1>,
847 <STM32F429_PJ9_FUNC_LCD_G2>,
848 <STM32F429_PJ10_FUNC_LCD_G3>,
849 <STM32F429_PJ11_FUNC_LCD_G4>,
850 <STM32F429_PJ12_FUNC_LCD_B0>,
851 <STM32F429_PJ13_FUNC_LCD_B1>,
852 <STM32F429_PJ14_FUNC_LCD_B2>,
853 <STM32F429_PJ15_FUNC_LCD_B3>,
854 <STM32F429_PK0_FUNC_LCD_G5>,
855 <STM32F429_PK1_FUNC_LCD_G6>,
856 <STM32F429_PK2_FUNC_LCD_G7>,
857 <STM32F429_PK3_FUNC_LCD_B4>,
858 <STM32F429_PK4_FUNC_LCD_B5>,
859 <STM32F429_PK5_FUNC_LCD_B6>,
860 <STM32F429_PK6_FUNC_LCD_B7>,
861 <STM32F429_PK7_FUNC_LCD_DE>;
862 slew-rate = <2>;
863 };
864 };
865
866 dcmi_pins: dcmi@0 {
867 pins {
868 pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>,
869 <STM32F429_PB7_FUNC_DCMI_VSYNC>,
870 <STM32F429_PA6_FUNC_DCMI_PIXCLK>,
871 <STM32F429_PC6_FUNC_DCMI_D0>,
872 <STM32F429_PC7_FUNC_DCMI_D1>,
873 <STM32F429_PC8_FUNC_DCMI_D2>,
874 <STM32F429_PC9_FUNC_DCMI_D3>,
875 <STM32F429_PC11_FUNC_DCMI_D4>,
876 <STM32F429_PD3_FUNC_DCMI_D5>,
877 <STM32F429_PB8_FUNC_DCMI_D6>,
878 <STM32F429_PE6_FUNC_DCMI_D7>,
879 <STM32F429_PC10_FUNC_DCMI_D8>,
880 <STM32F429_PC12_FUNC_DCMI_D9>,
881 <STM32F429_PD6_FUNC_DCMI_D10>,
882 <STM32F429_PD2_FUNC_DCMI_D11>;
883 bias-disable;
884 drive-push-pull;
885 slew-rate = <3>;
886 };
887 };
888 };
889
890 crc: crc@40023000 { 593 crc: crc@40023000 {
891 compatible = "st,stm32f4-crc"; 594 compatible = "st,stm32f4-crc";
892 reg = <0x40023000 0x400>; 595 reg = <0x40023000 0x400>;
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 6ae1f037f3f0..c18acbe4cf4e 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -47,6 +47,7 @@
47 47
48/dts-v1/; 48/dts-v1/;
49#include "stm32f429.dtsi" 49#include "stm32f429.dtsi"
50#include "stm32f469-pinctrl.dtsi"
50 51
51/ { 52/ {
52 model = "STMicroelectronics STM32F469i-DISCO board"; 53 model = "STMicroelectronics STM32F469i-DISCO board";
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
new file mode 100644
index 000000000000..fff542662eea
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
3 *
4 * This file is dual-licensed: you can use it either under the terms
5 * of the GPL or the X11 license, at your option. Note that this dual
6 * licensing only applies to this file, and not this project as a
7 * whole.
8 *
9 * a) This file is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This file is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * Or, alternatively,
20 *
21 * b) Permission is hereby granted, free of charge, to any person
22 * obtaining a copy of this software and associated documentation
23 * files (the "Software"), to deal in the Software without
24 * restriction, including without limitation the rights to use,
25 * copy, modify, merge, publish, distribute, sublicense, and/or
26 * sell copies of the Software, and to permit persons to whom the
27 * Software is furnished to do so, subject to the following
28 * conditions:
29 *
30 * The above copyright notice and this permission notice shall be
31 * included in all copies or substantial portions of the Software.
32 *
33 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
34 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
35 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
36 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
37 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
38 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
39 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
40 * OTHER DEALINGS IN THE SOFTWARE.
41 */
42
43#include "stm32f4-pinctrl.dtsi"
44
45/ {
46 soc {
47 pinctrl: pin-controller {
48 compatible = "st,stm32f469-pinctrl";
49
50 gpioa: gpio@40020000 {
51 gpio-ranges = <&pinctrl 0 0 16>;
52 };
53
54 gpiob: gpio@40020400 {
55 gpio-ranges = <&pinctrl 0 16 16>;
56 };
57
58 gpioc: gpio@40020800 {
59 gpio-ranges = <&pinctrl 0 32 16>;
60 };
61
62 gpiod: gpio@40020c00 {
63 gpio-ranges = <&pinctrl 0 48 16>;
64 };
65
66 gpioe: gpio@40021000 {
67 gpio-ranges = <&pinctrl 0 64 16>;
68 };
69
70 gpiof: gpio@40021400 {
71 gpio-ranges = <&pinctrl 0 80 16>;
72 };
73
74 gpiog: gpio@40021800 {
75 gpio-ranges = <&pinctrl 0 96 16>;
76 };
77
78 gpioh: gpio@40021c00 {
79 gpio-ranges = <&pinctrl 0 112 16>;
80 };
81
82 gpioi: gpio@40022000 {
83 gpio-ranges = <&pinctrl 0 128 16>;
84 };
85
86 gpioj: gpio@40022400 {
87 gpio-ranges = <&pinctrl 0 144 6>,
88 <&pinctrl 12 156 4>;
89 };
90
91 gpiok: gpio@40022800 {
92 gpio-ranges = <&pinctrl 3 163 5>;
93 };
94 };
95 };
96};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index b147cb0dc14b..eef072a21acc 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -311,8 +311,8 @@
311 #size-cells = <0>; 311 #size-cells = <0>;
312 reg = <0>; 312 reg = <0>;
313 313
314 tcon1_in_drc1: endpoint@0 { 314 tcon1_in_drc1: endpoint@1 {
315 reg = <0>; 315 reg = <1>;
316 remote-endpoint = <&drc1_out_tcon1>; 316 remote-endpoint = <&drc1_out_tcon1>;
317 }; 317 };
318 }; 318 };
@@ -1012,8 +1012,8 @@
1012 #size-cells = <0>; 1012 #size-cells = <0>;
1013 reg = <1>; 1013 reg = <1>;
1014 1014
1015 be1_out_drc1: endpoint@0 { 1015 be1_out_drc1: endpoint@1 {
1016 reg = <0>; 1016 reg = <1>;
1017 remote-endpoint = <&drc1_in_be1>; 1017 remote-endpoint = <&drc1_in_be1>;
1018 }; 1018 };
1019 }; 1019 };
@@ -1042,8 +1042,8 @@
1042 #size-cells = <0>; 1042 #size-cells = <0>;
1043 reg = <0>; 1043 reg = <0>;
1044 1044
1045 drc1_in_be1: endpoint@0 { 1045 drc1_in_be1: endpoint@1 {
1046 reg = <0>; 1046 reg = <1>;
1047 remote-endpoint = <&be1_out_drc1>; 1047 remote-endpoint = <&be1_out_drc1>;
1048 }; 1048 };
1049 }; 1049 };
@@ -1053,8 +1053,8 @@
1053 #size-cells = <0>; 1053 #size-cells = <0>;
1054 reg = <1>; 1054 reg = <1>;
1055 1055
1056 drc1_out_tcon1: endpoint@0 { 1056 drc1_out_tcon1: endpoint@1 {
1057 reg = <0>; 1057 reg = <1>;
1058 remote-endpoint = <&tcon1_in_drc1>; 1058 remote-endpoint = <&tcon1_in_drc1>;
1059 }; 1059 };
1060 }; 1060 };
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index d2d75fa664a6..2a63fa10c813 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
32CONFIG_BLK_DEV_SD=y 32CONFIG_BLK_DEV_SD=y
33# CONFIG_SCSI_LOWLEVEL is not set 33# CONFIG_SCSI_LOWLEVEL is not set
34CONFIG_ATA=y 34CONFIG_ATA=y
35CONFIG_PATA_FTIDE010=y
35CONFIG_INPUT_EVDEV=y 36CONFIG_INPUT_EVDEV=y
36CONFIG_KEYBOARD_GPIO=y 37CONFIG_KEYBOARD_GPIO=y
37# CONFIG_INPUT_MOUSE is not set 38# CONFIG_INPUT_MOUSE is not set
@@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y
55CONFIG_LEDS_TRIGGERS=y 56CONFIG_LEDS_TRIGGERS=y
56CONFIG_LEDS_TRIGGER_HEARTBEAT=y 57CONFIG_LEDS_TRIGGER_HEARTBEAT=y
57CONFIG_RTC_CLASS=y 58CONFIG_RTC_CLASS=y
58CONFIG_RTC_DRV_GEMINI=y
59CONFIG_DMADEVICES=y 59CONFIG_DMADEVICES=y
60CONFIG_AMBA_PL08X=y
60# CONFIG_DNOTIFY is not set 61# CONFIG_DNOTIFY is not set
61CONFIG_TMPFS=y 62CONFIG_TMPFS=y
62CONFIG_TMPFS_POSIX_ACL=y 63CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 64e3a2a8cede..d5e1370ec303 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -471,7 +471,7 @@ CONFIG_LCD_PLATFORM=m
471CONFIG_LCD_TOSA=m 471CONFIG_LCD_TOSA=m
472CONFIG_BACKLIGHT_PWM=m 472CONFIG_BACKLIGHT_PWM=m
473CONFIG_BACKLIGHT_TOSA=m 473CONFIG_BACKLIGHT_TOSA=m
474CONFIG_FRAMEBUFFER_CONSOLE=m 474CONFIG_FRAMEBUFFER_CONSOLE=y
475CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y 475CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
476CONFIG_LOGO=y 476CONFIG_LOGO=y
477CONFIG_SOUND=m 477CONFIG_SOUND=m
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 44d4fa57ba0a..070e5074f1ee 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y
113CONFIG_BACKLIGHT_LCD_SUPPORT=y 113CONFIG_BACKLIGHT_LCD_SUPPORT=y
114CONFIG_BACKLIGHT_PWM=m 114CONFIG_BACKLIGHT_PWM=m
115# CONFIG_VGA_CONSOLE is not set 115# CONFIG_VGA_CONSOLE is not set
116CONFIG_FRAMEBUFFER_CONSOLE=m 116CONFIG_FRAMEBUFFER_CONSOLE=y
117CONFIG_LOGO=y 117CONFIG_LOGO=y
118CONFIG_SOUND=m 118CONFIG_SOUND=m
119CONFIG_SND=m 119CONFIG_SND=m
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 8d4c0c926c34..09e7050d5653 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -112,7 +112,7 @@ CONFIG_FB_PXA=m
112CONFIG_FB_PXA_PARAMETERS=y 112CONFIG_FB_PXA_PARAMETERS=y
113CONFIG_BACKLIGHT_LCD_SUPPORT=y 113CONFIG_BACKLIGHT_LCD_SUPPORT=y
114# CONFIG_VGA_CONSOLE is not set 114# CONFIG_VGA_CONSOLE is not set
115CONFIG_FRAMEBUFFER_CONSOLE=m 115CONFIG_FRAMEBUFFER_CONSOLE=y
116CONFIG_LOGO=y 116CONFIG_LOGO=y
117CONFIG_SOUND=m 117CONFIG_SOUND=m
118CONFIG_SND=m 118CONFIG_SND=m
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index ea9646cc2a0e..0a498cb3fad8 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -115,7 +115,11 @@ ENTRY(printascii)
115 mov r1, r0 115 mov r1, r0
116 mov r0, #0x04 @ SYS_WRITE0 116 mov r0, #0x04 @ SYS_WRITE0
117 ARM( svc #0x123456 ) 117 ARM( svc #0x123456 )
118#ifdef CONFIG_CPU_V7M
119 THUMB( bkpt #0xab )
120#else
118 THUMB( svc #0xab ) 121 THUMB( svc #0xab )
122#endif
119 ret lr 123 ret lr
120ENDPROC(printascii) 124ENDPROC(printascii)
121 125
@@ -124,7 +128,11 @@ ENTRY(printch)
124 strb r0, [r1] 128 strb r0, [r1]
125 mov r0, #0x03 @ SYS_WRITEC 129 mov r0, #0x03 @ SYS_WRITEC
126 ARM( svc #0x123456 ) 130 ARM( svc #0x123456 )
131#ifdef CONFIG_CPU_V7M
132 THUMB( bkpt #0xab )
133#else
127 THUMB( svc #0xab ) 134 THUMB( svc #0xab )
135#endif
128 ret lr 136 ret lr
129ENDPROC(printch) 137ENDPROC(printch)
130 138
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 5036f996e694..849014c01cf4 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void)
533 } 533 }
534 534
535 pm_bu->suspended = 0; 535 pm_bu->suspended = 0;
536 pm_bu->canary = virt_to_phys(&canary); 536 pm_bu->canary = __pa_symbol(&canary);
537 pm_bu->resume = virt_to_phys(cpu_resume); 537 pm_bu->resume = __pa_symbol(cpu_resume);
538 538
539 return; 539 return;
540 540
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 5b614388d72f..6d28aa20a7d3 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c)
58 struct platform_device *pdev; 58 struct platform_device *pdev;
59 int res; 59 int res;
60 60
61 if (omap_hsmmc_done != 1) 61 if (omap_hsmmc_done)
62 return; 62 return;
63 63
64 omap_hsmmc_done++; 64 omap_hsmmc_done = 1;
65 65
66 for (; c->mmc; c++) { 66 for (; c->mmc; c++) {
67 pdev = c->pdev; 67 pdev = c->pdev;
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index f040244c57e7..2f4f7002f38d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = {
839 .name = "gpio1", 839 .name = "gpio1",
840 .class = &dra7xx_gpio_hwmod_class, 840 .class = &dra7xx_gpio_hwmod_class,
841 .clkdm_name = "wkupaon_clkdm", 841 .clkdm_name = "wkupaon_clkdm",
842 .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
842 .main_clk = "wkupaon_iclk_mux", 843 .main_clk = "wkupaon_iclk_mux",
843 .prcm = { 844 .prcm = {
844 .omap4 = { 845 .omap4 = {
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 71a34e8c345a..57058ac46f49 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,6 +32,7 @@
32#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33 33
34#include "db8500-regs.h" 34#include "db8500-regs.h"
35#include "pm_domains.h"
35 36
36static int __init ux500_l2x0_unlock(void) 37static int __init ux500_l2x0_unlock(void)
37{ 38{
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
157 158
158static void __init u8500_init_machine(void) 159static void __init u8500_init_machine(void)
159{ 160{
161 /* Initialize ux500 power domains */
162 ux500_pm_domains_init();
163
160 /* automatically probe child nodes of dbx5x0 devices */ 164 /* automatically probe child nodes of dbx5x0 devices */
161 if (of_machine_is_compatible("st-ericsson,u8540")) 165 if (of_machine_is_compatible("st-ericsson,u8540"))
162 of_platform_populate(NULL, u8500_local_bus_nodes, 166 of_platform_populate(NULL, u8500_local_bus_nodes,
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index a970e7fcba9e..f6c33a0c1c61 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -19,7 +19,6 @@
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20 20
21#include "db8500-regs.h" 21#include "db8500-regs.h"
22#include "pm_domains.h"
23 22
24/* ARM WFI Standby signal register */ 23/* ARM WFI Standby signal register */
25#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) 24#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
203 202
204 /* Set up ux500 suspend callbacks. */ 203 /* Set up ux500 suspend callbacks. */
205 suspend_set_ops(UX500_SUSPEND_OPS); 204 suspend_set_ops(UX500_SUSPEND_OPS);
206
207 /* Initialize ux500 power domains */
208 ux500_pm_domains_init();
209} 205}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b8e728cc944..91537d90f5f5 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
344 * reserved here. 344 * reserved here.
345 */ 345 */
346#endif 346#endif
347 /*
348 * In any case, always ensure address 0 is never used as many things
349 * get very confused if 0 is returned as a legitimate address.
350 */
351 memblock_reserve(0, 1);
347} 352}
348 353
349void __init adjust_lowmem_bounds(void) 354void __init adjust_lowmem_bounds(void)
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e71eefa2e427..0641ba54ab62 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,7 +1,7 @@
1#include <linux/bootmem.h> 1#include <linux/bootmem.h>
2#include <linux/gfp.h> 2#include <linux/gfp.h>
3#include <linux/export.h> 3#include <linux/export.h>
4#include <linux/rwlock.h> 4#include <linux/spinlock.h>
5#include <linux/slab.h> 5#include <linux/slab.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index caf8b6fbe5e3..d06e34b5d192 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -61,13 +61,6 @@
61 chosen { 61 chosen {
62 stdout-path = "serial0:115200n8"; 62 stdout-path = "serial0:115200n8";
63 }; 63 };
64
65 reg_vcc3v3: vcc3v3 {
66 compatible = "regulator-fixed";
67 regulator-name = "vcc3v3";
68 regulator-min-microvolt = <3300000>;
69 regulator-max-microvolt = <3300000>;
70 };
71}; 64};
72 65
73&ehci0 { 66&ehci0 {
@@ -91,7 +84,7 @@
91&mmc0 { 84&mmc0 {
92 pinctrl-names = "default"; 85 pinctrl-names = "default";
93 pinctrl-0 = <&mmc0_pins>; 86 pinctrl-0 = <&mmc0_pins>;
94 vmmc-supply = <&reg_vcc3v3>; 87 vmmc-supply = <&reg_dcdc1>;
95 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; 88 cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
96 cd-inverted; 89 cd-inverted;
97 disable-wp; 90 disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index c89010e56488..4157987f4a3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -168,7 +168,8 @@
168&sd_emmc_a { 168&sd_emmc_a {
169 status = "okay"; 169 status = "okay";
170 pinctrl-0 = <&sdio_pins>; 170 pinctrl-0 = <&sdio_pins>;
171 pinctrl-names = "default"; 171 pinctrl-1 = <&sdio_clk_gate_pins>;
172 pinctrl-names = "default", "clk-gate";
172 #address-cells = <1>; 173 #address-cells = <1>;
173 #size-cells = <0>; 174 #size-cells = <0>;
174 175
@@ -194,7 +195,8 @@
194&sd_emmc_b { 195&sd_emmc_b {
195 status = "okay"; 196 status = "okay";
196 pinctrl-0 = <&sdcard_pins>; 197 pinctrl-0 = <&sdcard_pins>;
197 pinctrl-names = "default"; 198 pinctrl-1 = <&sdcard_clk_gate_pins>;
199 pinctrl-names = "default", "clk-gate";
198 200
199 bus-width = <4>; 201 bus-width = <4>;
200 cap-sd-highspeed; 202 cap-sd-highspeed;
@@ -212,10 +214,10 @@
212&sd_emmc_c { 214&sd_emmc_c {
213 status = "okay"; 215 status = "okay";
214 pinctrl-0 = <&emmc_pins>; 216 pinctrl-0 = <&emmc_pins>;
215 pinctrl-names = "default"; 217 pinctrl-1 = <&emmc_clk_gate_pins>;
218 pinctrl-names = "default", "clk-gate";
216 219
217 bus-width = <8>; 220 bus-width = <8>;
218 cap-sd-highspeed;
219 cap-mmc-highspeed; 221 cap-mmc-highspeed;
220 max-frequency = <200000000>; 222 max-frequency = <200000000>;
221 non-removable; 223 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 9697a7a79464..4b17a76959b2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
@@ -107,6 +107,9 @@
107 107
108 states = <3300000 0>, 108 states = <3300000 0>,
109 <1800000 1>; 109 <1800000 1>;
110
111 regulator-settling-time-up-us = <100>;
112 regulator-settling-time-down-us = <5000>;
110 }; 113 };
111 114
112 wifi_32k: wifi-32k { 115 wifi_32k: wifi-32k {
@@ -250,7 +253,8 @@
250&sd_emmc_a { 253&sd_emmc_a {
251 status = "okay"; 254 status = "okay";
252 pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; 255 pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>;
253 pinctrl-names = "default"; 256 pinctrl-1 = <&sdio_clk_gate_pins>;
257 pinctrl-names = "default", "clk-gate";
254 #address-cells = <1>; 258 #address-cells = <1>;
255 #size-cells = <0>; 259 #size-cells = <0>;
256 260
@@ -276,11 +280,16 @@
276&sd_emmc_b { 280&sd_emmc_b {
277 status = "okay"; 281 status = "okay";
278 pinctrl-0 = <&sdcard_pins>; 282 pinctrl-0 = <&sdcard_pins>;
279 pinctrl-names = "default"; 283 pinctrl-1 = <&sdcard_clk_gate_pins>;
284 pinctrl-names = "default", "clk-gate";
280 285
281 bus-width = <4>; 286 bus-width = <4>;
282 cap-sd-highspeed; 287 cap-sd-highspeed;
283 max-frequency = <100000000>; 288 sd-uhs-sdr12;
289 sd-uhs-sdr25;
290 sd-uhs-sdr50;
291 sd-uhs-sdr104;
292 max-frequency = <200000000>;
284 disable-wp; 293 disable-wp;
285 294
286 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; 295 cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
@@ -294,10 +303,10 @@
294&sd_emmc_c { 303&sd_emmc_c {
295 status = "disabled"; 304 status = "disabled";
296 pinctrl-0 = <&emmc_pins>; 305 pinctrl-0 = <&emmc_pins>;
297 pinctrl-names = "default"; 306 pinctrl-1 = <&emmc_clk_gate_pins>;
307 pinctrl-names = "default", "clk-gate";
298 308
299 bus-width = <8>; 309 bus-width = <8>;
300 cap-sd-highspeed;
301 max-frequency = <200000000>; 310 max-frequency = <200000000>;
302 non-removable; 311 non-removable;
303 disable-wp; 312 disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 9c59c3c6d1b6..38dfdde5c147 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -51,7 +51,7 @@
51/ { 51/ {
52 compatible = "nexbox,a95x", "amlogic,meson-gxbb"; 52 compatible = "nexbox,a95x", "amlogic,meson-gxbb";
53 model = "NEXBOX A95X"; 53 model = "NEXBOX A95X";
54 54
55 aliases { 55 aliases {
56 serial0 = &uart_AO; 56 serial0 = &uart_AO;
57 }; 57 };
@@ -232,7 +232,8 @@
232&sd_emmc_a { 232&sd_emmc_a {
233 status = "okay"; 233 status = "okay";
234 pinctrl-0 = <&sdio_pins>; 234 pinctrl-0 = <&sdio_pins>;
235 pinctrl-names = "default"; 235 pinctrl-1 = <&sdio_clk_gate_pins>;
236 pinctrl-names = "default", "clk-gate";
236 #address-cells = <1>; 237 #address-cells = <1>;
237 #size-cells = <0>; 238 #size-cells = <0>;
238 239
@@ -253,7 +254,8 @@
253&sd_emmc_b { 254&sd_emmc_b {
254 status = "okay"; 255 status = "okay";
255 pinctrl-0 = <&sdcard_pins>; 256 pinctrl-0 = <&sdcard_pins>;
256 pinctrl-names = "default"; 257 pinctrl-1 = <&sdcard_clk_gate_pins>;
258 pinctrl-names = "default", "clk-gate";
257 259
258 bus-width = <4>; 260 bus-width = <4>;
259 cap-sd-highspeed; 261 cap-sd-highspeed;
@@ -271,10 +273,10 @@
271&sd_emmc_c { 273&sd_emmc_c {
272 status = "okay"; 274 status = "okay";
273 pinctrl-0 = <&emmc_pins>; 275 pinctrl-0 = <&emmc_pins>;
274 pinctrl-names = "default"; 276 pinctrl-1 = <&emmc_clk_gate_pins>;
277 pinctrl-names = "default", "clk-gate";
275 278
276 bus-width = <8>; 279 bus-width = <8>;
277 cap-sd-highspeed;
278 cap-mmc-highspeed; 280 cap-mmc-highspeed;
279 max-frequency = <200000000>; 281 max-frequency = <200000000>;
280 non-removable; 282 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index d147c853ab05..1ffa1c238a72 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -50,7 +50,7 @@
50/ { 50/ {
51 compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; 51 compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb";
52 model = "Hardkernel ODROID-C2"; 52 model = "Hardkernel ODROID-C2";
53 53
54 aliases { 54 aliases {
55 serial0 = &uart_AO; 55 serial0 = &uart_AO;
56 }; 56 };
@@ -253,7 +253,8 @@
253&sd_emmc_b { 253&sd_emmc_b {
254 status = "okay"; 254 status = "okay";
255 pinctrl-0 = <&sdcard_pins>; 255 pinctrl-0 = <&sdcard_pins>;
256 pinctrl-names = "default"; 256 pinctrl-1 = <&sdcard_clk_gate_pins>;
257 pinctrl-names = "default", "clk-gate";
257 258
258 bus-width = <4>; 259 bus-width = <4>;
259 cap-sd-highspeed; 260 cap-sd-highspeed;
@@ -271,10 +272,10 @@
271&sd_emmc_c { 272&sd_emmc_c {
272 status = "okay"; 273 status = "okay";
273 pinctrl-0 = <&emmc_pins>; 274 pinctrl-0 = <&emmc_pins>;
274 pinctrl-names = "default"; 275 pinctrl-1 = <&emmc_clk_gate_pins>;
276 pinctrl-names = "default", "clk-gate";
275 277
276 bus-width = <8>; 278 bus-width = <8>;
277 cap-sd-highspeed;
278 max-frequency = <200000000>; 279 max-frequency = <200000000>;
279 non-removable; 280 non-removable;
280 disable-wp; 281 disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 81ffc689a5bf..23c08c3afd0a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -194,7 +194,8 @@
194&sd_emmc_a { 194&sd_emmc_a {
195 status = "okay"; 195 status = "okay";
196 pinctrl-0 = <&sdio_pins>; 196 pinctrl-0 = <&sdio_pins>;
197 pinctrl-names = "default"; 197 pinctrl-1 = <&sdio_clk_gate_pins>;
198 pinctrl-names = "default", "clk-gate";
198 #address-cells = <1>; 199 #address-cells = <1>;
199 #size-cells = <0>; 200 #size-cells = <0>;
200 201
@@ -220,10 +221,14 @@
220&sd_emmc_b { 221&sd_emmc_b {
221 status = "okay"; 222 status = "okay";
222 pinctrl-0 = <&sdcard_pins>; 223 pinctrl-0 = <&sdcard_pins>;
223 pinctrl-names = "default"; 224 pinctrl-1 = <&sdcard_clk_gate_pins>;
225 pinctrl-names = "default", "clk-gate";
224 226
225 bus-width = <4>; 227 bus-width = <4>;
226 cap-sd-highspeed; 228 cap-sd-highspeed;
229 sd-uhs-sdr12;
230 sd-uhs-sdr25;
231 sd-uhs-sdr50;
227 max-frequency = <100000000>; 232 max-frequency = <100000000>;
228 disable-wp; 233 disable-wp;
229 234
@@ -238,10 +243,10 @@
238&sd_emmc_c { 243&sd_emmc_c {
239 status = "okay"; 244 status = "okay";
240 pinctrl-0 = <&emmc_pins>; 245 pinctrl-0 = <&emmc_pins>;
241 pinctrl-names = "default"; 246 pinctrl-1 = <&emmc_clk_gate_pins>;
247 pinctrl-names = "default", "clk-gate";
242 248
243 bus-width = <8>; 249 bus-width = <8>;
244 cap-sd-highspeed;
245 cap-mmc-highspeed; 250 cap-mmc-highspeed;
246 max-frequency = <200000000>; 251 max-frequency = <200000000>;
247 non-removable; 252 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 346753fb6324..f2bc6dea1fc6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -155,7 +155,8 @@
155&sd_emmc_a { 155&sd_emmc_a {
156 status = "okay"; 156 status = "okay";
157 pinctrl-0 = <&sdio_pins &sdio_irq_pins>; 157 pinctrl-0 = <&sdio_pins &sdio_irq_pins>;
158 pinctrl-names = "default"; 158 pinctrl-1 = <&sdio_clk_gate_pins>;
159 pinctrl-names = "default", "clk-gate";
159 #address-cells = <1>; 160 #address-cells = <1>;
160 #size-cells = <0>; 161 #size-cells = <0>;
161 162
@@ -181,7 +182,8 @@
181&sd_emmc_b { 182&sd_emmc_b {
182 status = "okay"; 183 status = "okay";
183 pinctrl-0 = <&sdcard_pins>; 184 pinctrl-0 = <&sdcard_pins>;
184 pinctrl-names = "default"; 185 pinctrl-1 = <&sdcard_clk_gate_pins>;
186 pinctrl-names = "default", "clk-gate";
185 187
186 bus-width = <4>; 188 bus-width = <4>;
187 cap-sd-highspeed; 189 cap-sd-highspeed;
@@ -198,10 +200,10 @@
198&sd_emmc_c { 200&sd_emmc_c {
199 status = "okay"; 201 status = "okay";
200 pinctrl-0 = <&emmc_pins>; 202 pinctrl-0 = <&emmc_pins>;
201 pinctrl-names = "default"; 203 pinctrl-1 = <&emmc_clk_gate_pins>;
204 pinctrl-names = "default", "clk-gate";
202 205
203 bus-width = <8>; 206 bus-width = <8>;
204 cap-sd-highspeed;
205 cap-mmc-highspeed; 207 cap-mmc-highspeed;
206 max-frequency = <200000000>; 208 max-frequency = <200000000>;
207 non-removable; 209 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 52f1687e7a09..af834cdbba79 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -392,6 +392,17 @@
392 }; 392 };
393 }; 393 };
394 394
395 emmc_clk_gate_pins: emmc_clk_gate {
396 mux {
397 groups = "BOOT_8";
398 function = "gpio_periphs";
399 };
400 cfg-pull-down {
401 pins = "BOOT_8";
402 bias-pull-down;
403 };
404 };
405
395 nor_pins: nor { 406 nor_pins: nor {
396 mux { 407 mux {
397 groups = "nor_d", 408 groups = "nor_d",
@@ -430,6 +441,17 @@
430 }; 441 };
431 }; 442 };
432 443
444 sdcard_clk_gate_pins: sdcard_clk_gate {
445 mux {
446 groups = "CARD_2";
447 function = "gpio_periphs";
448 };
449 cfg-pull-down {
450 pins = "CARD_2";
451 bias-pull-down;
452 };
453 };
454
433 sdio_pins: sdio { 455 sdio_pins: sdio {
434 mux { 456 mux {
435 groups = "sdio_d0", 457 groups = "sdio_d0",
@@ -442,6 +464,17 @@
442 }; 464 };
443 }; 465 };
444 466
467 sdio_clk_gate_pins: sdio_clk_gate {
468 mux {
469 groups = "GPIOX_4";
470 function = "gpio_periphs";
471 };
472 cfg-pull-down {
473 pins = "GPIOX_4";
474 bias-pull-down;
475 };
476 };
477
445 sdio_irq_pins: sdio_irq { 478 sdio_irq_pins: sdio_irq {
446 mux { 479 mux {
447 groups = "sdio_irq"; 480 groups = "sdio_irq";
@@ -661,21 +694,21 @@
661 694
662&sd_emmc_a { 695&sd_emmc_a {
663 clocks = <&clkc CLKID_SD_EMMC_A>, 696 clocks = <&clkc CLKID_SD_EMMC_A>,
664 <&xtal>, 697 <&clkc CLKID_SD_EMMC_A_CLK0>,
665 <&clkc CLKID_FCLK_DIV2>; 698 <&clkc CLKID_FCLK_DIV2>;
666 clock-names = "core", "clkin0", "clkin1"; 699 clock-names = "core", "clkin0", "clkin1";
667}; 700};
668 701
669&sd_emmc_b { 702&sd_emmc_b {
670 clocks = <&clkc CLKID_SD_EMMC_B>, 703 clocks = <&clkc CLKID_SD_EMMC_B>,
671 <&xtal>, 704 <&clkc CLKID_SD_EMMC_B_CLK0>,
672 <&clkc CLKID_FCLK_DIV2>; 705 <&clkc CLKID_FCLK_DIV2>;
673 clock-names = "core", "clkin0", "clkin1"; 706 clock-names = "core", "clkin0", "clkin1";
674}; 707};
675 708
676&sd_emmc_c { 709&sd_emmc_c {
677 clocks = <&clkc CLKID_SD_EMMC_C>, 710 clocks = <&clkc CLKID_SD_EMMC_C>,
678 <&xtal>, 711 <&clkc CLKID_SD_EMMC_C_CLK0>,
679 <&clkc CLKID_FCLK_DIV2>; 712 <&clkc CLKID_FCLK_DIV2>;
680 clock-names = "core", "clkin0", "clkin1"; 713 clock-names = "core", "clkin0", "clkin1";
681}; 714};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index 2a5804ce7f4b..977b4240f3c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
@@ -123,7 +123,8 @@
123&sd_emmc_b { 123&sd_emmc_b {
124 status = "okay"; 124 status = "okay";
125 pinctrl-0 = <&sdcard_pins>; 125 pinctrl-0 = <&sdcard_pins>;
126 pinctrl-names = "default"; 126 pinctrl-1 = <&sdcard_clk_gate_pins>;
127 pinctrl-names = "default", "clk-gate";
127 128
128 bus-width = <4>; 129 bus-width = <4>;
129 cap-sd-highspeed; 130 cap-sd-highspeed;
@@ -141,10 +142,10 @@
141&sd_emmc_c { 142&sd_emmc_c {
142 status = "okay"; 143 status = "okay";
143 pinctrl-0 = <&emmc_pins>; 144 pinctrl-0 = <&emmc_pins>;
144 pinctrl-names = "default"; 145 pinctrl-1 = <&emmc_clk_gate_pins>;
146 pinctrl-names = "default", "clk-gate";
145 147
146 bus-width = <8>; 148 bus-width = <8>;
147 cap-sd-highspeed;
148 cap-mmc-highspeed; 149 cap-mmc-highspeed;
149 max-frequency = <100000000>; 150 max-frequency = <100000000>;
150 non-removable; 151 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 69ca14ac10fa..64c54c92e214 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -91,6 +91,9 @@
91 91
92 states = <3300000 0>, 92 states = <3300000 0>,
93 <1800000 1>; 93 <1800000 1>;
94
95 regulator-settling-time-up-us = <200>;
96 regulator-settling-time-down-us = <50000>;
94 }; 97 };
95 98
96 vddio_boot: regulator-vddio_boot { 99 vddio_boot: regulator-vddio_boot {
@@ -197,10 +200,14 @@
197&sd_emmc_b { 200&sd_emmc_b {
198 status = "okay"; 201 status = "okay";
199 pinctrl-0 = <&sdcard_pins>; 202 pinctrl-0 = <&sdcard_pins>;
200 pinctrl-names = "default"; 203 pinctrl-1 = <&sdcard_clk_gate_pins>;
204 pinctrl-names = "default", "clk-gate";
201 205
202 bus-width = <4>; 206 bus-width = <4>;
203 cap-sd-highspeed; 207 cap-sd-highspeed;
208 sd-uhs-sdr12;
209 sd-uhs-sdr25;
210 sd-uhs-sdr50;
204 max-frequency = <100000000>; 211 max-frequency = <100000000>;
205 disable-wp; 212 disable-wp;
206 213
@@ -215,10 +222,12 @@
215&sd_emmc_c { 222&sd_emmc_c {
216 status = "okay"; 223 status = "okay";
217 pinctrl-0 = <&emmc_pins>; 224 pinctrl-0 = <&emmc_pins>;
218 pinctrl-names = "default"; 225 pinctrl-1 = <&emmc_clk_gate_pins>;
226 pinctrl-names = "default", "clk-gate";
219 227
220 bus-width = <8>; 228 bus-width = <8>;
221 cap-mmc-highspeed; 229 cap-mmc-highspeed;
230 mmc-ddr-3_3v;
222 max-frequency = <50000000>; 231 max-frequency = <50000000>;
223 non-removable; 232 non-removable;
224 disable-wp; 233 disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 4c2ac7650fcd..1b8f32867aa1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
@@ -189,7 +189,8 @@
189&sd_emmc_a { 189&sd_emmc_a {
190 status = "okay"; 190 status = "okay";
191 pinctrl-0 = <&sdio_pins>; 191 pinctrl-0 = <&sdio_pins>;
192 pinctrl-names = "default"; 192 pinctrl-1 = <&sdio_clk_gate_pins>;
193 pinctrl-names = "default", "clk-gate";
193 #address-cells = <1>; 194 #address-cells = <1>;
194 #size-cells = <0>; 195 #size-cells = <0>;
195 196
@@ -210,7 +211,8 @@
210&sd_emmc_b { 211&sd_emmc_b {
211 status = "okay"; 212 status = "okay";
212 pinctrl-0 = <&sdcard_pins>; 213 pinctrl-0 = <&sdcard_pins>;
213 pinctrl-names = "default"; 214 pinctrl-1 = <&sdcard_clk_gate_pins>;
215 pinctrl-names = "default", "clk-gate";
214 216
215 bus-width = <4>; 217 bus-width = <4>;
216 cap-sd-highspeed; 218 cap-sd-highspeed;
@@ -228,10 +230,10 @@
228&sd_emmc_c { 230&sd_emmc_c {
229 status = "okay"; 231 status = "okay";
230 pinctrl-0 = <&emmc_pins>; 232 pinctrl-0 = <&emmc_pins>;
231 pinctrl-names = "default"; 233 pinctrl-1 = <&emmc_clk_gate_pins>;
234 pinctrl-names = "default", "clk-gate";
232 235
233 bus-width = <8>; 236 bus-width = <8>;
234 cap-sd-highspeed;
235 cap-mmc-highspeed; 237 cap-mmc-highspeed;
236 max-frequency = <200000000>; 238 max-frequency = <200000000>;
237 non-removable; 239 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index f3eea8e89d12..129af9068814 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -95,7 +95,8 @@
95&sd_emmc_a { 95&sd_emmc_a {
96 status = "okay"; 96 status = "okay";
97 pinctrl-0 = <&sdio_pins>; 97 pinctrl-0 = <&sdio_pins>;
98 pinctrl-names = "default"; 98 pinctrl-1 = <&sdio_clk_gate_pins>;
99 pinctrl-names = "default", "clk-gate";
99 #address-cells = <1>; 100 #address-cells = <1>;
100 #size-cells = <0>; 101 #size-cells = <0>;
101 102
@@ -116,7 +117,8 @@
116&sd_emmc_b { 117&sd_emmc_b {
117 status = "okay"; 118 status = "okay";
118 pinctrl-0 = <&sdcard_pins>; 119 pinctrl-0 = <&sdcard_pins>;
119 pinctrl-names = "default"; 120 pinctrl-1 = <&sdcard_clk_gate_pins>;
121 pinctrl-names = "default", "clk-gate";
120 122
121 bus-width = <4>; 123 bus-width = <4>;
122 cap-sd-highspeed; 124 cap-sd-highspeed;
@@ -134,10 +136,10 @@
134&sd_emmc_c { 136&sd_emmc_c {
135 status = "okay"; 137 status = "okay";
136 pinctrl-0 = <&emmc_pins>; 138 pinctrl-0 = <&emmc_pins>;
137 pinctrl-names = "default"; 139 pinctrl-1 = <&emmc_clk_gate_pins>;
140 pinctrl-names = "default", "clk-gate";
138 141
139 bus-width = <8>; 142 bus-width = <8>;
140 cap-sd-highspeed;
141 cap-mmc-highspeed; 143 cap-mmc-highspeed;
142 max-frequency = <200000000>; 144 max-frequency = <200000000>;
143 non-removable; 145 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index d6876e64979e..d8dd3298b15c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -281,6 +281,17 @@
281 }; 281 };
282 }; 282 };
283 283
284 emmc_clk_gate_pins: emmc_clk_gate {
285 mux {
286 groups = "BOOT_8";
287 function = "gpio_periphs";
288 };
289 cfg-pull-down {
290 pins = "BOOT_8";
291 bias-pull-down;
292 };
293 };
294
284 nor_pins: nor { 295 nor_pins: nor {
285 mux { 296 mux {
286 groups = "nor_d", 297 groups = "nor_d",
@@ -319,6 +330,17 @@
319 }; 330 };
320 }; 331 };
321 332
333 sdcard_clk_gate_pins: sdcard_clk_gate {
334 mux {
335 groups = "CARD_2";
336 function = "gpio_periphs";
337 };
338 cfg-pull-down {
339 pins = "CARD_2";
340 bias-pull-down;
341 };
342 };
343
322 sdio_pins: sdio { 344 sdio_pins: sdio {
323 mux { 345 mux {
324 groups = "sdio_d0", 346 groups = "sdio_d0",
@@ -331,6 +353,17 @@
331 }; 353 };
332 }; 354 };
333 355
356 sdio_clk_gate_pins: sdio_clk_gate {
357 mux {
358 groups = "GPIOX_4";
359 function = "gpio_periphs";
360 };
361 cfg-pull-down {
362 pins = "GPIOX_4";
363 bias-pull-down;
364 };
365 };
366
334 sdio_irq_pins: sdio_irq { 367 sdio_irq_pins: sdio_irq {
335 mux { 368 mux {
336 groups = "sdio_irq"; 369 groups = "sdio_irq";
@@ -603,21 +636,21 @@
603 636
604&sd_emmc_a { 637&sd_emmc_a {
605 clocks = <&clkc CLKID_SD_EMMC_A>, 638 clocks = <&clkc CLKID_SD_EMMC_A>,
606 <&xtal>, 639 <&clkc CLKID_SD_EMMC_A_CLK0>,
607 <&clkc CLKID_FCLK_DIV2>; 640 <&clkc CLKID_FCLK_DIV2>;
608 clock-names = "core", "clkin0", "clkin1"; 641 clock-names = "core", "clkin0", "clkin1";
609}; 642};
610 643
611&sd_emmc_b { 644&sd_emmc_b {
612 clocks = <&clkc CLKID_SD_EMMC_B>, 645 clocks = <&clkc CLKID_SD_EMMC_B>,
613 <&xtal>, 646 <&clkc CLKID_SD_EMMC_B_CLK0>,
614 <&clkc CLKID_FCLK_DIV2>; 647 <&clkc CLKID_FCLK_DIV2>;
615 clock-names = "core", "clkin0", "clkin1"; 648 clock-names = "core", "clkin0", "clkin1";
616}; 649};
617 650
618&sd_emmc_c { 651&sd_emmc_c {
619 clocks = <&clkc CLKID_SD_EMMC_C>, 652 clocks = <&clkc CLKID_SD_EMMC_C>,
620 <&xtal>, 653 <&clkc CLKID_SD_EMMC_C_CLK0>,
621 <&clkc CLKID_FCLK_DIV2>; 654 <&clkc CLKID_FCLK_DIV2>;
622 clock-names = "core", "clkin0", "clkin1"; 655 clock-names = "core", "clkin0", "clkin1";
623}; 656};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 9b10c5f4f8c0..22c697732f66 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -175,7 +175,8 @@
175&sd_emmc_b { 175&sd_emmc_b {
176 status = "okay"; 176 status = "okay";
177 pinctrl-0 = <&sdcard_pins>; 177 pinctrl-0 = <&sdcard_pins>;
178 pinctrl-names = "default"; 178 pinctrl-1 = <&sdcard_clk_gate_pins>;
179 pinctrl-names = "default", "clk-gate";
179 180
180 bus-width = <4>; 181 bus-width = <4>;
181 cap-sd-highspeed; 182 cap-sd-highspeed;
@@ -193,10 +194,10 @@
193&sd_emmc_c { 194&sd_emmc_c {
194 status = "okay"; 195 status = "okay";
195 pinctrl-0 = <&emmc_pins>; 196 pinctrl-0 = <&emmc_pins>;
196 pinctrl-names = "default"; 197 pinctrl-1 = <&emmc_clk_gate_pins>;
198 pinctrl-names = "default", "clk-gate";
197 199
198 bus-width = <8>; 200 bus-width = <8>;
199 cap-sd-highspeed;
200 cap-mmc-highspeed; 201 cap-mmc-highspeed;
201 max-frequency = <200000000>; 202 max-frequency = <200000000>;
202 non-removable; 203 non-removable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
index 08f1dd69b679..470f72bb863c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
@@ -220,7 +220,6 @@
220 pinctrl-names = "default"; 220 pinctrl-names = "default";
221 221
222 bus-width = <8>; 222 bus-width = <8>;
223 cap-sd-highspeed;
224 cap-mmc-highspeed; 223 cap-mmc-highspeed;
225 max-frequency = <200000000>; 224 max-frequency = <200000000>;
226 non-removable; 225 non-removable;
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 4d360713ed12..30d48ecf46e0 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -254,7 +254,7 @@
254 254
255 ap_syscon: system-controller@6f4000 { 255 ap_syscon: system-controller@6f4000 {
256 compatible = "syscon", "simple-mfd"; 256 compatible = "syscon", "simple-mfd";
257 reg = <0x6f4000 0x1000>; 257 reg = <0x6f4000 0x2000>;
258 258
259 ap_clk: clock { 259 ap_clk: clock {
260 compatible = "marvell,ap806-clock"; 260 compatible = "marvell,ap806-clock";
@@ -265,7 +265,7 @@
265 compatible = "marvell,ap806-pinctrl"; 265 compatible = "marvell,ap806-pinctrl";
266 }; 266 };
267 267
268 ap_gpio: gpio { 268 ap_gpio: gpio@1040 {
269 compatible = "marvell,armada-8k-gpio"; 269 compatible = "marvell,armada-8k-gpio";
270 offset = <0x1040>; 270 offset = <0x1040>;
271 ngpios = <20>; 271 ngpios = <20>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 8263a8a504a8..f2aa2a81de4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -336,7 +336,7 @@
336 /* non-prefetchable memory */ 336 /* non-prefetchable memory */
337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; 337 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
338 interrupt-map-mask = <0 0 0 0>; 338 interrupt-map-mask = <0 0 0 0>;
339 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 340 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
341 num-lanes = <1>; 341 num-lanes = <1>;
342 clocks = <&cpm_clk 1 13>; 342 clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
362 /* non-prefetchable memory */ 362 /* non-prefetchable memory */
363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>; 363 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
364 interrupt-map-mask = <0 0 0 0>; 364 interrupt-map-mask = <0 0 0 0>;
365 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 366 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
367 367
368 num-lanes = <1>; 368 num-lanes = <1>;
@@ -389,7 +389,7 @@
389 /* non-prefetchable memory */ 389 /* non-prefetchable memory */
390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>; 390 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
391 interrupt-map-mask = <0 0 0 0>; 391 interrupt-map-mask = <0 0 0 0>;
392 interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 393 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
394 394
395 num-lanes = <1>; 395 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index b71ee6c83668..4fe70323abb3 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -335,7 +335,7 @@
335 /* non-prefetchable memory */ 335 /* non-prefetchable memory */
336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>; 336 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
337 interrupt-map-mask = <0 0 0 0>; 337 interrupt-map-mask = <0 0 0 0>;
338 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 338 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; 339 interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
340 num-lanes = <1>; 340 num-lanes = <1>;
341 clocks = <&cps_clk 1 13>; 341 clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
361 /* non-prefetchable memory */ 361 /* non-prefetchable memory */
362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>; 362 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
363 interrupt-map-mask = <0 0 0 0>; 363 interrupt-map-mask = <0 0 0 0>;
364 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 364 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; 365 interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
366 366
367 num-lanes = <1>; 367 num-lanes = <1>;
@@ -388,7 +388,7 @@
388 /* non-prefetchable memory */ 388 /* non-prefetchable memory */
389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>; 389 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
390 interrupt-map-mask = <0 0 0 0>; 390 interrupt-map-mask = <0 0 0 0>;
391 interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 391 interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; 392 interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
393 393
394 num-lanes = <1>; 394 num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 4786c67b5e65..d9d885006a8e 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -62,6 +62,7 @@
62 brightness-levels = <256 128 64 16 8 4 0>; 62 brightness-levels = <256 128 64 16 8 4 0>;
63 default-brightness-level = <6>; 63 default-brightness-level = <6>;
64 64
65 power-supply = <&reg_12v>;
65 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; 66 enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
66 }; 67 };
67 68
@@ -83,6 +84,15 @@
83 regulator-always-on; 84 regulator-always-on;
84 }; 85 };
85 86
87 reg_12v: regulator2 {
88 compatible = "regulator-fixed";
89 regulator-name = "fixed-12V";
90 regulator-min-microvolt = <12000000>;
91 regulator-max-microvolt = <12000000>;
92 regulator-boot-on;
93 regulator-always-on;
94 };
95
86 rsnd_ak4613: sound { 96 rsnd_ak4613: sound {
87 compatible = "simple-audio-card"; 97 compatible = "simple-audio-card";
88 98
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d615cb6e64d..41d61840fb99 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -582,7 +582,7 @@
582 vop_mmu: iommu@ff373f00 { 582 vop_mmu: iommu@ff373f00 {
583 compatible = "rockchip,iommu"; 583 compatible = "rockchip,iommu";
584 reg = <0x0 0xff373f00 0x0 0x100>; 584 reg = <0x0 0xff373f00 0x0 0x100>;
585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>; 585 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
586 interrupt-names = "vop_mmu"; 586 interrupt-names = "vop_mmu";
587 #iommu-cells = <0>; 587 #iommu-cells = <0>;
588 status = "disabled"; 588 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index e0518b4bc6c2..1070c8264c13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -113,8 +113,7 @@
113 compatible = "arm,cortex-a53", "arm,armv8"; 113 compatible = "arm,cortex-a53", "arm,armv8";
114 reg = <0x0 0x0>; 114 reg = <0x0 0x0>;
115 enable-method = "psci"; 115 enable-method = "psci";
116 clocks = <&cru ARMCLKL>; 116
117 operating-points-v2 = <&cluster0_opp>;
118 #cooling-cells = <2>; /* min followed by max */ 117 #cooling-cells = <2>; /* min followed by max */
119 }; 118 };
120 119
@@ -123,8 +122,6 @@
123 compatible = "arm,cortex-a53", "arm,armv8"; 122 compatible = "arm,cortex-a53", "arm,armv8";
124 reg = <0x0 0x1>; 123 reg = <0x0 0x1>;
125 enable-method = "psci"; 124 enable-method = "psci";
126 clocks = <&cru ARMCLKL>;
127 operating-points-v2 = <&cluster0_opp>;
128 }; 125 };
129 126
130 cpu_l2: cpu@2 { 127 cpu_l2: cpu@2 {
@@ -132,8 +129,6 @@
132 compatible = "arm,cortex-a53", "arm,armv8"; 129 compatible = "arm,cortex-a53", "arm,armv8";
133 reg = <0x0 0x2>; 130 reg = <0x0 0x2>;
134 enable-method = "psci"; 131 enable-method = "psci";
135 clocks = <&cru ARMCLKL>;
136 operating-points-v2 = <&cluster0_opp>;
137 }; 132 };
138 133
139 cpu_l3: cpu@3 { 134 cpu_l3: cpu@3 {
@@ -141,8 +136,6 @@
141 compatible = "arm,cortex-a53", "arm,armv8"; 136 compatible = "arm,cortex-a53", "arm,armv8";
142 reg = <0x0 0x3>; 137 reg = <0x0 0x3>;
143 enable-method = "psci"; 138 enable-method = "psci";
144 clocks = <&cru ARMCLKL>;
145 operating-points-v2 = <&cluster0_opp>;
146 }; 139 };
147 140
148 cpu_b0: cpu@100 { 141 cpu_b0: cpu@100 {
@@ -150,8 +143,7 @@
150 compatible = "arm,cortex-a53", "arm,armv8"; 143 compatible = "arm,cortex-a53", "arm,armv8";
151 reg = <0x0 0x100>; 144 reg = <0x0 0x100>;
152 enable-method = "psci"; 145 enable-method = "psci";
153 clocks = <&cru ARMCLKB>; 146
154 operating-points-v2 = <&cluster1_opp>;
155 #cooling-cells = <2>; /* min followed by max */ 147 #cooling-cells = <2>; /* min followed by max */
156 }; 148 };
157 149
@@ -160,8 +152,6 @@
160 compatible = "arm,cortex-a53", "arm,armv8"; 152 compatible = "arm,cortex-a53", "arm,armv8";
161 reg = <0x0 0x101>; 153 reg = <0x0 0x101>;
162 enable-method = "psci"; 154 enable-method = "psci";
163 clocks = <&cru ARMCLKB>;
164 operating-points-v2 = <&cluster1_opp>;
165 }; 155 };
166 156
167 cpu_b2: cpu@102 { 157 cpu_b2: cpu@102 {
@@ -169,8 +159,6 @@
169 compatible = "arm,cortex-a53", "arm,armv8"; 159 compatible = "arm,cortex-a53", "arm,armv8";
170 reg = <0x0 0x102>; 160 reg = <0x0 0x102>;
171 enable-method = "psci"; 161 enable-method = "psci";
172 clocks = <&cru ARMCLKB>;
173 operating-points-v2 = <&cluster1_opp>;
174 }; 162 };
175 163
176 cpu_b3: cpu@103 { 164 cpu_b3: cpu@103 {
@@ -178,62 +166,6 @@
178 compatible = "arm,cortex-a53", "arm,armv8"; 166 compatible = "arm,cortex-a53", "arm,armv8";
179 reg = <0x0 0x103>; 167 reg = <0x0 0x103>;
180 enable-method = "psci"; 168 enable-method = "psci";
181 clocks = <&cru ARMCLKB>;
182 operating-points-v2 = <&cluster1_opp>;
183 };
184 };
185
186 cluster0_opp: opp-table0 {
187 compatible = "operating-points-v2";
188 opp-shared;
189
190 opp00 {
191 opp-hz = /bits/ 64 <312000000>;
192 opp-microvolt = <950000>;
193 clock-latency-ns = <40000>;
194 };
195 opp01 {
196 opp-hz = /bits/ 64 <408000000>;
197 opp-microvolt = <950000>;
198 };
199 opp02 {
200 opp-hz = /bits/ 64 <600000000>;
201 opp-microvolt = <950000>;
202 };
203 opp03 {
204 opp-hz = /bits/ 64 <816000000>;
205 opp-microvolt = <1025000>;
206 };
207 opp04 {
208 opp-hz = /bits/ 64 <1008000000>;
209 opp-microvolt = <1125000>;
210 };
211 };
212
213 cluster1_opp: opp-table1 {
214 compatible = "operating-points-v2";
215 opp-shared;
216
217 opp00 {
218 opp-hz = /bits/ 64 <312000000>;
219 opp-microvolt = <950000>;
220 clock-latency-ns = <40000>;
221 };
222 opp01 {
223 opp-hz = /bits/ 64 <408000000>;
224 opp-microvolt = <950000>;
225 };
226 opp02 {
227 opp-hz = /bits/ 64 <600000000>;
228 opp-microvolt = <950000>;
229 };
230 opp03 {
231 opp-hz = /bits/ 64 <816000000>;
232 opp-microvolt = <975000>;
233 };
234 opp04 {
235 opp-hz = /bits/ 64 <1008000000>;
236 opp-microvolt = <1050000>;
237 }; 169 };
238 }; 170 };
239 171
@@ -808,7 +740,7 @@
808 iep_mmu: iommu@ff900800 { 740 iep_mmu: iommu@ff900800 {
809 compatible = "rockchip,iommu"; 741 compatible = "rockchip,iommu";
810 reg = <0x0 0xff900800 0x0 0x100>; 742 reg = <0x0 0xff900800 0x0 0x100>;
811 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; 743 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
812 interrupt-names = "iep_mmu"; 744 interrupt-names = "iep_mmu";
813 #iommu-cells = <0>; 745 #iommu-cells = <0>;
814 status = "disabled"; 746 status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 7fd4bfcaa38e..fef82274a39d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -371,10 +371,10 @@
371 regulator-always-on; 371 regulator-always-on;
372 regulator-boot-on; 372 regulator-boot-on;
373 regulator-min-microvolt = <1800000>; 373 regulator-min-microvolt = <1800000>;
374 regulator-max-microvolt = <3300000>; 374 regulator-max-microvolt = <3000000>;
375 regulator-state-mem { 375 regulator-state-mem {
376 regulator-on-in-suspend; 376 regulator-on-in-suspend;
377 regulator-suspend-microvolt = <3300000>; 377 regulator-suspend-microvolt = <3000000>;
378 }; 378 };
379 }; 379 };
380 380
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 53ff3d191a1d..910628d18add 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -325,12 +325,12 @@
325 vcc_sd: LDO_REG4 { 325 vcc_sd: LDO_REG4 {
326 regulator-name = "vcc_sd"; 326 regulator-name = "vcc_sd";
327 regulator-min-microvolt = <1800000>; 327 regulator-min-microvolt = <1800000>;
328 regulator-max-microvolt = <3300000>; 328 regulator-max-microvolt = <3000000>;
329 regulator-always-on; 329 regulator-always-on;
330 regulator-boot-on; 330 regulator-boot-on;
331 regulator-state-mem { 331 regulator-state-mem {
332 regulator-on-in-suspend; 332 regulator-on-in-suspend;
333 regulator-suspend-microvolt = <3300000>; 333 regulator-suspend-microvolt = <3000000>;
334 }; 334 };
335 }; 335 };
336 336
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 6c30bb02210d..0f873c897d0d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -315,10 +315,10 @@
315 regulator-always-on; 315 regulator-always-on;
316 regulator-boot-on; 316 regulator-boot-on;
317 regulator-min-microvolt = <1800000>; 317 regulator-min-microvolt = <1800000>;
318 regulator-max-microvolt = <3300000>; 318 regulator-max-microvolt = <3000000>;
319 regulator-state-mem { 319 regulator-state-mem {
320 regulator-on-in-suspend; 320 regulator-on-in-suspend;
321 regulator-suspend-microvolt = <3300000>; 321 regulator-suspend-microvolt = <3000000>;
322 }; 322 };
323 }; 323 };
324 324
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d79e9b3265b9..ab7629c5b856 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1629,9 +1629,9 @@
1629 compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; 1629 compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi";
1630 reg = <0x0 0xff960000 0x0 0x8000>; 1630 reg = <0x0 0xff960000 0x0 0x8000>;
1631 interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>; 1631 interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>;
1632 clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>, 1632 clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>,
1633 <&cru SCLK_DPHY_TX0_CFG>; 1633 <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>;
1634 clock-names = "ref", "pclk", "phy_cfg"; 1634 clock-names = "ref", "pclk", "phy_cfg", "grf";
1635 power-domains = <&power RK3399_PD_VIO>; 1635 power-domains = <&power RK3399_PD_VIO>;
1636 rockchip,grf = <&grf>; 1636 rockchip,grf = <&grf>;
1637 status = "disabled"; 1637 status = "disabled";
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3585a5e26151..f7c4d2146aed 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -95,16 +95,19 @@
95#define KERNEL_END _end 95#define KERNEL_END _end
96 96
97/* 97/*
98 * The size of the KASAN shadow region. This should be 1/8th of the 98 * KASAN requires 1/8th of the kernel virtual address space for the shadow
99 * size of the entire kernel virtual address space. 99 * region. KASAN can bloat the stack significantly, so double the (minimum)
100 * stack size when KASAN is in use.
100 */ 101 */
101#ifdef CONFIG_KASAN 102#ifdef CONFIG_KASAN
102#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) 103#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3))
104#define KASAN_THREAD_SHIFT 1
103#else 105#else
104#define KASAN_SHADOW_SIZE (0) 106#define KASAN_SHADOW_SIZE (0)
107#define KASAN_THREAD_SHIFT 0
105#endif 108#endif
106 109
107#define MIN_THREAD_SHIFT 14 110#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
108 111
109/* 112/*
110 * VMAP'd stacks are allocated at page granularity, so we must ensure that such 113 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bc4e92337d16..b46e54c2399b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -401,7 +401,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
401/* Find an entry in the third-level page table. */ 401/* Find an entry in the third-level page table. */
402#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 402#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
403 403
404#define pte_offset_phys(dir,addr) (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t)) 404#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
405#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) 405#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
406 406
407#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 407#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index f0e6d717885b..d06fbe4cd38d 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
649 return 0; 649 return 0;
650} 650}
651 651
652late_initcall(armv8_deprecated_init); 652core_initcall(armv8_deprecated_init);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cd52d365d1f0..21e2c95d24e7 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void)
1307 return 0; 1307 return 0;
1308} 1308}
1309 1309
1310late_initcall(enable_mrs_emulation); 1310core_initcall(enable_mrs_emulation);
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f444f374bd7b..5d547deb6996 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -444,4 +444,4 @@ static int __init fpsimd_init(void)
444 444
445 return 0; 445 return 0;
446} 446}
447late_initcall(fpsimd_init); 447core_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7434ec0c7a27..0b243ecaf7ac 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -384,6 +384,7 @@ ENTRY(kimage_vaddr)
384 * booted in EL1 or EL2 respectively. 384 * booted in EL1 or EL2 respectively.
385 */ 385 */
386ENTRY(el2_setup) 386ENTRY(el2_setup)
387 msr SPsel, #1 // We want to use SP_EL{1,2}
387 mrs x0, CurrentEL 388 mrs x0, CurrentEL
388 cmp x0, #CurrentEL_EL2 389 cmp x0, #CurrentEL_EL2
389 b.eq 1f 390 b.eq 1f
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 89993c4be1be..b64958b23a7f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
97 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, 97 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
98 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); 98 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
99 } else { 99 } else {
100 pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK); 100 pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
101 } 101 }
102 102
103 pr_alert(" CM = %lu, WnR = %lu\n", 103 pr_alert(" CM = %lu, WnR = %lu\n",
@@ -651,7 +651,7 @@ static const struct fault_info fault_info[] = {
651 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, 651 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
652 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, 652 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
653 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, 653 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
654 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 654 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
655 { do_bad, SIGBUS, 0, "unknown 8" }, 655 { do_bad, SIGBUS, 0, "unknown 8" },
656 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 656 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
657 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 657 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 87cde1e4b38c..0777f3a8a1f3 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -194,6 +194,10 @@ config TIMER_DIVIDE
194 int "Timer divider (integer)" 194 int "Timer divider (integer)"
195 default "128" 195 default "128"
196 196
197config CPU_BIG_ENDIAN
198 bool "Generate big endian code"
199 default n
200
197config CPU_LITTLE_ENDIAN 201config CPU_LITTLE_ENDIAN
198 bool "Generate little endian code" 202 bool "Generate little endian code"
199 default n 203 default n
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 647dd94a0c39..72b96f282689 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -114,6 +114,15 @@ static void set_eit_vector_entries(void)
114 _flush_cache_copyback_all(); 114 _flush_cache_copyback_all();
115} 115}
116 116
117void abort(void)
118{
119 BUG();
120
121 /* if that doesn't kill us, halt */
122 panic("Oops failed to kill thread");
123}
124EXPORT_SYMBOL(abort);
125
117void __init trap_init(void) 126void __init trap_init(void)
118{ 127{
119 set_eit_vector_entries(); 128 set_eit_vector_entries();
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 9d26abdf0dc1..4f798aa671dd 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -39,7 +39,7 @@ config MICROBLAZE
39# Endianness selection 39# Endianness selection
40choice 40choice
41 prompt "Endianness selection" 41 prompt "Endianness selection"
42 default CPU_BIG_ENDIAN 42 default CPU_LITTLE_ENDIAN
43 help 43 help
44 microblaze architectures can be configured for either little or 44 microblaze architectures can be configured for either little or
45 big endian formats. Be sure to select the appropriate mode. 45 big endian formats. Be sure to select the appropriate mode.
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index e77a596f3f1e..06609ca36115 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += fcntl.h
7generic-y += ioctl.h 7generic-y += ioctl.h
8generic-y += ioctls.h 8generic-y += ioctls.h
9generic-y += ipcbuf.h 9generic-y += ipcbuf.h
10generic-y += kvm_para.h
10generic-y += mman.h 11generic-y += mman.h
11generic-y += msgbuf.h 12generic-y += msgbuf.h
12generic-y += param.h 13generic-y += param.h
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index e45ada8fb006..94700c5270a9 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -165,7 +165,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
165 unsigned long attrs) 165 unsigned long attrs)
166{ 166{
167#ifdef CONFIG_MMU 167#ifdef CONFIG_MMU
168 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 168 unsigned long user_count = vma_pages(vma);
169 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 169 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
170 unsigned long off = vma->vm_pgoff; 170 unsigned long off = vma->vm_pgoff;
171 unsigned long pfn; 171 unsigned long pfn;
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 903f3bf48419..7e25c5cc353a 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
155 return __cmpxchg_small(ptr, old, new, size); 155 return __cmpxchg_small(ptr, old, new, size);
156 156
157 case 4: 157 case 4:
158 return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new); 158 return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
159 (u32)old, new);
159 160
160 case 8: 161 case 8:
161 /* lld/scd are only available for MIPS64 */ 162 /* lld/scd are only available for MIPS64 */
162 if (!IS_ENABLED(CONFIG_64BIT)) 163 if (!IS_ENABLED(CONFIG_64BIT))
163 return __cmpxchg_called_with_bad_pointer(); 164 return __cmpxchg_called_with_bad_pointer();
164 165
165 return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new); 166 return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
167 (u64)old, new);
166 168
167 default: 169 default:
168 return __cmpxchg_called_with_bad_pointer(); 170 return __cmpxchg_called_with_bad_pointer();
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 100f23dfa438..ac584c5823d0 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
183} 183}
184 184
185static struct plat_stmmacenet_data ls1x_eth0_pdata = { 185static struct plat_stmmacenet_data ls1x_eth0_pdata = {
186 .bus_id = 0, 186 .bus_id = 0,
187 .phy_addr = -1, 187 .phy_addr = -1,
188#if defined(CONFIG_LOONGSON1_LS1B) 188#if defined(CONFIG_LOONGSON1_LS1B)
189 .interface = PHY_INTERFACE_MODE_MII, 189 .interface = PHY_INTERFACE_MODE_MII,
190#elif defined(CONFIG_LOONGSON1_LS1C) 190#elif defined(CONFIG_LOONGSON1_LS1C)
191 .interface = PHY_INTERFACE_MODE_RMII, 191 .interface = PHY_INTERFACE_MODE_RMII,
192#endif 192#endif
193 .mdio_bus_data = &ls1x_mdio_bus_data, 193 .mdio_bus_data = &ls1x_mdio_bus_data,
194 .dma_cfg = &ls1x_eth_dma_cfg, 194 .dma_cfg = &ls1x_eth_dma_cfg,
195 .has_gmac = 1, 195 .has_gmac = 1,
196 .tx_coe = 1, 196 .tx_coe = 1,
197 .init = ls1x_eth_mux_init, 197 .rx_queues_to_use = 1,
198 .tx_queues_to_use = 1,
199 .init = ls1x_eth_mux_init,
198}; 200};
199 201
200static struct resource ls1x_eth0_resources[] = { 202static struct resource ls1x_eth0_resources[] = {
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = {
222 224
223#ifdef CONFIG_LOONGSON1_LS1B 225#ifdef CONFIG_LOONGSON1_LS1B
224static struct plat_stmmacenet_data ls1x_eth1_pdata = { 226static struct plat_stmmacenet_data ls1x_eth1_pdata = {
225 .bus_id = 1, 227 .bus_id = 1,
226 .phy_addr = -1, 228 .phy_addr = -1,
227 .interface = PHY_INTERFACE_MODE_MII, 229 .interface = PHY_INTERFACE_MODE_MII,
228 .mdio_bus_data = &ls1x_mdio_bus_data, 230 .mdio_bus_data = &ls1x_mdio_bus_data,
229 .dma_cfg = &ls1x_eth_dma_cfg, 231 .dma_cfg = &ls1x_eth_dma_cfg,
230 .has_gmac = 1, 232 .has_gmac = 1,
231 .tx_coe = 1, 233 .tx_coe = 1,
232 .init = ls1x_eth_mux_init, 234 .rx_queues_to_use = 1,
235 .tx_queues_to_use = 1,
236 .init = ls1x_eth_mux_init,
233}; 237};
234 238
235static struct resource ls1x_eth1_resources[] = { 239static struct resource ls1x_eth1_resources[] = {
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 192542dbd972..16d9ef5a78c5 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2558,7 +2558,6 @@ dcopuop:
2558 break; 2558 break;
2559 default: 2559 default:
2560 /* Reserved R6 ops */ 2560 /* Reserved R6 ops */
2561 pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
2562 return SIGILL; 2561 return SIGILL;
2563 } 2562 }
2564 } 2563 }
@@ -2719,7 +2718,6 @@ dcopuop:
2719 break; 2718 break;
2720 default: 2719 default:
2721 /* Reserved R6 ops */ 2720 /* Reserved R6 ops */
2722 pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
2723 return SIGILL; 2721 return SIGILL;
2724 } 2722 }
2725 } 2723 }
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 7646891c4e9b..01b7a87ea678 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
667{ 667{
668 int src, dst, r, td, ts, mem_off, b_off; 668 int src, dst, r, td, ts, mem_off, b_off;
669 bool need_swap, did_move, cmp_eq; 669 bool need_swap, did_move, cmp_eq;
670 unsigned int target; 670 unsigned int target = 0;
671 u64 t64; 671 u64 t64;
672 s64 t64s; 672 s64 t64s;
673 int bpf_op = BPF_OP(insn->code); 673 int bpf_op = BPF_OP(insn->code);
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh
index 5c4f93687039..654d652d7fa1 100755
--- a/arch/mips/tools/generic-board-config.sh
+++ b/arch/mips/tools/generic-board-config.sh
@@ -30,8 +30,6 @@ cfg="$4"
30boards_origin="$5" 30boards_origin="$5"
31shift 5 31shift 5
32 32
33cd "${srctree}"
34
35# Only print Skipping... lines if the user explicitly specified BOARDS=. In the 33# Only print Skipping... lines if the user explicitly specified BOARDS=. In the
36# general case it only serves to obscure the useful output about what actually 34# general case it only serves to obscure the useful output about what actually
37# was included. 35# was included.
@@ -48,7 +46,7 @@ environment*)
48esac 46esac
49 47
50for board in $@; do 48for board in $@; do
51 board_cfg="arch/mips/configs/generic/board-${board}.config" 49 board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config"
52 if [ ! -f "${board_cfg}" ]; then 50 if [ ! -f "${board_cfg}" ]; then
53 echo "WARNING: Board config '${board_cfg}' not found" 51 echo "WARNING: Board config '${board_cfg}' not found"
54 continue 52 continue
@@ -84,7 +82,7 @@ for board in $@; do
84 done || continue 82 done || continue
85 83
86 # Merge this board config fragment into our final config file 84 # Merge this board config fragment into our final config file
87 ./scripts/kconfig/merge_config.sh \ 85 ${srctree}/scripts/kconfig/merge_config.sh \
88 -m -O ${objtree} ${cfg} ${board_cfg} \ 86 -m -O ${objtree} ${cfg} ${board_cfg} \
89 | grep -Ev '^(#|Using)' 87 | grep -Ev '^(#|Using)'
90done 88done
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c6d6272a934f..7baa2265d439 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
35EXPORT_SYMBOL(__xchg8); 35EXPORT_SYMBOL(__xchg8);
36EXPORT_SYMBOL(__xchg32); 36EXPORT_SYMBOL(__xchg32);
37EXPORT_SYMBOL(__cmpxchg_u32); 37EXPORT_SYMBOL(__cmpxchg_u32);
38EXPORT_SYMBOL(__cmpxchg_u64);
38#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
39EXPORT_SYMBOL(__atomic_hash); 40EXPORT_SYMBOL(__atomic_hash);
40#endif 41#endif
41#ifdef CONFIG_64BIT 42#ifdef CONFIG_64BIT
42EXPORT_SYMBOL(__xchg64); 43EXPORT_SYMBOL(__xchg64);
43EXPORT_SYMBOL(__cmpxchg_u64);
44#endif 44#endif
45 45
46#include <linux/uaccess.h> 46#include <linux/uaccess.h>
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index a45a67d526f8..30f92391a93e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -146,7 +146,7 @@ void machine_power_off(void)
146 146
147 /* prevent soft lockup/stalled CPU messages for endless loop. */ 147 /* prevent soft lockup/stalled CPU messages for endless loop. */
148 rcu_sysrq_start(); 148 rcu_sysrq_start();
149 lockup_detector_suspend(); 149 lockup_detector_soft_poweroff();
150 for (;;); 150 for (;;);
151} 151}
152 152
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307c3052..41e60a9c7db2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
74210: ldd 0(%r25), %r25 74210: ldd 0(%r25), %r25
74311: ldd 0(%r24), %r24 74311: ldd 0(%r24), %r24
744#else 744#else
745 /* Load new value into r22/r23 - high/low */ 745 /* Load old value into r22/r23 - high/low */
74610: ldw 0(%r25), %r22 74610: ldw 0(%r25), %r22
74711: ldw 4(%r25), %r23 74711: ldw 4(%r25), %r23
748 /* Load new value into fr4 for atomic store later */ 748 /* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
834 copy %r0, %r28 834 copy %r0, %r28
835#else 835#else
836 /* Compare first word */ 836 /* Compare first word */
83719: ldw,ma 0(%r26), %r29 83719: ldw 0(%r26), %r29
838 sub,= %r29, %r22, %r0 838 sub,= %r29, %r22, %r0
839 b,n cas2_end 839 b,n cas2_end
840 /* Compare second word */ 840 /* Compare second word */
84120: ldw,ma 4(%r26), %r29 84120: ldw 4(%r26), %r29
842 sub,= %r29, %r23, %r0 842 sub,= %r29, %r23, %r0
843 b,n cas2_end 843 b,n cas2_end
844 /* Perform the store */ 844 /* Perform the store */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 2d956aa0a38a..8c0105a49839 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; 253 cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
254 254
255 for_each_online_cpu(cpu) { 255 for_each_online_cpu(cpu) {
256 if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc) 256 if (cpu == 0)
257 continue;
258 if ((cpu0_loc != 0) &&
259 (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
257 continue; 260 continue;
258 261
259 clocksource_cr16.name = "cr16_unstable"; 262 clocksource_cr16.name = "cr16_unstable";
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 1df770e8cbe0..7275fed271af 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
102 case PVR_POWER8: 102 case PVR_POWER8:
103 case PVR_POWER8E: 103 case PVR_POWER8E:
104 case PVR_POWER8NVL: 104 case PVR_POWER8NVL:
105 __flush_tlb_power8(POWER8_TLB_SETS); 105 __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
106 break; 106 break;
107 case PVR_POWER9: 107 case PVR_POWER9:
108 __flush_tlb_power9(POWER9_TLB_SETS_HASH); 108 __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
109 break; 109 break;
110 default: 110 default:
111 pr_err("unknown CPU version for boot TLB flush\n"); 111 pr_err("unknown CPU version for boot TLB flush\n");
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 48da0f5d2f7f..b82586c53560 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
734EXC_VIRT(program_check, 0x4700, 0x100, 0x700) 734EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
735TRAMP_KVM(PACA_EXGEN, 0x700) 735TRAMP_KVM(PACA_EXGEN, 0x700)
736EXC_COMMON_BEGIN(program_check_common) 736EXC_COMMON_BEGIN(program_check_common)
737 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 737 /*
738 * It's possible to receive a TM Bad Thing type program check with
739 * userspace register values (in particular r1), but with SRR1 reporting
740 * that we came from the kernel. Normally that would confuse the bad
741 * stack logic, and we would report a bad kernel stack pointer. Instead
742 * we switch to the emergency stack if we're taking a TM Bad Thing from
743 * the kernel.
744 */
745 li r10,MSR_PR /* Build a mask of MSR_PR .. */
746 oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
747 and r10,r10,r12 /* Mask SRR1 with that. */
748 srdi r10,r10,8 /* Shift it so we can compare */
749 cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
750 bne 1f /* If != go to normal path. */
751
752 /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
753 andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
754 /* 3 in EXCEPTION_PROLOG_COMMON */
755 mr r10,r1 /* Save r1 */
756 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
757 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
758 b 3f /* Jump into the macro !! */
7591: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
738 bl save_nvgprs 760 bl save_nvgprs
739 RECONCILE_IRQ_STATE(r10, r11) 761 RECONCILE_IRQ_STATE(r10, r11)
740 addi r3,r1,STACK_FRAME_OVERHEAD 762 addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b76ca198e09c..72f153c6f3fa 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
624 624
625long __machine_check_early_realmode_p9(struct pt_regs *regs) 625long __machine_check_early_realmode_p9(struct pt_regs *regs)
626{ 626{
627 /*
628 * On POWER9 DD2.1 and below, it's possible to get a machine check
629 * caused by a paste instruction where only DSISR bit 25 is set. This
630 * will result in the MCE handler seeing an unknown event and the kernel
631 * crashing. An MCE that occurs like this is spurious, so we don't need
632 * to do anything in terms of servicing it. If there is something that
633 * needs to be serviced, the CPU will raise the MCE again with the
634 * correct DSISR so that it can be serviced properly. So detect this
635 * case and mark it as handled.
636 */
637 if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
638 return 1;
639
627 return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); 640 return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
628} 641}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 0ac741fae90e..2e3bc16d02b2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p)
904#endif 904#endif
905#endif 905#endif
906 906
907#ifdef CONFIG_PPC_64K_PAGES
908 init_mm.context.pte_frag = NULL;
909#endif
910#ifdef CONFIG_SPAPR_TCE_IOMMU 907#ifdef CONFIG_SPAPR_TCE_IOMMU
911 mm_iommu_init(&init_mm); 908 mm_iommu_init(&init_mm);
912#endif 909#endif
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c83c115858c1..b2c002993d78 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
452 if (MSR_TM_RESV(msr)) 452 if (MSR_TM_RESV(msr))
453 return -EINVAL; 453 return -EINVAL;
454 454
455 /* pull in MSR TM from user context */ 455 /* pull in MSR TS bits from user context */
456 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); 456 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
457 457
458 /*
459 * Ensure that TM is enabled in regs->msr before we leave the signal
460 * handler. It could be the case that (a) user disabled the TM bit
461 * through the manipulation of the MSR bits in uc_mcontext or (b) the
462 * TM bit was disabled because a sufficient number of context switches
463 * happened whilst in the signal handler and load_tm overflowed,
464 * disabling the TM bit. In either case we can end up with an illegal
465 * TM state leading to a TM Bad Thing when we return to userspace.
466 */
467 regs->msr |= MSR_TM;
468
458 /* pull in MSR LE from user context */ 469 /* pull in MSR LE from user context */
459 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); 470 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
460 471
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index c98e90b4ea7b..b4e2b7165f79 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub)
181 * - we have no stack frame and can not allocate one 181 * - we have no stack frame and can not allocate one
182 * - LR points back to the original caller (in A) 182 * - LR points back to the original caller (in A)
183 * - CTR holds the new NIP in C 183 * - CTR holds the new NIP in C
184 * - r0 & r12 are free 184 * - r0, r11 & r12 are free
185 *
186 * r0 can't be used as the base register for a DS-form load or store, so
187 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
188 */ 185 */
189livepatch_handler: 186livepatch_handler:
190 CURRENT_THREAD_INFO(r12, r1) 187 CURRENT_THREAD_INFO(r12, r1)
191 188
192 /* Save stack pointer into r0 */
193 mr r0, r1
194
195 /* Allocate 3 x 8 bytes */ 189 /* Allocate 3 x 8 bytes */
196 ld r1, TI_livepatch_sp(r12) 190 ld r11, TI_livepatch_sp(r12)
197 addi r1, r1, 24 191 addi r11, r11, 24
198 std r1, TI_livepatch_sp(r12) 192 std r11, TI_livepatch_sp(r12)
199 193
200 /* Save toc & real LR on livepatch stack */ 194 /* Save toc & real LR on livepatch stack */
201 std r2, -24(r1) 195 std r2, -24(r11)
202 mflr r12 196 mflr r12
203 std r12, -16(r1) 197 std r12, -16(r11)
204 198
205 /* Store stack end marker */ 199 /* Store stack end marker */
206 lis r12, STACK_END_MAGIC@h 200 lis r12, STACK_END_MAGIC@h
207 ori r12, r12, STACK_END_MAGIC@l 201 ori r12, r12, STACK_END_MAGIC@l
208 std r12, -8(r1) 202 std r12, -8(r11)
209
210 /* Restore real stack pointer */
211 mr r1, r0
212 203
213 /* Put ctr in r12 for global entry and branch there */ 204 /* Put ctr in r12 for global entry and branch there */
214 mfctr r12 205 mfctr r12
@@ -216,36 +207,30 @@ livepatch_handler:
216 207
217 /* 208 /*
218 * Now we are returning from the patched function to the original 209 * Now we are returning from the patched function to the original
219 * caller A. We are free to use r0 and r12, and we can use r2 until we 210 * caller A. We are free to use r11, r12 and we can use r2 until we
220 * restore it. 211 * restore it.
221 */ 212 */
222 213
223 CURRENT_THREAD_INFO(r12, r1) 214 CURRENT_THREAD_INFO(r12, r1)
224 215
225 /* Save stack pointer into r0 */ 216 ld r11, TI_livepatch_sp(r12)
226 mr r0, r1
227
228 ld r1, TI_livepatch_sp(r12)
229 217
230 /* Check stack marker hasn't been trashed */ 218 /* Check stack marker hasn't been trashed */
231 lis r2, STACK_END_MAGIC@h 219 lis r2, STACK_END_MAGIC@h
232 ori r2, r2, STACK_END_MAGIC@l 220 ori r2, r2, STACK_END_MAGIC@l
233 ld r12, -8(r1) 221 ld r12, -8(r11)
2341: tdne r12, r2 2221: tdne r12, r2
235 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 223 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
236 224
237 /* Restore LR & toc from livepatch stack */ 225 /* Restore LR & toc from livepatch stack */
238 ld r12, -16(r1) 226 ld r12, -16(r11)
239 mtlr r12 227 mtlr r12
240 ld r2, -24(r1) 228 ld r2, -24(r11)
241 229
242 /* Pop livepatch stack frame */ 230 /* Pop livepatch stack frame */
243 CURRENT_THREAD_INFO(r12, r0) 231 CURRENT_THREAD_INFO(r12, r1)
244 subi r1, r1, 24 232 subi r11, r11, 24
245 std r1, TI_livepatch_sp(r12) 233 std r11, TI_livepatch_sp(r12)
246
247 /* Restore real stack pointer */
248 mr r1, r0
249 234
250 /* Return to original caller of live patched function */ 235 /* Return to original caller of live patched function */
251 blr 236 blr
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 2f6eadd9408d..c702a8981452 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu)
310 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) 310 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
311 return 0; 311 return 0;
312 312
313 if (watchdog_suspended)
314 return 0;
315
316 if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) 313 if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
317 return 0; 314 return 0;
318 315
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void)
358 wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; 355 wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
359} 356}
360 357
361void watchdog_nmi_reconfigure(void) 358void watchdog_nmi_stop(void)
362{ 359{
363 int cpu; 360 int cpu;
364 361
365 watchdog_calc_timeouts();
366
367 for_each_cpu(cpu, &wd_cpus_enabled) 362 for_each_cpu(cpu, &wd_cpus_enabled)
368 stop_wd_on_cpu(cpu); 363 stop_wd_on_cpu(cpu);
364}
369 365
366void watchdog_nmi_start(void)
367{
368 int cpu;
369
370 watchdog_calc_timeouts();
370 for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) 371 for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
371 start_wd_on_cpu(cpu); 372 start_wd_on_cpu(cpu);
372} 373}
373 374
374/* 375/*
375 * This runs after lockup_detector_init() which sets up watchdog_cpumask. 376 * Invoked from core watchdog init.
376 */ 377 */
377static int __init powerpc_watchdog_init(void) 378int __init watchdog_nmi_probe(void)
378{ 379{
379 int err; 380 int err;
380 381
381 watchdog_calc_timeouts(); 382 err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
382 383 "powerpc/watchdog:online",
383 err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", 384 start_wd_on_cpu, stop_wd_on_cpu);
384 start_wd_on_cpu, stop_wd_on_cpu); 385 if (err < 0) {
385 if (err < 0)
386 pr_warn("Watchdog could not be initialized"); 386 pr_warn("Watchdog could not be initialized");
387 387 return err;
388 }
388 return 0; 389 return 0;
389} 390}
390arch_initcall(powerpc_watchdog_init);
391 391
392static void handle_backtrace_ipi(struct pt_regs *regs) 392static void handle_backtrace_ipi(struct pt_regs *regs)
393{ 393{
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
478 return ret; 478 return ret;
479 479
480 dir = iommu_tce_direction(tce); 480 dir = iommu_tce_direction(tce);
481
482 idx = srcu_read_lock(&vcpu->kvm->srcu);
483
481 if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, 484 if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
482 tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) 485 tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
483 return H_PARAMETER; 486 ret = H_PARAMETER;
487 goto unlock_exit;
488 }
484 489
485 entry = ioba >> stt->page_shift; 490 entry = ioba >> stt->page_shift;
486 491
487 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 492 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
488 if (dir == DMA_NONE) { 493 if (dir == DMA_NONE)
489 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, 494 ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
490 stit->tbl, entry); 495 stit->tbl, entry);
491 } else { 496 else
492 idx = srcu_read_lock(&vcpu->kvm->srcu);
493 ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, 497 ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
494 entry, ua, dir); 498 entry, ua, dir);
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496 }
497 499
498 if (ret == H_SUCCESS) 500 if (ret == H_SUCCESS)
499 continue; 501 continue;
500 502
501 if (ret == H_TOO_HARD) 503 if (ret == H_TOO_HARD)
502 return ret; 504 goto unlock_exit;
503 505
504 WARN_ON_ONCE(1); 506 WARN_ON_ONCE(1);
505 kvmppc_clear_tce(stit->tbl, entry); 507 kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
507 509
508 kvmppc_tce_put(stt, entry, tce); 510 kvmppc_tce_put(stt, entry, tce);
509 511
510 return H_SUCCESS; 512unlock_exit:
513 srcu_read_unlock(&vcpu->kvm->srcu, idx);
514
515 return ret;
511} 516}
512EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); 517EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
513 518
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 17936f82d3c7..42639fba89e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
989 beq no_xive 989 beq no_xive
990 ld r11, VCPU_XIVE_SAVED_STATE(r4) 990 ld r11, VCPU_XIVE_SAVED_STATE(r4)
991 li r9, TM_QW1_OS 991 li r9, TM_QW1_OS
992 stdcix r11,r9,r10
993 eieio 992 eieio
993 stdcix r11,r9,r10
994 lwz r11, VCPU_XIVE_CAM_WORD(r4) 994 lwz r11, VCPU_XIVE_CAM_WORD(r4)
995 li r9, TM_QW1_OS + TM_WORD2 995 li r9, TM_QW1_OS + TM_WORD2
996 stwcix r11,r9,r10 996 stwcix r11,r9,r10
997 li r9, 1 997 li r9, 1
998 stw r9, VCPU_XIVE_PUSHED(r4) 998 stw r9, VCPU_XIVE_PUSHED(r4)
999 eieio
999no_xive: 1000no_xive:
1000#endif /* CONFIG_KVM_XICS */ 1001#endif /* CONFIG_KVM_XICS */
1001 1002
@@ -1121,6 +1122,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1121BEGIN_FTR_SECTION 1122BEGIN_FTR_SECTION
1122 mtspr SPRN_PPR, r0 1123 mtspr SPRN_PPR, r0
1123END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1124END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1125
1126/* Move canary into DSISR to check for later */
1127BEGIN_FTR_SECTION
1128 li r0, 0x7fff
1129 mtspr SPRN_HDSISR, r0
1130END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1131
1124 ld r0, VCPU_GPR(R0)(r4) 1132 ld r0, VCPU_GPR(R0)(r4)
1125 ld r4, VCPU_GPR(R4)(r4) 1133 ld r4, VCPU_GPR(R4)(r4)
1126 1134
@@ -1303,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1303 bne 3f 1311 bne 3f
1304BEGIN_FTR_SECTION 1312BEGIN_FTR_SECTION
1305 PPC_MSGSYNC 1313 PPC_MSGSYNC
1314 lwsync
1306END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1315END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1307 lbz r0, HSTATE_HOST_IPI(r13) 1316 lbz r0, HSTATE_HOST_IPI(r13)
1308 cmpwi r0, 0 1317 cmpwi r0, 0
@@ -1393,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1393 cmpldi cr0, r10, 0 1402 cmpldi cr0, r10, 0
1394 beq 1f 1403 beq 1f
1395 /* First load to pull the context, we ignore the value */ 1404 /* First load to pull the context, we ignore the value */
1396 lwzx r11, r7, r10
1397 eieio 1405 eieio
1406 lwzx r11, r7, r10
1398 /* Second load to recover the context state (Words 0 and 1) */ 1407 /* Second load to recover the context state (Words 0 and 1) */
1399 ldx r11, r6, r10 1408 ldx r11, r6, r10
1400 b 3f 1409 b 3f
@@ -1402,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1402 cmpldi cr0, r10, 0 1411 cmpldi cr0, r10, 0
1403 beq 1f 1412 beq 1f
1404 /* First load to pull the context, we ignore the value */ 1413 /* First load to pull the context, we ignore the value */
1405 lwzcix r11, r7, r10
1406 eieio 1414 eieio
1415 lwzcix r11, r7, r10
1407 /* Second load to recover the context state (Words 0 and 1) */ 1416 /* Second load to recover the context state (Words 0 and 1) */
1408 ldcix r11, r6, r10 1417 ldcix r11, r6, r10
14093: std r11, VCPU_XIVE_SAVED_STATE(r9) 14183: std r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1413,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1413 stw r10, VCPU_XIVE_PUSHED(r9) 1422 stw r10, VCPU_XIVE_PUSHED(r9)
1414 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) 1423 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1415 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) 1424 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
1425 eieio
14161: 14261:
1417#endif /* CONFIG_KVM_XICS */ 1427#endif /* CONFIG_KVM_XICS */
1418 /* Save more register state */ 1428 /* Save more register state */
@@ -1956,9 +1966,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1956kvmppc_hdsi: 1966kvmppc_hdsi:
1957 ld r3, VCPU_KVM(r9) 1967 ld r3, VCPU_KVM(r9)
1958 lbz r0, KVM_RADIX(r3) 1968 lbz r0, KVM_RADIX(r3)
1959 cmpwi r0, 0
1960 mfspr r4, SPRN_HDAR 1969 mfspr r4, SPRN_HDAR
1961 mfspr r6, SPRN_HDSISR 1970 mfspr r6, SPRN_HDSISR
1971BEGIN_FTR_SECTION
1972 /* Look for DSISR canary. If we find it, retry instruction */
1973 cmpdi r6, 0x7fff
1974 beq 6f
1975END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1976 cmpwi r0, 0
1962 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ 1977 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
1963 /* HPTE not found fault or protection fault? */ 1978 /* HPTE not found fault or protection fault? */
1964 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 1979 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
@@ -2776,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2776 PPC_MSGCLR(6) 2791 PPC_MSGCLR(6)
2777 /* see if it's a host IPI */ 2792 /* see if it's a host IPI */
2778 li r3, 1 2793 li r3, 1
2794BEGIN_FTR_SECTION
2795 PPC_MSGSYNC
2796 lwsync
2797END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2779 lbz r0, HSTATE_HOST_IPI(r13) 2798 lbz r0, HSTATE_HOST_IPI(r13)
2780 cmpwi r0, 0 2799 cmpwi r0, 0
2781 bnelr 2800 bnelr
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 13304622ab1c..bf457843e032 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
622 return -EINVAL; 622 return -EINVAL;
623 state = &sb->irq_state[idx]; 623 state = &sb->irq_state[idx];
624 arch_spin_lock(&sb->lock); 624 arch_spin_lock(&sb->lock);
625 *server = state->guest_server; 625 *server = state->act_server;
626 *priority = state->guest_priority; 626 *priority = state->guest_priority;
627 arch_spin_unlock(&sb->lock); 627 arch_spin_unlock(&sb->lock);
628 628
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1331 xive->saved_src_count++; 1331 xive->saved_src_count++;
1332 1332
1333 /* Convert saved state into something compatible with xics */ 1333 /* Convert saved state into something compatible with xics */
1334 val = state->guest_server; 1334 val = state->act_server;
1335 prio = state->saved_scan_prio; 1335 prio = state->saved_scan_prio;
1336 1336
1337 if (prio == MASKED) { 1337 if (prio == MASKED) {
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1507 /* First convert prio and mark interrupt as untargetted */ 1507 /* First convert prio and mark interrupt as untargetted */
1508 act_prio = xive_prio_from_guest(guest_prio); 1508 act_prio = xive_prio_from_guest(guest_prio);
1509 state->act_priority = MASKED; 1509 state->act_priority = MASKED;
1510 state->guest_server = server;
1511 1510
1512 /* 1511 /*
1513 * We need to drop the lock due to the mutex below. Hopefully 1512 * We need to drop the lock due to the mutex below. Hopefully
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 5938f7644dc1..6ba63f8e8a61 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
35 struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ 35 struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
36 36
37 /* Targetting as set by guest */ 37 /* Targetting as set by guest */
38 u32 guest_server; /* Current guest selected target */
39 u8 guest_priority; /* Guest set priority */ 38 u8 guest_priority; /* Guest set priority */
40 u8 saved_priority; /* Saved priority when masking */ 39 u8 saved_priority; /* Saved priority when masking */
41 40
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3480faaf1ef8..ee279c7f4802 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
644 break; 644 break;
645#endif 645#endif
646 case KVM_CAP_PPC_HTM: 646 case KVM_CAP_PPC_HTM:
647 r = cpu_has_feature(CPU_FTR_TM_COMP) && 647 r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
648 is_kvmppc_hv_enabled(kvm);
649 break; 648 break;
650 default: 649 default:
651 r = 0; 650 r = 0;
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5e8418c28bd8..f208f560aecd 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1684 * Logical instructions 1684 * Logical instructions
1685 */ 1685 */
1686 case 26: /* cntlzw */ 1686 case 26: /* cntlzw */
1687 op->val = __builtin_clz((unsigned int) regs->gpr[rd]); 1687 val = (unsigned int) regs->gpr[rd];
1688 op->val = ( val ? __builtin_clz(val) : 32 );
1688 goto logical_done; 1689 goto logical_done;
1689#ifdef __powerpc64__ 1690#ifdef __powerpc64__
1690 case 58: /* cntlzd */ 1691 case 58: /* cntlzd */
1691 op->val = __builtin_clzl(regs->gpr[rd]); 1692 val = regs->gpr[rd];
1693 op->val = ( val ? __builtin_clzl(val) : 64 );
1692 goto logical_done; 1694 goto logical_done;
1693#endif 1695#endif
1694 case 28: /* and */ 1696 case 28: /* and */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b95c584ce19d..a51df9ef529d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1438,7 +1438,6 @@ out:
1438 1438
1439int arch_update_cpu_topology(void) 1439int arch_update_cpu_topology(void)
1440{ 1440{
1441 lockdep_assert_cpus_held();
1442 return numa_update_cpu_topology(true); 1441 return numa_update_cpu_topology(true);
1443} 1442}
1444 1443
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 65eda1997c3f..f6c7f54c0515 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
361 break; 361 break;
362 } 362 }
363 wmb(); 363 wmb();
364 local_irq_restore(flags);
364 flush_tlb_kernel_range((unsigned long)page_address(start), 365 flush_tlb_kernel_range((unsigned long)page_address(start),
365 (unsigned long)page_address(page)); 366 (unsigned long)page_address(page));
366 local_irq_restore(flags);
367 return err; 367 return err;
368} 368}
369 369
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 9ccac86f3463..88126245881b 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event)
399 399
400 /* Take the mutex lock for this node and then decrement the reference count */ 400 /* Take the mutex lock for this node and then decrement the reference count */
401 mutex_lock(&ref->lock); 401 mutex_lock(&ref->lock);
402 if (ref->refc == 0) {
403 /*
404 * The scenario where this is true is, when perf session is
405 * started, followed by offlining of all cpus in a given node.
406 *
407 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
408 * function set the ref->count to zero, if the cpu which is
409 * about to offline is the last cpu in a given node and make
410 * an OPAL call to disable the engine in that node.
411 *
412 */
413 mutex_unlock(&ref->lock);
414 return;
415 }
402 ref->refc--; 416 ref->refc--;
403 if (ref->refc == 0) { 417 if (ref->refc == 0) {
404 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, 418 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size)
523 537
524 /* We need only vbase for core counters */ 538 /* We need only vbase for core counters */
525 mem_info->vbase = page_address(alloc_pages_node(phys_id, 539 mem_info->vbase = page_address(alloc_pages_node(phys_id,
526 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 540 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
527 get_order(size))); 541 __GFP_NOWARN, get_order(size)));
528 if (!mem_info->vbase) 542 if (!mem_info->vbase)
529 return -ENOMEM; 543 return -ENOMEM;
530 544
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event)
646 return; 660 return;
647 661
648 mutex_lock(&ref->lock); 662 mutex_lock(&ref->lock);
663 if (ref->refc == 0) {
664 /*
665 * The scenario where this is true is, when perf session is
666 * started, followed by offlining of all cpus in a given core.
667 *
668 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
669 * function set the ref->count to zero, if the cpu which is
670 * about to offline is the last cpu in a given core and make
671 * an OPAL call to disable the engine in that core.
672 *
673 */
674 mutex_unlock(&ref->lock);
675 return;
676 }
649 ref->refc--; 677 ref->refc--;
650 if (ref->refc == 0) { 678 if (ref->refc == 0) {
651 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, 679 rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
763 * free the memory in cpu offline path. 791 * free the memory in cpu offline path.
764 */ 792 */
765 local_mem = page_address(alloc_pages_node(phys_id, 793 local_mem = page_address(alloc_pages_node(phys_id,
766 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 794 GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
767 get_order(size))); 795 __GFP_NOWARN, get_order(size)));
768 if (!local_mem) 796 if (!local_mem)
769 return -ENOMEM; 797 return -ENOMEM;
770 798
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
1148 } 1176 }
1149 1177
1150 /* Only free the attr_groups which are dynamically allocated */ 1178 /* Only free the attr_groups which are dynamically allocated */
1151 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); 1179 if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
1180 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
1152 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); 1181 kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
1153 kfree(pmu_ptr); 1182 kfree(pmu_ptr);
1154 return; 1183 return;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 897aa1400eb8..bbb73aa0eb8f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
272#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE 272#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
273static unsigned long pnv_memory_block_size(void) 273static unsigned long pnv_memory_block_size(void)
274{ 274{
275 return 256UL * 1024 * 1024; 275 /*
276 * We map the kernel linear region with 1GB large pages on radix. For
277 * memory hot unplug to work our memory block size must be at least
278 * this size.
279 */
280 if (radix_enabled())
281 return 1UL * 1024 * 1024 * 1024;
282 else
283 return 256UL * 1024 * 1024;
276} 284}
277#endif 285#endif
278 286
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9234be1e66f5..5011ffea4e4b 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -71,6 +71,8 @@
71#define RIWAR_WRTYP_ALLOC 0x00006000 71#define RIWAR_WRTYP_ALLOC 0x00006000
72#define RIWAR_SIZE_MASK 0x0000003F 72#define RIWAR_SIZE_MASK 0x0000003F
73 73
74static DEFINE_SPINLOCK(fsl_rio_config_lock);
75
74#define __fsl_read_rio_config(x, addr, err, op) \ 76#define __fsl_read_rio_config(x, addr, err, op) \
75 __asm__ __volatile__( \ 77 __asm__ __volatile__( \
76 "1: "op" %1,0(%2)\n" \ 78 "1: "op" %1,0(%2)\n" \
@@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
184 u8 hopcount, u32 offset, int len, u32 *val) 186 u8 hopcount, u32 offset, int len, u32 *val)
185{ 187{
186 struct rio_priv *priv = mport->priv; 188 struct rio_priv *priv = mport->priv;
189 unsigned long flags;
187 u8 *data; 190 u8 *data;
188 u32 rval, err = 0; 191 u32 rval, err = 0;
189 192
@@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
197 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) 200 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
198 return -EINVAL; 201 return -EINVAL;
199 202
203 spin_lock_irqsave(&fsl_rio_config_lock, flags);
204
200 out_be32(&priv->maint_atmu_regs->rowtar, 205 out_be32(&priv->maint_atmu_regs->rowtar,
201 (destid << 22) | (hopcount << 12) | (offset >> 12)); 206 (destid << 22) | (hopcount << 12) | (offset >> 12));
202 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); 207 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
213 __fsl_read_rio_config(rval, data, err, "lwz"); 218 __fsl_read_rio_config(rval, data, err, "lwz");
214 break; 219 break;
215 default: 220 default:
221 spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
216 return -EINVAL; 222 return -EINVAL;
217 } 223 }
218 224
@@ -221,6 +227,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
221 err, destid, hopcount, offset); 227 err, destid, hopcount, offset);
222 } 228 }
223 229
230 spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
224 *val = rval; 231 *val = rval;
225 232
226 return err; 233 return err;
@@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
244 u8 hopcount, u32 offset, int len, u32 val) 251 u8 hopcount, u32 offset, int len, u32 val)
245{ 252{
246 struct rio_priv *priv = mport->priv; 253 struct rio_priv *priv = mport->priv;
254 unsigned long flags;
247 u8 *data; 255 u8 *data;
256 int ret = 0;
257
248 pr_debug 258 pr_debug
249 ("fsl_rio_config_write:" 259 ("fsl_rio_config_write:"
250 " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", 260 " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
@@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
255 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) 265 if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
256 return -EINVAL; 266 return -EINVAL;
257 267
268 spin_lock_irqsave(&fsl_rio_config_lock, flags);
269
258 out_be32(&priv->maint_atmu_regs->rowtar, 270 out_be32(&priv->maint_atmu_regs->rowtar,
259 (destid << 22) | (hopcount << 12) | (offset >> 12)); 271 (destid << 22) | (hopcount << 12) | (offset >> 12));
260 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); 272 out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
@@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
271 out_be32((u32 *) data, val); 283 out_be32((u32 *) data, val);
272 break; 284 break;
273 default: 285 default:
274 return -EINVAL; 286 ret = -EINVAL;
275 } 287 }
288 spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
276 289
277 return 0; 290 return ret;
278} 291}
279 292
280static void fsl_rio_inbound_mem_init(struct rio_priv *priv) 293static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index ab7a74c75be8..88b35a3dcdc5 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -104,6 +104,8 @@
104 104
105#define DOORBELL_MESSAGE_SIZE 0x08 105#define DOORBELL_MESSAGE_SIZE 0x08
106 106
107static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
108
107struct rio_msg_regs { 109struct rio_msg_regs {
108 u32 omr; 110 u32 omr;
109 u32 osr; 111 u32 osr;
@@ -626,9 +628,13 @@ err_out:
626int fsl_rio_doorbell_send(struct rio_mport *mport, 628int fsl_rio_doorbell_send(struct rio_mport *mport,
627 int index, u16 destid, u16 data) 629 int index, u16 destid, u16 data)
628{ 630{
631 unsigned long flags;
632
629 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", 633 pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
630 index, destid, data); 634 index, destid, data);
631 635
636 spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
637
632 /* In the serial version silicons, such as MPC8548, MPC8641, 638 /* In the serial version silicons, such as MPC8548, MPC8641,
633 * below operations is must be. 639 * below operations is must be.
634 */ 640 */
@@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport,
638 out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); 644 out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
639 out_be32(&dbell->dbell_regs->odmr, 0x00000001); 645 out_be32(&dbell->dbell_regs->odmr, 0x00000001);
640 646
647 spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
648
641 return 0; 649 return 0;
642} 650}
643 651
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index f387318678b9..a3b8d7d1316e 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void)
1402 1402
1403 if (xive_ops->teardown_cpu) 1403 if (xive_ops->teardown_cpu)
1404 xive_ops->teardown_cpu(cpu, xc); 1404 xive_ops->teardown_cpu(cpu, xc);
1405
1406#ifdef CONFIG_SMP
1407 /* Get rid of IPI */
1408 xive_cleanup_cpu_ipi(cpu, xc);
1409#endif
1410
1411 /* Disable and free the queues */
1412 xive_cleanup_cpu_queues(cpu, xc);
1405} 1413}
1406 1414
1407void xive_kexec_teardown_cpu(int secondary) 1415void xive_kexec_teardown_cpu(int secondary)
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index f24a70bc6855..d9c4c9366049 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
431 431
432static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) 432static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
433{ 433{
434 if (!xc->hw_ipi)
435 return;
436
434 xive_irq_bitmap_free(xc->hw_ipi); 437 xive_irq_bitmap_free(xc->hw_ipi);
438 xc->hw_ipi = 0;
435} 439}
436#endif /* CONFIG_SMP */ 440#endif /* CONFIG_SMP */
437 441
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index afa46a7406ea..04e042edbab7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -27,6 +27,7 @@ CONFIG_NET=y
27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 27CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
28CONFIG_DEVTMPFS=y 28CONFIG_DEVTMPFS=y
29# CONFIG_FIRMWARE_IN_KERNEL is not set 29# CONFIG_FIRMWARE_IN_KERNEL is not set
30CONFIG_BLK_DEV_RAM=y
30# CONFIG_BLK_DEV_XPRAM is not set 31# CONFIG_BLK_DEV_XPRAM is not set
31# CONFIG_DCSSBLK is not set 32# CONFIG_DCSSBLK is not set
32# CONFIG_DASD is not set 33# CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
59# CONFIG_NETWORK_FILESYSTEMS is not set 60# CONFIG_NETWORK_FILESYSTEMS is not set
60CONFIG_PRINTK_TIME=y 61CONFIG_PRINTK_TIME=y
61CONFIG_DEBUG_INFO=y 62CONFIG_DEBUG_INFO=y
63CONFIG_DEBUG_FS=y
62CONFIG_DEBUG_KERNEL=y 64CONFIG_DEBUG_KERNEL=y
63CONFIG_PANIC_ON_OOPS=y 65CONFIG_PANIC_ON_OOPS=y
64# CONFIG_SCHED_DEBUG is not set 66# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21900e1cee9c..d185aa3965bf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
521 tmhh %r8,0x0001 # test problem state bit 521 tmhh %r8,0x0001 # test problem state bit
522 jnz 2f # -> fault in user space 522 jnz 2f # -> fault in user space
523#if IS_ENABLED(CONFIG_KVM) 523#if IS_ENABLED(CONFIG_KVM)
524 # cleanup critical section for sie64a 524 # cleanup critical section for program checks in sie64a
525 lgr %r14,%r9 525 lgr %r14,%r9
526 slg %r14,BASED(.Lsie_critical_start) 526 slg %r14,BASED(.Lsie_critical_start)
527 clg %r14,BASED(.Lsie_critical_length) 527 clg %r14,BASED(.Lsie_critical_length)
528 jhe 0f 528 jhe 0f
529 brasl %r14,.Lcleanup_sie 529 lg %r14,__SF_EMPTY(%r15) # get control block pointer
530 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
531 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
532 larl %r9,sie_exit # skip forward to sie_exit
530#endif 533#endif
5310: tmhh %r8,0x4000 # PER bit set in old PSW ? 5340: tmhh %r8,0x4000 # PER bit set in old PSW ?
532 jnz 1f # -> enabled, can't be a double fault 535 jnz 1f # -> enabled, can't be a double fault
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1cee6753d47a..495ff6959dec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
293 lc->lpp = LPP_MAGIC; 293 lc->lpp = LPP_MAGIC;
294 lc->current_pid = tsk->pid; 294 lc->current_pid = tsk->pid;
295 lc->user_timer = tsk->thread.user_timer; 295 lc->user_timer = tsk->thread.user_timer;
296 lc->guest_timer = tsk->thread.guest_timer;
296 lc->system_timer = tsk->thread.system_timer; 297 lc->system_timer = tsk->thread.system_timer;
298 lc->hardirq_timer = tsk->thread.hardirq_timer;
299 lc->softirq_timer = tsk->thread.softirq_timer;
297 lc->steal_timer = 0; 300 lc->steal_timer = 0;
298} 301}
299 302
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
index 4d1ef6d74bd6..2ae0e938b657 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h
@@ -43,9 +43,7 @@ enum {
43 GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, 43 GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
44 GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, 44 GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
45 45
46 /* Port H */ 46 /* Port H - Port H does not have a Data Register */
47 GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
48 GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
49 47
50 /* Port I - not on device */ 48 /* Port I - not on device */
51 49
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 2a0ca8780f0d..13c495a9fc00 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -45,9 +45,7 @@ enum {
45 GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, 45 GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4,
46 GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, 46 GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0,
47 47
48 /* Port H */ 48 /* Port H - Port H does not have a Data Register */
49 GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4,
50 GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0,
51 49
52 /* Port I - not on device */ 50 /* Port I - not on device */
53 51
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h
index 3bb74e534d0f..78961ab78a5a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7722.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h
@@ -67,7 +67,7 @@ enum {
67 GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, 67 GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0,
68 68
69 /* PTQ */ 69 /* PTQ */
70 GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, 70 GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4,
71 GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, 71 GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0,
72 72
73 /* PTR */ 73 /* PTR */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 5340f3bc1863..b40fb541e72a 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -40,7 +40,7 @@ enum {
40 40
41 /* PTJ */ 41 /* PTJ */
42 GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, 42 GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3,
43 GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV, 43 GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6,
44 44
45 /* PTK */ 45 /* PTK */
46 GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, 46 GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3,
@@ -48,7 +48,7 @@ enum {
48 48
49 /* PTL */ 49 /* PTL */
50 GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, 50 GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3,
51 GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV, 51 GPIO_PTL4, GPIO_PTL5, GPIO_PTL6,
52 52
53 /* PTM */ 53 /* PTM */
54 GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, 54 GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3,
@@ -56,7 +56,7 @@ enum {
56 56
57 /* PTN */ 57 /* PTN */
58 GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, 58 GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3,
59 GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV, 59 GPIO_PTN4, GPIO_PTN5, GPIO_PTN6,
60 60
61 /* PTO */ 61 /* PTO */
62 GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, 62 GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3,
@@ -68,7 +68,7 @@ enum {
68 68
69 /* PTQ */ 69 /* PTQ */
70 GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, 70 GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3,
71 GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV, 71 GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6,
72 72
73 /* PTR */ 73 /* PTR */
74 GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, 74 GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 0be3828752e5..4e83f950713e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -44,7 +44,6 @@ config SPARC
44 select ARCH_HAS_SG_CHAIN 44 select ARCH_HAS_SG_CHAIN
45 select CPU_NO_EFFICIENT_FFS 45 select CPU_NO_EFFICIENT_FFS
46 select LOCKDEP_SMALL if LOCKDEP 46 select LOCKDEP_SMALL if LOCKDEP
47 select ARCH_WANT_RELAX_ORDER
48 47
49config SPARC32 48config SPARC32
50 def_bool !64BIT 49 def_bool !64BIT
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 0b034ebbda2a..7f69d17de354 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -98,7 +98,7 @@ static struct clocksource timer_clocksource = {
98 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 98 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
99}; 99};
100 100
101static void __init timer_setup(void) 101static void __init um_timer_setup(void)
102{ 102{
103 int err; 103 int err;
104 104
@@ -132,5 +132,5 @@ void read_persistent_clock(struct timespec *ts)
132void __init time_init(void) 132void __init time_init(void)
133{ 133{
134 timer_set_signal_handler(); 134 timer_set_signal_handler();
135 late_time_init = timer_setup; 135 late_time_init = um_timer_setup;
136} 136}
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
45 45
46 vzeroupper 46 vzeroupper
47 # 4 * 32 byte stack, 32-byte aligned 47 # 4 * 32 byte stack, 32-byte aligned
48 mov %rsp, %r8 48 lea 8(%rsp),%r10
49 and $~31, %rsp 49 and $~31, %rsp
50 sub $0x80, %rsp 50 sub $0x80, %rsp
51 51
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
443 vmovdqu %ymm15,0x01e0(%rsi) 443 vmovdqu %ymm15,0x01e0(%rsi)
444 444
445 vzeroupper 445 vzeroupper
446 mov %r8,%rsp 446 lea -8(%r10),%rsp
447 ret 447 ret
448ENDPROC(chacha20_8block_xor_avx2) 448ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
160 # done with the slightly better performing SSSE3 byte shuffling, 160 # done with the slightly better performing SSSE3 byte shuffling,
161 # 7/12-bit word rotation uses traditional shift+OR. 161 # 7/12-bit word rotation uses traditional shift+OR.
162 162
163 mov %rsp,%r11 163 lea 8(%rsp),%r10
164 sub $0x80,%rsp 164 sub $0x80,%rsp
165 and $~63,%rsp 165 and $~63,%rsp
166 166
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
625 pxor %xmm1,%xmm15 625 pxor %xmm1,%xmm15
626 movdqu %xmm15,0xf0(%rsi) 626 movdqu %xmm15,0xf0(%rsi)
627 627
628 mov %r11,%rsp 628 lea -8(%r10),%rsp
629 ret 629 ret
630ENDPROC(chacha20_4block_xor_ssse3) 630ENDPROC(chacha20_4block_xor_ssse3)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 8a13d468635a..50e0d2bc4528 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -176,7 +176,7 @@
176/* 176/*
177 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The 177 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
178 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding 178 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
179 * is just setting the LSB, which makes it an invalid stack address and is also 179 * is just clearing the MSB, which makes it an invalid stack address and is also
180 * a signal to the unwinder that it's a pt_regs pointer in disguise. 180 * a signal to the unwinder that it's a pt_regs pointer in disguise.
181 * 181 *
182 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the 182 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
@@ -185,7 +185,7 @@
185.macro ENCODE_FRAME_POINTER 185.macro ENCODE_FRAME_POINTER
186#ifdef CONFIG_FRAME_POINTER 186#ifdef CONFIG_FRAME_POINTER
187 mov %esp, %ebp 187 mov %esp, %ebp
188 orl $0x1, %ebp 188 andl $0x7fffffff, %ebp
189#endif 189#endif
190.endm 190.endm
191 191
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 49167258d587..f6cdb7a1455e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
808 808
809.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 809.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
810ENTRY(\sym) 810ENTRY(\sym)
811 UNWIND_HINT_IRET_REGS offset=8 811 UNWIND_HINT_IRET_REGS offset=\has_error_code*8
812 812
813 /* Sanity check */ 813 /* Sanity check */
814 .if \shift_ist != -1 && \paranoid == 0 814 .if \shift_ist != -1 && \paranoid == 0
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 16076eb34699..141e07b06216 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
546 if (event->attr.type != bts_pmu.type) 546 if (event->attr.type != bts_pmu.type)
547 return -ENOENT; 547 return -ENOENT;
548 548
549 if (x86_add_exclusive(x86_lbr_exclusive_bts))
550 return -EBUSY;
551
552 /* 549 /*
553 * BTS leaks kernel addresses even when CPL0 tracing is 550 * BTS leaks kernel addresses even when CPL0 tracing is
554 * disabled, so disallow intel_bts driver for unprivileged 551 * disabled, so disallow intel_bts driver for unprivileged
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
562 !capable(CAP_SYS_ADMIN)) 559 !capable(CAP_SYS_ADMIN))
563 return -EACCES; 560 return -EACCES;
564 561
562 if (x86_add_exclusive(x86_lbr_exclusive_bts))
563 return -EBUSY;
564
565 ret = x86_reserve_hardware(); 565 ret = x86_reserve_hardware();
566 if (ret) { 566 if (ret) {
567 x86_del_exclusive(x86_lbr_exclusive_bts); 567 x86_del_exclusive(x86_lbr_exclusive_bts);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 829e89cfcee2..9fb9a1f1e47b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void)
4409 return 0; 4409 return 0;
4410 } 4410 }
4411 4411
4412 if (lockup_detector_suspend() != 0) { 4412 cpus_read_lock();
4413 pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n"); 4413
4414 return 0; 4414 hardlockup_detector_perf_stop();
4415 }
4416 4415
4417 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); 4416 x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
4418 4417
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void)
4420 x86_pmu.commit_scheduling = NULL; 4419 x86_pmu.commit_scheduling = NULL;
4421 x86_pmu.stop_scheduling = NULL; 4420 x86_pmu.stop_scheduling = NULL;
4422 4421
4423 lockup_detector_resume(); 4422 hardlockup_detector_perf_restart();
4424
4425 cpus_read_lock();
4426 4423
4427 for_each_online_cpu(c) 4424 for_each_online_cpu(c)
4428 free_excl_cntrs(c); 4425 free_excl_cntrs(c);
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4cf100ff2a37..72db0664a53d 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -552,6 +552,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
552 552
553 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates), 553 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
554 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), 554 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
555 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
555 556
556 X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), 557 X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
557 X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), 558 X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
@@ -560,6 +561,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
560 X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), 561 X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
561 562
562 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates), 563 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
564 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_DENVERTON, glm_cstates),
565
566 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GEMINI_LAKE, glm_cstates),
563 { }, 567 { },
564}; 568};
565MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match); 569MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 8e2457cb6b4a..005908ee9333 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -775,6 +775,9 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
775 X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), 775 X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),
776 776
777 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), 777 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init),
778 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init),
779
780 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GEMINI_LAKE, hsw_rapl_init),
778 {}, 781 {},
779}; 782};
780 783
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 1c5390f1cf09..d45e06346f14 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
822 pmus[i].type = type; 822 pmus[i].type = type;
823 pmus[i].boxes = kzalloc(size, GFP_KERNEL); 823 pmus[i].boxes = kzalloc(size, GFP_KERNEL);
824 if (!pmus[i].boxes) 824 if (!pmus[i].boxes)
825 return -ENOMEM; 825 goto err;
826 } 826 }
827 827
828 type->pmus = pmus; 828 type->pmus = pmus;
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
836 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + 836 attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
837 sizeof(*attr_group), GFP_KERNEL); 837 sizeof(*attr_group), GFP_KERNEL);
838 if (!attr_group) 838 if (!attr_group)
839 return -ENOMEM; 839 goto err;
840 840
841 attrs = (struct attribute **)(attr_group + 1); 841 attrs = (struct attribute **)(attr_group + 1);
842 attr_group->name = "events"; 842 attr_group->name = "events";
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
849 } 849 }
850 850
851 type->pmu_group = &uncore_pmu_attr_group; 851 type->pmu_group = &uncore_pmu_attr_group;
852
852 return 0; 853 return 0;
854
855err:
856 for (i = 0; i < type->num_boxes; i++)
857 kfree(pmus[i].boxes);
858 kfree(pmus);
859
860 return -ENOMEM;
853} 861}
854 862
855static int __init 863static int __init
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index db1fe377e6dd..a7196818416a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3462,7 +3462,7 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
3462static struct intel_uncore_type skx_uncore_iio = { 3462static struct intel_uncore_type skx_uncore_iio = {
3463 .name = "iio", 3463 .name = "iio",
3464 .num_counters = 4, 3464 .num_counters = 4,
3465 .num_boxes = 5, 3465 .num_boxes = 6,
3466 .perf_ctr_bits = 48, 3466 .perf_ctr_bits = 48,
3467 .event_ctl = SKX_IIO0_MSR_PMON_CTL0, 3467 .event_ctl = SKX_IIO0_MSR_PMON_CTL0,
3468 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, 3468 .perf_ctr = SKX_IIO0_MSR_PMON_CTR0,
@@ -3492,7 +3492,7 @@ static const struct attribute_group skx_uncore_format_group = {
3492static struct intel_uncore_type skx_uncore_irp = { 3492static struct intel_uncore_type skx_uncore_irp = {
3493 .name = "irp", 3493 .name = "irp",
3494 .num_counters = 2, 3494 .num_counters = 2,
3495 .num_boxes = 5, 3495 .num_boxes = 6,
3496 .perf_ctr_bits = 48, 3496 .perf_ctr_bits = 48,
3497 .event_ctl = SKX_IRP0_MSR_PMON_CTL0, 3497 .event_ctl = SKX_IRP0_MSR_PMON_CTL0,
3498 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, 3498 .perf_ctr = SKX_IRP0_MSR_PMON_CTR0,
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 4bb3ec69e8ea..06723671ae4e 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -63,6 +63,14 @@ static bool test_intel(int idx)
63 case INTEL_FAM6_ATOM_SILVERMONT1: 63 case INTEL_FAM6_ATOM_SILVERMONT1:
64 case INTEL_FAM6_ATOM_SILVERMONT2: 64 case INTEL_FAM6_ATOM_SILVERMONT2:
65 case INTEL_FAM6_ATOM_AIRMONT: 65 case INTEL_FAM6_ATOM_AIRMONT:
66
67 case INTEL_FAM6_ATOM_GOLDMONT:
68 case INTEL_FAM6_ATOM_DENVERTON:
69
70 case INTEL_FAM6_ATOM_GEMINI_LAKE:
71
72 case INTEL_FAM6_XEON_PHI_KNL:
73 case INTEL_FAM6_XEON_PHI_KNM:
66 if (idx == PERF_MSR_SMI) 74 if (idx == PERF_MSR_SMI)
67 return true; 75 return true;
68 break; 76 break;
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 1a8eb550c40f..a5db63f728a2 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs);
85u32 *hv_vp_index; 85u32 *hv_vp_index;
86EXPORT_SYMBOL_GPL(hv_vp_index); 86EXPORT_SYMBOL_GPL(hv_vp_index);
87 87
88u32 hv_max_vp_index;
89
88static int hv_cpu_init(unsigned int cpu) 90static int hv_cpu_init(unsigned int cpu)
89{ 91{
90 u64 msr_vp_index; 92 u64 msr_vp_index;
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu)
93 95
94 hv_vp_index[smp_processor_id()] = msr_vp_index; 96 hv_vp_index[smp_processor_id()] = msr_vp_index;
95 97
98 if (msr_vp_index > hv_max_vp_index)
99 hv_max_vp_index = msr_vp_index;
100
96 return 0; 101 return 0;
97} 102}
98 103
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 39e7f6e50919..9cc9e1c1e2db 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex {
36/* Each gva in gva_list encodes up to 4096 pages to flush */ 36/* Each gva in gva_list encodes up to 4096 pages to flush */
37#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) 37#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
38 38
39static struct hv_flush_pcpu __percpu *pcpu_flush; 39static struct hv_flush_pcpu __percpu **pcpu_flush;
40 40
41static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex; 41static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex;
42 42
43/* 43/*
44 * Fills in gva_list starting from offset. Returns the number of items added. 44 * Fills in gva_list starting from offset. Returns the number of items added.
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
76{ 76{
77 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; 77 int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
78 78
79 /* valid_bank_mask can represent up to 64 banks */
80 if (hv_max_vp_index / 64 >= 64)
81 return 0;
82
83 /*
84 * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex
85 * structs are not cleared between calls, we risk flushing unneeded
86 * vCPUs otherwise.
87 */
88 for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
89 flush->hv_vp_set.bank_contents[vcpu_bank] = 0;
90
79 /* 91 /*
80 * Some banks may end up being empty but this is acceptable. 92 * Some banks may end up being empty but this is acceptable.
81 */ 93 */
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush,
83 vcpu = hv_cpu_number_to_vp_number(cpu); 95 vcpu = hv_cpu_number_to_vp_number(cpu);
84 vcpu_bank = vcpu / 64; 96 vcpu_bank = vcpu / 64;
85 vcpu_offset = vcpu % 64; 97 vcpu_offset = vcpu % 64;
86
87 /* valid_bank_mask can represent up to 64 banks */
88 if (vcpu_bank >= 64)
89 return 0;
90
91 __set_bit(vcpu_offset, (unsigned long *) 98 __set_bit(vcpu_offset, (unsigned long *)
92 &flush->hv_vp_set.bank_contents[vcpu_bank]); 99 &flush->hv_vp_set.bank_contents[vcpu_bank]);
93 if (vcpu_bank >= nr_bank) 100 if (vcpu_bank >= nr_bank)
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
102 const struct flush_tlb_info *info) 109 const struct flush_tlb_info *info)
103{ 110{
104 int cpu, vcpu, gva_n, max_gvas; 111 int cpu, vcpu, gva_n, max_gvas;
112 struct hv_flush_pcpu **flush_pcpu;
105 struct hv_flush_pcpu *flush; 113 struct hv_flush_pcpu *flush;
106 u64 status = U64_MAX; 114 u64 status = U64_MAX;
107 unsigned long flags; 115 unsigned long flags;
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
116 124
117 local_irq_save(flags); 125 local_irq_save(flags);
118 126
119 flush = this_cpu_ptr(pcpu_flush); 127 flush_pcpu = this_cpu_ptr(pcpu_flush);
128
129 if (unlikely(!*flush_pcpu))
130 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
131
132 flush = *flush_pcpu;
133
134 if (unlikely(!flush)) {
135 local_irq_restore(flags);
136 goto do_native;
137 }
120 138
121 if (info->mm) { 139 if (info->mm) {
122 flush->address_space = virt_to_phys(info->mm->pgd); 140 flush->address_space = virt_to_phys(info->mm->pgd);
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
173 const struct flush_tlb_info *info) 191 const struct flush_tlb_info *info)
174{ 192{
175 int nr_bank = 0, max_gvas, gva_n; 193 int nr_bank = 0, max_gvas, gva_n;
194 struct hv_flush_pcpu_ex **flush_pcpu;
176 struct hv_flush_pcpu_ex *flush; 195 struct hv_flush_pcpu_ex *flush;
177 u64 status = U64_MAX; 196 u64 status = U64_MAX;
178 unsigned long flags; 197 unsigned long flags;
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
187 206
188 local_irq_save(flags); 207 local_irq_save(flags);
189 208
190 flush = this_cpu_ptr(pcpu_flush_ex); 209 flush_pcpu = this_cpu_ptr(pcpu_flush_ex);
210
211 if (unlikely(!*flush_pcpu))
212 *flush_pcpu = page_address(alloc_page(GFP_ATOMIC));
213
214 flush = *flush_pcpu;
215
216 if (unlikely(!flush)) {
217 local_irq_restore(flags);
218 goto do_native;
219 }
191 220
192 if (info->mm) { 221 if (info->mm) {
193 flush->address_space = virt_to_phys(info->mm->pgd); 222 flush->address_space = virt_to_phys(info->mm->pgd);
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
222 flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; 251 flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
223 status = hv_do_rep_hypercall( 252 status = hv_do_rep_hypercall(
224 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, 253 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
225 0, nr_bank + 2, flush, NULL); 254 0, nr_bank, flush, NULL);
226 } else if (info->end && 255 } else if (info->end &&
227 ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { 256 ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
228 status = hv_do_rep_hypercall( 257 status = hv_do_rep_hypercall(
229 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, 258 HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
230 0, nr_bank + 2, flush, NULL); 259 0, nr_bank, flush, NULL);
231 } else { 260 } else {
232 gva_n = fill_gva_list(flush->gva_list, nr_bank, 261 gva_n = fill_gva_list(flush->gva_list, nr_bank,
233 info->start, info->end); 262 info->start, info->end);
234 status = hv_do_rep_hypercall( 263 status = hv_do_rep_hypercall(
235 HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, 264 HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
236 gva_n, nr_bank + 2, flush, NULL); 265 gva_n, nr_bank, flush, NULL);
237 } 266 }
238 267
239 local_irq_restore(flags); 268 local_irq_restore(flags);
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void)
266 return; 295 return;
267 296
268 if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) 297 if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
269 pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); 298 pcpu_flush = alloc_percpu(struct hv_flush_pcpu *);
270 else 299 else
271 pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); 300 pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *);
272} 301}
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index e0bb46c02857..0e2a5edbce00 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
231 ksig->ka.sa.sa_restorer) 231 ksig->ka.sa.sa_restorer)
232 sp = (unsigned long) ksig->ka.sa.sa_restorer; 232 sp = (unsigned long) ksig->ka.sa.sa_restorer;
233 233
234 if (fpu->fpstate_active) { 234 if (fpu->initialized) {
235 unsigned long fx_aligned, math_size; 235 unsigned long fx_aligned, math_size;
236 236
237 sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size); 237 sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index e7636bac7372..6c98821fef5e 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -62,8 +62,10 @@
62#define new_len2 145f-144f 62#define new_len2 145f-144f
63 63
64/* 64/*
65 * max without conditionals. Idea adapted from: 65 * gas compatible max based on the idea from:
66 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax 66 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
67 *
68 * The additional "-" is needed because gas uses a "true" value of -1.
67 */ 69 */
68#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) 70#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
69 71
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index c096624137ae..ccbe24e697c4 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
103 alt_end_marker ":\n" 103 alt_end_marker ":\n"
104 104
105/* 105/*
106 * max without conditionals. Idea adapted from: 106 * gas compatible max based on the idea from:
107 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax 107 * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
108 * 108 *
109 * The additional "-" is needed because gas works with s32s. 109 * The additional "-" is needed because gas uses a "true" value of -1.
110 */ 110 */
111#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" 111#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
112 112
113/* 113/*
114 * Pad the second replacement alternative with additional NOPs if it is 114 * Pad the second replacement alternative with additional NOPs if it is
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index c1eadbaf1115..b0dc91f4bedc 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -11,10 +11,12 @@
11# define __ASM_FORM_COMMA(x) " " #x "," 11# define __ASM_FORM_COMMA(x) " " #x ","
12#endif 12#endif
13 13
14#ifdef CONFIG_X86_32 14#ifndef __x86_64__
15/* 32 bit */
15# define __ASM_SEL(a,b) __ASM_FORM(a) 16# define __ASM_SEL(a,b) __ASM_FORM(a)
16# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) 17# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
17#else 18#else
19/* 64 bit */
18# define __ASM_SEL(a,b) __ASM_FORM(b) 20# define __ASM_SEL(a,b) __ASM_FORM(b)
19# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) 21# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
20#endif 22#endif
@@ -139,8 +141,8 @@
139 * gets set up by the containing function. If you forget to do this, objtool 141 * gets set up by the containing function. If you forget to do this, objtool
140 * may print a "call without frame pointer save/setup" warning. 142 * may print a "call without frame pointer save/setup" warning.
141 */ 143 */
142register unsigned int __asm_call_sp asm("esp"); 144register unsigned long current_stack_pointer asm(_ASM_SP);
143#define ASM_CALL_CONSTRAINT "+r" (__asm_call_sp) 145#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
144#endif 146#endif
145 147
146#endif /* _ASM_X86_ASM_H */ 148#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 554cdb205d17..e3221ffa304e 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -23,11 +23,9 @@
23/* 23/*
24 * High level FPU state handling functions: 24 * High level FPU state handling functions:
25 */ 25 */
26extern void fpu__activate_curr(struct fpu *fpu); 26extern void fpu__initialize(struct fpu *fpu);
27extern void fpu__activate_fpstate_read(struct fpu *fpu); 27extern void fpu__prepare_read(struct fpu *fpu);
28extern void fpu__activate_fpstate_write(struct fpu *fpu); 28extern void fpu__prepare_write(struct fpu *fpu);
29extern void fpu__current_fpstate_write_begin(void);
30extern void fpu__current_fpstate_write_end(void);
31extern void fpu__save(struct fpu *fpu); 29extern void fpu__save(struct fpu *fpu);
32extern void fpu__restore(struct fpu *fpu); 30extern void fpu__restore(struct fpu *fpu);
33extern int fpu__restore_sig(void __user *buf, int ia32_frame); 31extern int fpu__restore_sig(void __user *buf, int ia32_frame);
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
120 err; \ 118 err; \
121}) 119})
122 120
123#define check_insn(insn, output, input...) \ 121#define kernel_insn(insn, output, input...) \
124({ \
125 int err; \
126 asm volatile("1:" #insn "\n\t" \ 122 asm volatile("1:" #insn "\n\t" \
127 "2:\n" \ 123 "2:\n" \
128 ".section .fixup,\"ax\"\n" \ 124 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
129 "3: movl $-1,%[err]\n" \ 125 : output : input)
130 " jmp 2b\n" \
131 ".previous\n" \
132 _ASM_EXTABLE(1b, 3b) \
133 : [err] "=r" (err), output \
134 : "0"(0), input); \
135 err; \
136})
137 126
138static inline int copy_fregs_to_user(struct fregs_state __user *fx) 127static inline int copy_fregs_to_user(struct fregs_state __user *fx)
139{ 128{
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
153 142
154static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) 143static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
155{ 144{
156 int err;
157
158 if (IS_ENABLED(CONFIG_X86_32)) { 145 if (IS_ENABLED(CONFIG_X86_32)) {
159 err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); 146 kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
160 } else { 147 } else {
161 if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { 148 if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
162 err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); 149 kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
163 } else { 150 } else {
164 /* See comment in copy_fxregs_to_kernel() below. */ 151 /* See comment in copy_fxregs_to_kernel() below. */
165 err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); 152 kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
166 } 153 }
167 } 154 }
168 /* Copying from a kernel buffer to FPU registers should never fail: */
169 WARN_ON_FPU(err);
170} 155}
171 156
172static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) 157static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
183 168
184static inline void copy_kernel_to_fregs(struct fregs_state *fx) 169static inline void copy_kernel_to_fregs(struct fregs_state *fx)
185{ 170{
186 int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); 171 kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
187
188 WARN_ON_FPU(err);
189} 172}
190 173
191static inline int copy_user_to_fregs(struct fregs_state __user *fx) 174static inline int copy_user_to_fregs(struct fregs_state __user *fx)
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
281 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact 264 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
282 * XSAVE area format. 265 * XSAVE area format.
283 */ 266 */
284#define XSTATE_XRESTORE(st, lmask, hmask, err) \ 267#define XSTATE_XRESTORE(st, lmask, hmask) \
285 asm volatile(ALTERNATIVE(XRSTOR, \ 268 asm volatile(ALTERNATIVE(XRSTOR, \
286 XRSTORS, X86_FEATURE_XSAVES) \ 269 XRSTORS, X86_FEATURE_XSAVES) \
287 "\n" \ 270 "\n" \
288 "xor %[err], %[err]\n" \
289 "3:\n" \ 271 "3:\n" \
290 ".pushsection .fixup,\"ax\"\n" \ 272 _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
291 "4: movl $-2, %[err]\n" \ 273 : \
292 "jmp 3b\n" \
293 ".popsection\n" \
294 _ASM_EXTABLE(661b, 4b) \
295 : [err] "=r" (err) \
296 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ 274 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
297 : "memory") 275 : "memory")
298 276
@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
336 else 314 else
337 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); 315 XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
338 316
339 /* We should never fault when copying from a kernel buffer: */ 317 /*
318 * We should never fault when copying from a kernel buffer, and the FPU
319 * state we set at boot time should be valid.
320 */
340 WARN_ON_FPU(err); 321 WARN_ON_FPU(err);
341} 322}
342 323
@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
350 u32 hmask = mask >> 32; 331 u32 hmask = mask >> 32;
351 int err; 332 int err;
352 333
353 WARN_ON(!alternatives_patched); 334 WARN_ON_FPU(!alternatives_patched);
354 335
355 XSTATE_XSAVE(xstate, lmask, hmask, err); 336 XSTATE_XSAVE(xstate, lmask, hmask, err);
356 337
@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
365{ 346{
366 u32 lmask = mask; 347 u32 lmask = mask;
367 u32 hmask = mask >> 32; 348 u32 hmask = mask >> 32;
368 int err;
369
370 XSTATE_XRESTORE(xstate, lmask, hmask, err);
371 349
372 /* We should never fault when copying from a kernel buffer: */ 350 XSTATE_XRESTORE(xstate, lmask, hmask);
373 WARN_ON_FPU(err);
374} 351}
375 352
376/* 353/*
@@ -526,38 +503,17 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
526 */ 503 */
527static inline void fpregs_deactivate(struct fpu *fpu) 504static inline void fpregs_deactivate(struct fpu *fpu)
528{ 505{
529 WARN_ON_FPU(!fpu->fpregs_active);
530
531 fpu->fpregs_active = 0;
532 this_cpu_write(fpu_fpregs_owner_ctx, NULL); 506 this_cpu_write(fpu_fpregs_owner_ctx, NULL);
533 trace_x86_fpu_regs_deactivated(fpu); 507 trace_x86_fpu_regs_deactivated(fpu);
534} 508}
535 509
536static inline void fpregs_activate(struct fpu *fpu) 510static inline void fpregs_activate(struct fpu *fpu)
537{ 511{
538 WARN_ON_FPU(fpu->fpregs_active);
539
540 fpu->fpregs_active = 1;
541 this_cpu_write(fpu_fpregs_owner_ctx, fpu); 512 this_cpu_write(fpu_fpregs_owner_ctx, fpu);
542 trace_x86_fpu_regs_activated(fpu); 513 trace_x86_fpu_regs_activated(fpu);
543} 514}
544 515
545/* 516/*
546 * The question "does this thread have fpu access?"
547 * is slightly racy, since preemption could come in
548 * and revoke it immediately after the test.
549 *
550 * However, even in that very unlikely scenario,
551 * we can just assume we have FPU access - typically
552 * to save the FP state - we'll just take a #NM
553 * fault and get the FPU access back.
554 */
555static inline int fpregs_active(void)
556{
557 return current->thread.fpu.fpregs_active;
558}
559
560/*
561 * FPU state switching for scheduling. 517 * FPU state switching for scheduling.
562 * 518 *
563 * This is a two-stage process: 519 * This is a two-stage process:
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
571static inline void 527static inline void
572switch_fpu_prepare(struct fpu *old_fpu, int cpu) 528switch_fpu_prepare(struct fpu *old_fpu, int cpu)
573{ 529{
574 if (old_fpu->fpregs_active) { 530 if (old_fpu->initialized) {
575 if (!copy_fpregs_to_fpstate(old_fpu)) 531 if (!copy_fpregs_to_fpstate(old_fpu))
576 old_fpu->last_cpu = -1; 532 old_fpu->last_cpu = -1;
577 else 533 else
578 old_fpu->last_cpu = cpu; 534 old_fpu->last_cpu = cpu;
579 535
580 /* But leave fpu_fpregs_owner_ctx! */ 536 /* But leave fpu_fpregs_owner_ctx! */
581 old_fpu->fpregs_active = 0;
582 trace_x86_fpu_regs_deactivated(old_fpu); 537 trace_x86_fpu_regs_deactivated(old_fpu);
583 } else 538 } else
584 old_fpu->last_cpu = -1; 539 old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
595static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu) 550static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
596{ 551{
597 bool preload = static_cpu_has(X86_FEATURE_FPU) && 552 bool preload = static_cpu_has(X86_FEATURE_FPU) &&
598 new_fpu->fpstate_active; 553 new_fpu->initialized;
599 554
600 if (preload) { 555 if (preload) {
601 if (!fpregs_state_valid(new_fpu, cpu)) 556 if (!fpregs_state_valid(new_fpu, cpu))
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
617 struct fpu *fpu = &current->thread.fpu; 572 struct fpu *fpu = &current->thread.fpu;
618 573
619 preempt_disable(); 574 preempt_disable();
620 if (!fpregs_active()) 575 fpregs_activate(fpu);
621 fpregs_activate(fpu);
622 preempt_enable(); 576 preempt_enable();
623} 577}
624 578
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 3c80f5b9c09d..a1520575d86b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -68,6 +68,9 @@ struct fxregs_state {
68/* Default value for fxregs_state.mxcsr: */ 68/* Default value for fxregs_state.mxcsr: */
69#define MXCSR_DEFAULT 0x1f80 69#define MXCSR_DEFAULT 0x1f80
70 70
71/* Copy both mxcsr & mxcsr_flags with a single u64 memcpy: */
72#define MXCSR_AND_FLAGS_SIZE sizeof(u64)
73
71/* 74/*
72 * Software based FPU emulation state. This is arbitrary really, 75 * Software based FPU emulation state. This is arbitrary really,
73 * it matches the x87 format to make it easier to understand: 76 * it matches the x87 format to make it easier to understand:
@@ -290,36 +293,13 @@ struct fpu {
290 unsigned int last_cpu; 293 unsigned int last_cpu;
291 294
292 /* 295 /*
293 * @fpstate_active: 296 * @initialized:
294 * 297 *
295 * This flag indicates whether this context is active: if the task 298 * This flag indicates whether this context is initialized: if the task
296 * is not running then we can restore from this context, if the task 299 * is not running then we can restore from this context, if the task
297 * is running then we should save into this context. 300 * is running then we should save into this context.
298 */ 301 */
299 unsigned char fpstate_active; 302 unsigned char initialized;
300
301 /*
302 * @fpregs_active:
303 *
304 * This flag determines whether a given context is actively
305 * loaded into the FPU's registers and that those registers
306 * represent the task's current FPU state.
307 *
308 * Note the interaction with fpstate_active:
309 *
310 * # task does not use the FPU:
311 * fpstate_active == 0
312 *
313 * # task uses the FPU and regs are active:
314 * fpstate_active == 1 && fpregs_active == 1
315 *
316 * # the regs are inactive but still match fpstate:
317 * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
318 *
319 * The third state is what we use for the lazy restore optimization
320 * on lazy-switching CPUs.
321 */
322 unsigned char fpregs_active;
323 303
324 /* 304 /*
325 * @state: 305 * @state:
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 1b2799e0699a..83fee2469eb7 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -48,8 +48,12 @@ void fpu__xstate_clear_all_cpu_caps(void);
48void *get_xsave_addr(struct xregs_state *xsave, int xstate); 48void *get_xsave_addr(struct xregs_state *xsave, int xstate);
49const void *get_xsave_field_ptr(int xstate_field); 49const void *get_xsave_field_ptr(int xstate_field);
50int using_compacted_format(void); 50int using_compacted_format(void);
51int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, 51int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
52 void __user *ubuf, struct xregs_state *xsave); 52int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
53int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, 53int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
54 struct xregs_state *xsave); 54int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);
55
56/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
57extern int validate_xstate_header(const struct xstate_header *hdr);
58
55#endif 59#endif
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index bc62e7cbf1b1..59ad3d132353 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
88bool kvm_para_available(void); 88bool kvm_para_available(void);
89unsigned int kvm_arch_para_features(void); 89unsigned int kvm_arch_para_features(void);
90void __init kvm_guest_init(void); 90void __init kvm_guest_init(void);
91void kvm_async_pf_task_wait(u32 token); 91void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
92void kvm_async_pf_task_wake(u32 token); 92void kvm_async_pf_task_wake(u32 token);
93u32 kvm_read_and_reset_pf_reason(void); 93u32 kvm_read_and_reset_pf_reason(void);
94extern void kvm_disable_steal_time(void); 94extern void kvm_disable_steal_time(void);
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
103 103
104#else /* CONFIG_KVM_GUEST */ 104#else /* CONFIG_KVM_GUEST */
105#define kvm_guest_init() do {} while (0) 105#define kvm_guest_init() do {} while (0)
106#define kvm_async_pf_task_wait(T) do {} while(0) 106#define kvm_async_pf_task_wait(T, I) do {} while(0)
107#define kvm_async_pf_task_wake(T) do {} while(0) 107#define kvm_async_pf_task_wake(T) do {} while(0)
108 108
109static inline bool kvm_para_available(void) 109static inline bool kvm_para_available(void)
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 181264989db5..8edac1de2e35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -187,7 +187,6 @@ struct mca_msr_regs {
187 187
188extern struct mce_vendor_flags mce_flags; 188extern struct mce_vendor_flags mce_flags;
189 189
190extern struct mca_config mca_cfg;
191extern struct mca_msr_regs msr_ops; 190extern struct mca_msr_regs msr_ops;
192 191
193enum mce_notifier_prios { 192enum mce_notifier_prios {
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index c120b5db178a..3c856a15b98e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
126 DEBUG_LOCKS_WARN_ON(preemptible()); 126 DEBUG_LOCKS_WARN_ON(preemptible());
127} 127}
128 128
129static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 129void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
130{
131 int cpu = smp_processor_id();
132
133 if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
134 cpumask_clear_cpu(cpu, mm_cpumask(mm));
135}
136 130
137static inline int init_new_context(struct task_struct *tsk, 131static inline int init_new_context(struct task_struct *tsk,
138 struct mm_struct *mm) 132 struct mm_struct *mm)
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 738503e1f80c..530f448fddaf 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
289 * to this information. 289 * to this information.
290 */ 290 */
291extern u32 *hv_vp_index; 291extern u32 *hv_vp_index;
292extern u32 hv_max_vp_index;
292 293
293/** 294/**
294 * hv_cpu_number_to_vp_number() - Map CPU to VP. 295 * hv_cpu_number_to_vp_number() - Map CPU to VP.
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 5161da1a0fa0..89e7eeb5cec1 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -158,17 +158,6 @@ struct thread_info {
158 */ 158 */
159#ifndef __ASSEMBLY__ 159#ifndef __ASSEMBLY__
160 160
161static inline unsigned long current_stack_pointer(void)
162{
163 unsigned long sp;
164#ifdef CONFIG_X86_64
165 asm("mov %%rsp,%0" : "=g" (sp));
166#else
167 asm("mov %%esp,%0" : "=g" (sp));
168#endif
169 return sp;
170}
171
172/* 161/*
173 * Walks up the stack frames to make sure that the specified object is 162 * Walks up the stack frames to make sure that the specified object is
174 * entirely contained by a single stack frame. 163 * entirely contained by a single stack frame.
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 4893abf7f74f..c4aed0de565e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,6 +82,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
82#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) 82#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
83#endif 83#endif
84 84
85static inline bool tlb_defer_switch_to_init_mm(void)
86{
87 /*
88 * If we have PCID, then switching to init_mm is reasonably
89 * fast. If we don't have PCID, then switching to init_mm is
90 * quite slow, so we try to defer it in the hopes that we can
91 * avoid it entirely. The latter approach runs the risk of
92 * receiving otherwise unnecessary IPIs.
93 *
94 * This choice is just a heuristic. The tlb code can handle this
95 * function returning true or false regardless of whether we have
96 * PCID.
97 */
98 return !static_cpu_has(X86_FEATURE_PCID);
99}
100
85/* 101/*
86 * 6 because 6 should be plenty and struct tlb_state will fit in 102 * 6 because 6 should be plenty and struct tlb_state will fit in
87 * two cache lines. 103 * two cache lines.
@@ -105,6 +121,23 @@ struct tlb_state {
105 u16 next_asid; 121 u16 next_asid;
106 122
107 /* 123 /*
124 * We can be in one of several states:
125 *
126 * - Actively using an mm. Our CPU's bit will be set in
127 * mm_cpumask(loaded_mm) and is_lazy == false;
128 *
129 * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
130 * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
131 *
132 * - Lazily using a real mm. loaded_mm != &init_mm, our bit
133 * is set in mm_cpumask(loaded_mm), but is_lazy == true.
134 * We're heuristically guessing that the CR3 load we
135 * skipped more than makes up for the overhead added by
136 * lazy mode.
137 */
138 bool is_lazy;
139
140 /*
108 * Access to this CR4 shadow and to H/W CR4 is protected by 141 * Access to this CR4 shadow and to H/W CR4 is protected by
109 * disabling interrupts when modifying either one. 142 * disabling interrupts when modifying either one.
110 */ 143 */
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 342e59789fcd..39f7a27bef13 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -12,25 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
12 12
13 TP_STRUCT__entry( 13 TP_STRUCT__entry(
14 __field(struct fpu *, fpu) 14 __field(struct fpu *, fpu)
15 __field(bool, fpregs_active) 15 __field(bool, initialized)
16 __field(bool, fpstate_active)
17 __field(u64, xfeatures) 16 __field(u64, xfeatures)
18 __field(u64, xcomp_bv) 17 __field(u64, xcomp_bv)
19 ), 18 ),
20 19
21 TP_fast_assign( 20 TP_fast_assign(
22 __entry->fpu = fpu; 21 __entry->fpu = fpu;
23 __entry->fpregs_active = fpu->fpregs_active; 22 __entry->initialized = fpu->initialized;
24 __entry->fpstate_active = fpu->fpstate_active;
25 if (boot_cpu_has(X86_FEATURE_OSXSAVE)) { 23 if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
26 __entry->xfeatures = fpu->state.xsave.header.xfeatures; 24 __entry->xfeatures = fpu->state.xsave.header.xfeatures;
27 __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv; 25 __entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
28 } 26 }
29 ), 27 ),
30 TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx", 28 TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
31 __entry->fpu, 29 __entry->fpu,
32 __entry->fpregs_active, 30 __entry->initialized,
33 __entry->fpstate_active,
34 __entry->xfeatures, 31 __entry->xfeatures,
35 __entry->xcomp_bv 32 __entry->xcomp_bv
36 ) 33 )
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 78e8fcc87d4c..4b892917edeb 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -337,7 +337,7 @@ do { \
337 _ASM_EXTABLE(1b, 4b) \ 337 _ASM_EXTABLE(1b, 4b) \
338 _ASM_EXTABLE(2b, 4b) \ 338 _ASM_EXTABLE(2b, 4b) \
339 : "=r" (retval), "=&A"(x) \ 339 : "=r" (retval), "=&A"(x) \
340 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ 340 : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
341 "i" (errret), "0" (retval)); \ 341 "i" (errret), "0" (retval)); \
342}) 342})
343 343
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 128a1a0b1450..7cb282e9e587 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -551,13 +551,13 @@ static inline void
551MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, 551MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
552 struct desc_struct desc) 552 struct desc_struct desc)
553{ 553{
554 u32 *p = (u32 *) &desc;
555
556 mcl->op = __HYPERVISOR_update_descriptor; 554 mcl->op = __HYPERVISOR_update_descriptor;
557 if (sizeof(maddr) == sizeof(long)) { 555 if (sizeof(maddr) == sizeof(long)) {
558 mcl->args[0] = maddr; 556 mcl->args[0] = maddr;
559 mcl->args[1] = *(unsigned long *)&desc; 557 mcl->args[1] = *(unsigned long *)&desc;
560 } else { 558 } else {
559 u32 *p = (u32 *)&desc;
560
561 mcl->args[0] = maddr; 561 mcl->args[0] = maddr;
562 mcl->args[1] = maddr >> 32; 562 mcl->args[1] = maddr >> 32;
563 mcl->args[2] = *p++; 563 mcl->args[2] = *p++;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 458da8509b75..6db28f17ff28 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = {
27 {} 27 {}
28}; 28};
29 29
30#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
31
30const struct pci_device_id amd_nb_misc_ids[] = { 32const struct pci_device_id amd_nb_misc_ids[] = {
31 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 33 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
32 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 34 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
37 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
38 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 40 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
39 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, 41 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
42 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
40 {} 43 {}
41}; 44};
42EXPORT_SYMBOL_GPL(amd_nb_misc_ids); 45EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
48 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 51 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
49 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, 52 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
50 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, 53 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
54 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
51 {} 55 {}
52}; 56};
53 57
@@ -402,11 +406,48 @@ void amd_flush_garts(void)
402} 406}
403EXPORT_SYMBOL_GPL(amd_flush_garts); 407EXPORT_SYMBOL_GPL(amd_flush_garts);
404 408
409static void __fix_erratum_688(void *info)
410{
411#define MSR_AMD64_IC_CFG 0xC0011021
412
413 msr_set_bit(MSR_AMD64_IC_CFG, 3);
414 msr_set_bit(MSR_AMD64_IC_CFG, 14);
415}
416
417/* Apply erratum 688 fix so machines without a BIOS fix work. */
418static __init void fix_erratum_688(void)
419{
420 struct pci_dev *F4;
421 u32 val;
422
423 if (boot_cpu_data.x86 != 0x14)
424 return;
425
426 if (!amd_northbridges.num)
427 return;
428
429 F4 = node_to_amd_nb(0)->link;
430 if (!F4)
431 return;
432
433 if (pci_read_config_dword(F4, 0x164, &val))
434 return;
435
436 if (val & BIT(2))
437 return;
438
439 on_each_cpu(__fix_erratum_688, NULL, 0);
440
441 pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
442}
443
405static __init int init_amd_nbs(void) 444static __init int init_amd_nbs(void)
406{ 445{
407 amd_cache_northbridges(); 446 amd_cache_northbridges();
408 amd_cache_gart(); 447 amd_cache_gart();
409 448
449 fix_erratum_688();
450
410 return 0; 451 return 0;
411} 452}
412 453
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d705c769f77d..ff891772c9f8 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void)
573 return ~0U; 573 return ~0U;
574} 574}
575 575
576static u32 skx_deadline_rev(void)
577{
578 switch (boot_cpu_data.x86_mask) {
579 case 0x03: return 0x01000136;
580 case 0x04: return 0x02000014;
581 }
582
583 return ~0U;
584}
585
576static const struct x86_cpu_id deadline_match[] = { 586static const struct x86_cpu_id deadline_match[] = {
577 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), 587 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
578 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), 588 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
579 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), 589 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
580 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014), 590 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
581 591
582 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), 592 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
583 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), 593 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void)
600 const struct x86_cpu_id *m; 610 const struct x86_cpu_id *m;
601 u32 rev; 611 u32 rev;
602 612
603 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) 613 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
614 boot_cpu_has(X86_FEATURE_HYPERVISOR))
604 return; 615 return;
605 616
606 m = x86_match_cpu(deadline_match); 617 m = x86_match_cpu(deadline_match);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 24f749324c0f..9990a71e311f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
831 } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 831 } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
832 unsigned int apicid, nshared, first, last; 832 unsigned int apicid, nshared, first, last;
833 833
834 this_leaf = this_cpu_ci->info_list + index;
835 nshared = base->eax.split.num_threads_sharing + 1; 834 nshared = base->eax.split.num_threads_sharing + 1;
836 apicid = cpu_data(cpu).apicid; 835 apicid = cpu_data(cpu).apicid;
837 first = apicid - (apicid % nshared); 836 first = apicid - (apicid % nshared);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 098530a93bb7..debb974fd17d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,6 @@
1#ifndef __X86_MCE_INTERNAL_H__
2#define __X86_MCE_INTERNAL_H__
3
1#include <linux/device.h> 4#include <linux/device.h>
2#include <asm/mce.h> 5#include <asm/mce.h>
3 6
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { }
108static inline void mce_register_injector_chain(struct notifier_block *nb) { } 111static inline void mce_register_injector_chain(struct notifier_block *nb) { }
109static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } 112static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
110#endif 113#endif
114
115extern struct mca_config mca_cfg;
116
117#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 40e28ed77fbf..486f640b02ef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -28,6 +28,8 @@
28#include <asm/msr.h> 28#include <asm/msr.h>
29#include <asm/trace/irq_vectors.h> 29#include <asm/trace/irq_vectors.h>
30 30
31#include "mce-internal.h"
32
31#define NR_BLOCKS 5 33#define NR_BLOCKS 5
32#define THRESHOLD_MAX 0xFFF 34#define THRESHOLD_MAX 0xFFF
33#define INT_TYPE_APIC 0x00020000 35#define INT_TYPE_APIC 0x00020000
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 86e8f0b2537b..c4fa4a85d4cb 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void)
122 bool *res = &dis_ucode_ldr; 122 bool *res = &dis_ucode_ldr;
123#endif 123#endif
124 124
125 if (!have_cpuid_p())
126 return *res;
127
128 /* 125 /*
129 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not 126 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
130 * completely accurate as xen pv guests don't see that CPUID bit set but 127 * completely accurate as xen pv guests don't see that CPUID bit set but
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name)
166void __init load_ucode_bsp(void) 163void __init load_ucode_bsp(void)
167{ 164{
168 unsigned int cpuid_1_eax; 165 unsigned int cpuid_1_eax;
166 bool intel = true;
169 167
170 if (check_loader_disabled_bsp()) 168 if (!have_cpuid_p())
171 return; 169 return;
172 170
173 cpuid_1_eax = native_cpuid_eax(1); 171 cpuid_1_eax = native_cpuid_eax(1);
174 172
175 switch (x86_cpuid_vendor()) { 173 switch (x86_cpuid_vendor()) {
176 case X86_VENDOR_INTEL: 174 case X86_VENDOR_INTEL:
177 if (x86_family(cpuid_1_eax) >= 6) 175 if (x86_family(cpuid_1_eax) < 6)
178 load_ucode_intel_bsp(); 176 return;
179 break; 177 break;
178
180 case X86_VENDOR_AMD: 179 case X86_VENDOR_AMD:
181 if (x86_family(cpuid_1_eax) >= 0x10) 180 if (x86_family(cpuid_1_eax) < 0x10)
182 load_ucode_amd_bsp(cpuid_1_eax); 181 return;
182 intel = false;
183 break; 183 break;
184
184 default: 185 default:
185 break; 186 return;
186 } 187 }
188
189 if (check_loader_disabled_bsp())
190 return;
191
192 if (intel)
193 load_ucode_intel_bsp();
194 else
195 load_ucode_amd_bsp(cpuid_1_eax);
187} 196}
188 197
189static bool check_loader_disabled_ap(void) 198static bool check_loader_disabled_ap(void)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8f7a9bbad514..7dbcb7adf797 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
34#include <linux/mm.h> 34#include <linux/mm.h>
35 35
36#include <asm/microcode_intel.h> 36#include <asm/microcode_intel.h>
37#include <asm/intel-family.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
38#include <asm/tlbflush.h> 39#include <asm/tlbflush.h>
39#include <asm/setup.h> 40#include <asm/setup.h>
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
918 return 0; 919 return 0;
919} 920}
920 921
922static bool is_blacklisted(unsigned int cpu)
923{
924 struct cpuinfo_x86 *c = &cpu_data(cpu);
925
926 if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
927 pr_err_once("late loading on model 79 is disabled.\n");
928 return true;
929 }
930
931 return false;
932}
933
921static enum ucode_state request_microcode_fw(int cpu, struct device *device, 934static enum ucode_state request_microcode_fw(int cpu, struct device *device,
922 bool refresh_fw) 935 bool refresh_fw)
923{ 936{
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
926 const struct firmware *firmware; 939 const struct firmware *firmware;
927 enum ucode_state ret; 940 enum ucode_state ret;
928 941
942 if (is_blacklisted(cpu))
943 return UCODE_NFOUND;
944
929 sprintf(name, "intel-ucode/%02x-%02x-%02x", 945 sprintf(name, "intel-ucode/%02x-%02x-%02x",
930 c->x86, c->x86_model, c->x86_mask); 946 c->x86, c->x86_model, c->x86_mask);
931 947
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
950static enum ucode_state 966static enum ucode_state
951request_microcode_user(int cpu, const void __user *buf, size_t size) 967request_microcode_user(int cpu, const void __user *buf, size_t size)
952{ 968{
969 if (is_blacklisted(cpu))
970 return UCODE_NFOUND;
971
953 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); 972 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
954} 973}
955 974
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index e1114f070c2d..f92a6593de1e 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
100 100
101 kernel_fpu_disable(); 101 kernel_fpu_disable();
102 102
103 if (fpu->fpregs_active) { 103 if (fpu->initialized) {
104 /* 104 /*
105 * Ignore return value -- we don't care if reg state 105 * Ignore return value -- we don't care if reg state
106 * is clobbered. 106 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
116{ 116{
117 struct fpu *fpu = &current->thread.fpu; 117 struct fpu *fpu = &current->thread.fpu;
118 118
119 if (fpu->fpregs_active) 119 if (fpu->initialized)
120 copy_kernel_to_fpregs(&fpu->state); 120 copy_kernel_to_fpregs(&fpu->state);
121 121
122 kernel_fpu_enable(); 122 kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
148 148
149 preempt_disable(); 149 preempt_disable();
150 trace_x86_fpu_before_save(fpu); 150 trace_x86_fpu_before_save(fpu);
151 if (fpu->fpregs_active) { 151 if (fpu->initialized) {
152 if (!copy_fpregs_to_fpstate(fpu)) { 152 if (!copy_fpregs_to_fpstate(fpu)) {
153 copy_kernel_to_fpregs(&fpu->state); 153 copy_kernel_to_fpregs(&fpu->state);
154 } 154 }
@@ -189,10 +189,9 @@ EXPORT_SYMBOL_GPL(fpstate_init);
189 189
190int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) 190int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
191{ 191{
192 dst_fpu->fpregs_active = 0;
193 dst_fpu->last_cpu = -1; 192 dst_fpu->last_cpu = -1;
194 193
195 if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)) 194 if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
196 return 0; 195 return 0;
197 196
198 WARN_ON_FPU(src_fpu != &current->thread.fpu); 197 WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -206,26 +205,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
206 /* 205 /*
207 * Save current FPU registers directly into the child 206 * Save current FPU registers directly into the child
208 * FPU context, without any memory-to-memory copying. 207 * FPU context, without any memory-to-memory copying.
209 * In lazy mode, if the FPU context isn't loaded into
210 * fpregs, CR0.TS will be set and do_device_not_available
211 * will load the FPU context.
212 * 208 *
213 * We have to do all this with preemption disabled, 209 * ( The function 'fails' in the FNSAVE case, which destroys
214 * mostly because of the FNSAVE case, because in that 210 * register contents so we have to copy them back. )
215 * case we must not allow preemption in the window
216 * between the FNSAVE and us marking the context lazy.
217 *
218 * It shouldn't be an issue as even FNSAVE is plenty
219 * fast in terms of critical section length.
220 */ 211 */
221 preempt_disable();
222 if (!copy_fpregs_to_fpstate(dst_fpu)) { 212 if (!copy_fpregs_to_fpstate(dst_fpu)) {
223 memcpy(&src_fpu->state, &dst_fpu->state, 213 memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
224 fpu_kernel_xstate_size);
225
226 copy_kernel_to_fpregs(&src_fpu->state); 214 copy_kernel_to_fpregs(&src_fpu->state);
227 } 215 }
228 preempt_enable();
229 216
230 trace_x86_fpu_copy_src(src_fpu); 217 trace_x86_fpu_copy_src(src_fpu);
231 trace_x86_fpu_copy_dst(dst_fpu); 218 trace_x86_fpu_copy_dst(dst_fpu);
@@ -237,45 +224,48 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
237 * Activate the current task's in-memory FPU context, 224 * Activate the current task's in-memory FPU context,
238 * if it has not been used before: 225 * if it has not been used before:
239 */ 226 */
240void fpu__activate_curr(struct fpu *fpu) 227void fpu__initialize(struct fpu *fpu)
241{ 228{
242 WARN_ON_FPU(fpu != &current->thread.fpu); 229 WARN_ON_FPU(fpu != &current->thread.fpu);
243 230
244 if (!fpu->fpstate_active) { 231 if (!fpu->initialized) {
245 fpstate_init(&fpu->state); 232 fpstate_init(&fpu->state);
246 trace_x86_fpu_init_state(fpu); 233 trace_x86_fpu_init_state(fpu);
247 234
248 trace_x86_fpu_activate_state(fpu); 235 trace_x86_fpu_activate_state(fpu);
249 /* Safe to do for the current task: */ 236 /* Safe to do for the current task: */
250 fpu->fpstate_active = 1; 237 fpu->initialized = 1;
251 } 238 }
252} 239}
253EXPORT_SYMBOL_GPL(fpu__activate_curr); 240EXPORT_SYMBOL_GPL(fpu__initialize);
254 241
255/* 242/*
256 * This function must be called before we read a task's fpstate. 243 * This function must be called before we read a task's fpstate.
257 * 244 *
258 * If the task has not used the FPU before then initialize its 245 * There's two cases where this gets called:
259 * fpstate. 246 *
247 * - for the current task (when coredumping), in which case we have
248 * to save the latest FPU registers into the fpstate,
249 *
250 * - or it's called for stopped tasks (ptrace), in which case the
251 * registers were already saved by the context-switch code when
252 * the task scheduled out - we only have to initialize the registers
253 * if they've never been initialized.
260 * 254 *
261 * If the task has used the FPU before then save it. 255 * If the task has used the FPU before then save it.
262 */ 256 */
263void fpu__activate_fpstate_read(struct fpu *fpu) 257void fpu__prepare_read(struct fpu *fpu)
264{ 258{
265 /* 259 if (fpu == &current->thread.fpu) {
266 * If fpregs are active (in the current CPU), then
267 * copy them to the fpstate:
268 */
269 if (fpu->fpregs_active) {
270 fpu__save(fpu); 260 fpu__save(fpu);
271 } else { 261 } else {
272 if (!fpu->fpstate_active) { 262 if (!fpu->initialized) {
273 fpstate_init(&fpu->state); 263 fpstate_init(&fpu->state);
274 trace_x86_fpu_init_state(fpu); 264 trace_x86_fpu_init_state(fpu);
275 265
276 trace_x86_fpu_activate_state(fpu); 266 trace_x86_fpu_activate_state(fpu);
277 /* Safe to do for current and for stopped child tasks: */ 267 /* Safe to do for current and for stopped child tasks: */
278 fpu->fpstate_active = 1; 268 fpu->initialized = 1;
279 } 269 }
280 } 270 }
281} 271}
@@ -283,17 +273,17 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
283/* 273/*
284 * This function must be called before we write a task's fpstate. 274 * This function must be called before we write a task's fpstate.
285 * 275 *
286 * If the task has used the FPU before then unlazy it. 276 * If the task has used the FPU before then invalidate any cached FPU registers.
287 * If the task has not used the FPU before then initialize its fpstate. 277 * If the task has not used the FPU before then initialize its fpstate.
288 * 278 *
289 * After this function call, after registers in the fpstate are 279 * After this function call, after registers in the fpstate are
290 * modified and the child task has woken up, the child task will 280 * modified and the child task has woken up, the child task will
291 * restore the modified FPU state from the modified context. If we 281 * restore the modified FPU state from the modified context. If we
292 * didn't clear its lazy status here then the lazy in-registers 282 * didn't clear its cached status here then the cached in-registers
293 * state pending on its former CPU could be restored, corrupting 283 * state pending on its former CPU could be restored, corrupting
294 * the modifications. 284 * the modifications.
295 */ 285 */
296void fpu__activate_fpstate_write(struct fpu *fpu) 286void fpu__prepare_write(struct fpu *fpu)
297{ 287{
298 /* 288 /*
299 * Only stopped child tasks can be used to modify the FPU 289 * Only stopped child tasks can be used to modify the FPU
@@ -301,8 +291,8 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
301 */ 291 */
302 WARN_ON_FPU(fpu == &current->thread.fpu); 292 WARN_ON_FPU(fpu == &current->thread.fpu);
303 293
304 if (fpu->fpstate_active) { 294 if (fpu->initialized) {
305 /* Invalidate any lazy state: */ 295 /* Invalidate any cached state: */
306 __fpu_invalidate_fpregs_state(fpu); 296 __fpu_invalidate_fpregs_state(fpu);
307 } else { 297 } else {
308 fpstate_init(&fpu->state); 298 fpstate_init(&fpu->state);
@@ -310,74 +300,11 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
310 300
311 trace_x86_fpu_activate_state(fpu); 301 trace_x86_fpu_activate_state(fpu);
312 /* Safe to do for stopped child tasks: */ 302 /* Safe to do for stopped child tasks: */
313 fpu->fpstate_active = 1; 303 fpu->initialized = 1;
314 } 304 }
315} 305}
316 306
317/* 307/*
318 * This function must be called before we write the current
319 * task's fpstate.
320 *
321 * This call gets the current FPU register state and moves
322 * it in to the 'fpstate'. Preemption is disabled so that
323 * no writes to the 'fpstate' can occur from context
324 * swiches.
325 *
326 * Must be followed by a fpu__current_fpstate_write_end().
327 */
328void fpu__current_fpstate_write_begin(void)
329{
330 struct fpu *fpu = &current->thread.fpu;
331
332 /*
333 * Ensure that the context-switching code does not write
334 * over the fpstate while we are doing our update.
335 */
336 preempt_disable();
337
338 /*
339 * Move the fpregs in to the fpu's 'fpstate'.
340 */
341 fpu__activate_fpstate_read(fpu);
342
343 /*
344 * The caller is about to write to 'fpu'. Ensure that no
345 * CPU thinks that its fpregs match the fpstate. This
346 * ensures we will not be lazy and skip a XRSTOR in the
347 * future.
348 */
349 __fpu_invalidate_fpregs_state(fpu);
350}
351
352/*
353 * This function must be paired with fpu__current_fpstate_write_begin()
354 *
355 * This will ensure that the modified fpstate gets placed back in
356 * the fpregs if necessary.
357 *
358 * Note: This function may be called whether or not an _actual_
359 * write to the fpstate occurred.
360 */
361void fpu__current_fpstate_write_end(void)
362{
363 struct fpu *fpu = &current->thread.fpu;
364
365 /*
366 * 'fpu' now has an updated copy of the state, but the
367 * registers may still be out of date. Update them with
368 * an XRSTOR if they are active.
369 */
370 if (fpregs_active())
371 copy_kernel_to_fpregs(&fpu->state);
372
373 /*
374 * Our update is done and the fpregs/fpstate are in sync
375 * if necessary. Context switches can happen again.
376 */
377 preempt_enable();
378}
379
380/*
381 * 'fpu__restore()' is called to copy FPU registers from 308 * 'fpu__restore()' is called to copy FPU registers from
382 * the FPU fpstate to the live hw registers and to activate 309 * the FPU fpstate to the live hw registers and to activate
383 * access to the hardware registers, so that FPU instructions 310 * access to the hardware registers, so that FPU instructions
@@ -389,7 +316,7 @@ void fpu__current_fpstate_write_end(void)
389 */ 316 */
390void fpu__restore(struct fpu *fpu) 317void fpu__restore(struct fpu *fpu)
391{ 318{
392 fpu__activate_curr(fpu); 319 fpu__initialize(fpu);
393 320
394 /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ 321 /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
395 kernel_fpu_disable(); 322 kernel_fpu_disable();
@@ -414,15 +341,17 @@ void fpu__drop(struct fpu *fpu)
414{ 341{
415 preempt_disable(); 342 preempt_disable();
416 343
417 if (fpu->fpregs_active) { 344 if (fpu == &current->thread.fpu) {
418 /* Ignore delayed exceptions from user space */ 345 if (fpu->initialized) {
419 asm volatile("1: fwait\n" 346 /* Ignore delayed exceptions from user space */
420 "2:\n" 347 asm volatile("1: fwait\n"
421 _ASM_EXTABLE(1b, 2b)); 348 "2:\n"
422 fpregs_deactivate(fpu); 349 _ASM_EXTABLE(1b, 2b));
350 fpregs_deactivate(fpu);
351 }
423 } 352 }
424 353
425 fpu->fpstate_active = 0; 354 fpu->initialized = 0;
426 355
427 trace_x86_fpu_dropped(fpu); 356 trace_x86_fpu_dropped(fpu);
428 357
@@ -462,9 +391,11 @@ void fpu__clear(struct fpu *fpu)
462 * Make sure fpstate is cleared and initialized. 391 * Make sure fpstate is cleared and initialized.
463 */ 392 */
464 if (static_cpu_has(X86_FEATURE_FPU)) { 393 if (static_cpu_has(X86_FEATURE_FPU)) {
465 fpu__activate_curr(fpu); 394 preempt_disable();
395 fpu__initialize(fpu);
466 user_fpu_begin(); 396 user_fpu_begin();
467 copy_init_fpstate_to_fpregs(); 397 copy_init_fpstate_to_fpregs();
398 preempt_enable();
468 } 399 }
469} 400}
470 401
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index d5d44c452624..7affb7e3d9a5 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
240 WARN_ON_FPU(!on_boot_cpu); 240 WARN_ON_FPU(!on_boot_cpu);
241 on_boot_cpu = 0; 241 on_boot_cpu = 0;
242 242
243 WARN_ON_FPU(current->thread.fpu.fpstate_active); 243 WARN_ON_FPU(current->thread.fpu.initialized);
244} 244}
245 245
246/* 246/*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index b188b16841e3..3ea151372389 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r
16{ 16{
17 struct fpu *target_fpu = &target->thread.fpu; 17 struct fpu *target_fpu = &target->thread.fpu;
18 18
19 return target_fpu->fpstate_active ? regset->n : 0; 19 return target_fpu->initialized ? regset->n : 0;
20} 20}
21 21
22int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset) 22int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
23{ 23{
24 struct fpu *target_fpu = &target->thread.fpu; 24 struct fpu *target_fpu = &target->thread.fpu;
25 25
26 if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active) 26 if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
27 return regset->n; 27 return regset->n;
28 else 28 else
29 return 0; 29 return 0;
@@ -38,7 +38,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
38 if (!boot_cpu_has(X86_FEATURE_FXSR)) 38 if (!boot_cpu_has(X86_FEATURE_FXSR))
39 return -ENODEV; 39 return -ENODEV;
40 40
41 fpu__activate_fpstate_read(fpu); 41 fpu__prepare_read(fpu);
42 fpstate_sanitize_xstate(fpu); 42 fpstate_sanitize_xstate(fpu);
43 43
44 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 44 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -55,7 +55,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
55 if (!boot_cpu_has(X86_FEATURE_FXSR)) 55 if (!boot_cpu_has(X86_FEATURE_FXSR))
56 return -ENODEV; 56 return -ENODEV;
57 57
58 fpu__activate_fpstate_write(fpu); 58 fpu__prepare_write(fpu);
59 fpstate_sanitize_xstate(fpu); 59 fpstate_sanitize_xstate(fpu);
60 60
61 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 61 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -89,10 +89,13 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
89 89
90 xsave = &fpu->state.xsave; 90 xsave = &fpu->state.xsave;
91 91
92 fpu__activate_fpstate_read(fpu); 92 fpu__prepare_read(fpu);
93 93
94 if (using_compacted_format()) { 94 if (using_compacted_format()) {
95 ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave); 95 if (kbuf)
96 ret = copy_xstate_to_kernel(kbuf, xsave, pos, count);
97 else
98 ret = copy_xstate_to_user(ubuf, xsave, pos, count);
96 } else { 99 } else {
97 fpstate_sanitize_xstate(fpu); 100 fpstate_sanitize_xstate(fpu);
98 /* 101 /*
@@ -129,28 +132,29 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
129 132
130 xsave = &fpu->state.xsave; 133 xsave = &fpu->state.xsave;
131 134
132 fpu__activate_fpstate_write(fpu); 135 fpu__prepare_write(fpu);
133 136
134 if (boot_cpu_has(X86_FEATURE_XSAVES)) 137 if (using_compacted_format()) {
135 ret = copyin_to_xsaves(kbuf, ubuf, xsave); 138 if (kbuf)
136 else 139 ret = copy_kernel_to_xstate(xsave, kbuf);
140 else
141 ret = copy_user_to_xstate(xsave, ubuf);
142 } else {
137 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1); 143 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
138 144 if (!ret)
139 /* 145 ret = validate_xstate_header(&xsave->header);
140 * In case of failure, mark all states as init: 146 }
141 */
142 if (ret)
143 fpstate_init(&fpu->state);
144 147
145 /* 148 /*
146 * mxcsr reserved bits must be masked to zero for security reasons. 149 * mxcsr reserved bits must be masked to zero for security reasons.
147 */ 150 */
148 xsave->i387.mxcsr &= mxcsr_feature_mask; 151 xsave->i387.mxcsr &= mxcsr_feature_mask;
149 xsave->header.xfeatures &= xfeatures_mask; 152
150 /* 153 /*
151 * These bits must be zero. 154 * In case of failure, mark all states as init:
152 */ 155 */
153 memset(&xsave->header.reserved, 0, 48); 156 if (ret)
157 fpstate_init(&fpu->state);
154 158
155 return ret; 159 return ret;
156} 160}
@@ -299,7 +303,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
299 struct fpu *fpu = &target->thread.fpu; 303 struct fpu *fpu = &target->thread.fpu;
300 struct user_i387_ia32_struct env; 304 struct user_i387_ia32_struct env;
301 305
302 fpu__activate_fpstate_read(fpu); 306 fpu__prepare_read(fpu);
303 307
304 if (!boot_cpu_has(X86_FEATURE_FPU)) 308 if (!boot_cpu_has(X86_FEATURE_FPU))
305 return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf); 309 return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
@@ -329,7 +333,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
329 struct user_i387_ia32_struct env; 333 struct user_i387_ia32_struct env;
330 int ret; 334 int ret;
331 335
332 fpu__activate_fpstate_write(fpu); 336 fpu__prepare_write(fpu);
333 fpstate_sanitize_xstate(fpu); 337 fpstate_sanitize_xstate(fpu);
334 338
335 if (!boot_cpu_has(X86_FEATURE_FPU)) 339 if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -369,7 +373,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
369 struct fpu *fpu = &tsk->thread.fpu; 373 struct fpu *fpu = &tsk->thread.fpu;
370 int fpvalid; 374 int fpvalid;
371 375
372 fpvalid = fpu->fpstate_active; 376 fpvalid = fpu->initialized;
373 if (fpvalid) 377 if (fpvalid)
374 fpvalid = !fpregs_get(tsk, NULL, 378 fpvalid = !fpregs_get(tsk, NULL,
375 0, sizeof(struct user_i387_ia32_struct), 379 0, sizeof(struct user_i387_ia32_struct),
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 83c23c230b4c..fb639e70048f 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
155 */ 155 */
156int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) 156int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
157{ 157{
158 struct xregs_state *xsave = &current->thread.fpu.state.xsave; 158 struct fpu *fpu = &current->thread.fpu;
159 struct xregs_state *xsave = &fpu->state.xsave;
159 struct task_struct *tsk = current; 160 struct task_struct *tsk = current;
160 int ia32_fxstate = (buf != buf_fx); 161 int ia32_fxstate = (buf != buf_fx);
161 162
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
170 sizeof(struct user_i387_ia32_struct), NULL, 171 sizeof(struct user_i387_ia32_struct), NULL,
171 (struct _fpstate_32 __user *) buf) ? -1 : 1; 172 (struct _fpstate_32 __user *) buf) ? -1 : 1;
172 173
173 if (fpregs_active() || using_compacted_format()) { 174 if (fpu->initialized || using_compacted_format()) {
174 /* Save the live register state to the user directly. */ 175 /* Save the live register state to the user directly. */
175 if (copy_fpregs_to_sigframe(buf_fx)) 176 if (copy_fpregs_to_sigframe(buf_fx))
176 return -1; 177 return -1;
177 /* Update the thread's fxstate to save the fsave header. */ 178 /* Update the thread's fxstate to save the fsave header. */
178 if (ia32_fxstate) 179 if (ia32_fxstate)
179 copy_fxregs_to_kernel(&tsk->thread.fpu); 180 copy_fxregs_to_kernel(fpu);
180 } else { 181 } else {
181 /* 182 /*
182 * It is a *bug* if kernel uses compacted-format for xsave 183 * It is a *bug* if kernel uses compacted-format for xsave
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
189 return -1; 190 return -1;
190 } 191 }
191 192
192 fpstate_sanitize_xstate(&tsk->thread.fpu); 193 fpstate_sanitize_xstate(fpu);
193 if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size)) 194 if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
194 return -1; 195 return -1;
195 } 196 }
@@ -213,8 +214,11 @@ sanitize_restored_xstate(struct task_struct *tsk,
213 struct xstate_header *header = &xsave->header; 214 struct xstate_header *header = &xsave->header;
214 215
215 if (use_xsave()) { 216 if (use_xsave()) {
216 /* These bits must be zero. */ 217 /*
217 memset(header->reserved, 0, 48); 218 * Note: we don't need to zero the reserved bits in the
219 * xstate_header here because we either didn't copy them at all,
220 * or we checked earlier that they aren't set.
221 */
218 222
219 /* 223 /*
220 * Init the state that is not present in the memory 224 * Init the state that is not present in the memory
@@ -223,7 +227,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
223 if (fx_only) 227 if (fx_only)
224 header->xfeatures = XFEATURE_MASK_FPSSE; 228 header->xfeatures = XFEATURE_MASK_FPSSE;
225 else 229 else
226 header->xfeatures &= (xfeatures_mask & xfeatures); 230 header->xfeatures &= xfeatures;
227 } 231 }
228 232
229 if (use_fxsr()) { 233 if (use_fxsr()) {
@@ -279,7 +283,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
279 if (!access_ok(VERIFY_READ, buf, size)) 283 if (!access_ok(VERIFY_READ, buf, size))
280 return -EACCES; 284 return -EACCES;
281 285
282 fpu__activate_curr(fpu); 286 fpu__initialize(fpu);
283 287
284 if (!static_cpu_has(X86_FEATURE_FPU)) 288 if (!static_cpu_has(X86_FEATURE_FPU))
285 return fpregs_soft_set(current, NULL, 289 return fpregs_soft_set(current, NULL,
@@ -307,28 +311,29 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
307 /* 311 /*
308 * For 32-bit frames with fxstate, copy the user state to the 312 * For 32-bit frames with fxstate, copy the user state to the
309 * thread's fpu state, reconstruct fxstate from the fsave 313 * thread's fpu state, reconstruct fxstate from the fsave
310 * header. Sanitize the copied state etc. 314 * header. Validate and sanitize the copied state.
311 */ 315 */
312 struct fpu *fpu = &tsk->thread.fpu; 316 struct fpu *fpu = &tsk->thread.fpu;
313 struct user_i387_ia32_struct env; 317 struct user_i387_ia32_struct env;
314 int err = 0; 318 int err = 0;
315 319
316 /* 320 /*
317 * Drop the current fpu which clears fpu->fpstate_active. This ensures 321 * Drop the current fpu which clears fpu->initialized. This ensures
318 * that any context-switch during the copy of the new state, 322 * that any context-switch during the copy of the new state,
319 * avoids the intermediate state from getting restored/saved. 323 * avoids the intermediate state from getting restored/saved.
320 * Thus avoiding the new restored state from getting corrupted. 324 * Thus avoiding the new restored state from getting corrupted.
321 * We will be ready to restore/save the state only after 325 * We will be ready to restore/save the state only after
322 * fpu->fpstate_active is again set. 326 * fpu->initialized is again set.
323 */ 327 */
324 fpu__drop(fpu); 328 fpu__drop(fpu);
325 329
326 if (using_compacted_format()) { 330 if (using_compacted_format()) {
327 err = copyin_to_xsaves(NULL, buf_fx, 331 err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
328 &fpu->state.xsave);
329 } else { 332 } else {
330 err = __copy_from_user(&fpu->state.xsave, 333 err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
331 buf_fx, state_size); 334
335 if (!err && state_size > offsetof(struct xregs_state, header))
336 err = validate_xstate_header(&fpu->state.xsave.header);
332 } 337 }
333 338
334 if (err || __copy_from_user(&env, buf, sizeof(env))) { 339 if (err || __copy_from_user(&env, buf, sizeof(env))) {
@@ -339,7 +344,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
339 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); 344 sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
340 } 345 }
341 346
342 fpu->fpstate_active = 1; 347 fpu->initialized = 1;
343 preempt_disable(); 348 preempt_disable();
344 fpu__restore(fpu); 349 fpu__restore(fpu);
345 preempt_enable(); 350 preempt_enable();
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index c24ac1efb12d..f1d5476c9022 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -483,6 +483,30 @@ int using_compacted_format(void)
483 return boot_cpu_has(X86_FEATURE_XSAVES); 483 return boot_cpu_has(X86_FEATURE_XSAVES);
484} 484}
485 485
486/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
487int validate_xstate_header(const struct xstate_header *hdr)
488{
489 /* No unknown or supervisor features may be set */
490 if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
491 return -EINVAL;
492
493 /* Userspace must use the uncompacted format */
494 if (hdr->xcomp_bv)
495 return -EINVAL;
496
497 /*
498 * If 'reserved' is shrunken to add a new field, make sure to validate
499 * that new field here!
500 */
501 BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
502
503 /* No reserved bits may be set */
504 if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
505 return -EINVAL;
506
507 return 0;
508}
509
486static void __xstate_dump_leaves(void) 510static void __xstate_dump_leaves(void)
487{ 511{
488 int i; 512 int i;
@@ -867,7 +891,7 @@ const void *get_xsave_field_ptr(int xsave_state)
867{ 891{
868 struct fpu *fpu = &current->thread.fpu; 892 struct fpu *fpu = &current->thread.fpu;
869 893
870 if (!fpu->fpstate_active) 894 if (!fpu->initialized)
871 return NULL; 895 return NULL;
872 /* 896 /*
873 * fpu__save() takes the CPU's xstate registers 897 * fpu__save() takes the CPU's xstate registers
@@ -921,38 +945,129 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
921#endif /* ! CONFIG_ARCH_HAS_PKEYS */ 945#endif /* ! CONFIG_ARCH_HAS_PKEYS */
922 946
923/* 947/*
948 * Weird legacy quirk: SSE and YMM states store information in the
949 * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
950 * area is marked as unused in the xfeatures header, we need to copy
951 * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
952 */
953static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
954{
955 if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
956 return false;
957
958 if (xfeatures & XFEATURE_MASK_FP)
959 return false;
960
961 return true;
962}
963
964/*
924 * This is similar to user_regset_copyout(), but will not add offset to 965 * This is similar to user_regset_copyout(), but will not add offset to
925 * the source data pointer or increment pos, count, kbuf, and ubuf. 966 * the source data pointer or increment pos, count, kbuf, and ubuf.
926 */ 967 */
927static inline int xstate_copyout(unsigned int pos, unsigned int count, 968static inline void
928 void *kbuf, void __user *ubuf, 969__copy_xstate_to_kernel(void *kbuf, const void *data,
929 const void *data, const int start_pos, 970 unsigned int offset, unsigned int size, unsigned int size_total)
930 const int end_pos)
931{ 971{
932 if ((count == 0) || (pos < start_pos)) 972 if (offset < size_total) {
933 return 0; 973 unsigned int copy = min(size, size_total - offset);
934 974
935 if (end_pos < 0 || pos < end_pos) { 975 memcpy(kbuf + offset, data, copy);
936 unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos)); 976 }
977}
937 978
938 if (kbuf) { 979/*
939 memcpy(kbuf + pos, data, copy); 980 * Convert from kernel XSAVES compacted format to standard format and copy
940 } else { 981 * to a kernel-space ptrace buffer.
941 if (__copy_to_user(ubuf + pos, data, copy)) 982 *
942 return -EFAULT; 983 * It supports partial copy but pos always starts from zero. This is called
984 * from xstateregs_get() and there we check the CPU has XSAVES.
985 */
986int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
987{
988 unsigned int offset, size;
989 struct xstate_header header;
990 int i;
991
992 /*
993 * Currently copy_regset_to_user() starts from pos 0:
994 */
995 if (unlikely(offset_start != 0))
996 return -EFAULT;
997
998 /*
999 * The destination is a ptrace buffer; we put in only user xstates:
1000 */
1001 memset(&header, 0, sizeof(header));
1002 header.xfeatures = xsave->header.xfeatures;
1003 header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
1004
1005 /*
1006 * Copy xregs_state->header:
1007 */
1008 offset = offsetof(struct xregs_state, header);
1009 size = sizeof(header);
1010
1011 __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
1012
1013 for (i = 0; i < XFEATURE_MAX; i++) {
1014 /*
1015 * Copy only in-use xstates:
1016 */
1017 if ((header.xfeatures >> i) & 1) {
1018 void *src = __raw_xsave_addr(xsave, 1 << i);
1019
1020 offset = xstate_offsets[i];
1021 size = xstate_sizes[i];
1022
1023 /* The next component has to fit fully into the output buffer: */
1024 if (offset + size > size_total)
1025 break;
1026
1027 __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
943 } 1028 }
1029
1030 }
1031
1032 if (xfeatures_mxcsr_quirk(header.xfeatures)) {
1033 offset = offsetof(struct fxregs_state, mxcsr);
1034 size = MXCSR_AND_FLAGS_SIZE;
1035 __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
1036 }
1037
1038 /*
1039 * Fill xsave->i387.sw_reserved value for ptrace frame:
1040 */
1041 offset = offsetof(struct fxregs_state, sw_reserved);
1042 size = sizeof(xstate_fx_sw_bytes);
1043
1044 __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
1045
1046 return 0;
1047}
1048
1049static inline int
1050__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
1051{
1052 if (!size)
1053 return 0;
1054
1055 if (offset < size_total) {
1056 unsigned int copy = min(size, size_total - offset);
1057
1058 if (__copy_to_user(ubuf + offset, data, copy))
1059 return -EFAULT;
944 } 1060 }
945 return 0; 1061 return 0;
946} 1062}
947 1063
948/* 1064/*
949 * Convert from kernel XSAVES compacted format to standard format and copy 1065 * Convert from kernel XSAVES compacted format to standard format and copy
950 * to a ptrace buffer. It supports partial copy but pos always starts from 1066 * to a user-space buffer. It supports partial copy but pos always starts from
951 * zero. This is called from xstateregs_get() and there we check the CPU 1067 * zero. This is called from xstateregs_get() and there we check the CPU
952 * has XSAVES. 1068 * has XSAVES.
953 */ 1069 */
954int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf, 1070int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
955 void __user *ubuf, struct xregs_state *xsave)
956{ 1071{
957 unsigned int offset, size; 1072 unsigned int offset, size;
958 int ret, i; 1073 int ret, i;
@@ -961,7 +1076,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
961 /* 1076 /*
962 * Currently copy_regset_to_user() starts from pos 0: 1077 * Currently copy_regset_to_user() starts from pos 0:
963 */ 1078 */
964 if (unlikely(pos != 0)) 1079 if (unlikely(offset_start != 0))
965 return -EFAULT; 1080 return -EFAULT;
966 1081
967 /* 1082 /*
@@ -977,8 +1092,7 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
977 offset = offsetof(struct xregs_state, header); 1092 offset = offsetof(struct xregs_state, header);
978 size = sizeof(header); 1093 size = sizeof(header);
979 1094
980 ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count); 1095 ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
981
982 if (ret) 1096 if (ret)
983 return ret; 1097 return ret;
984 1098
@@ -992,25 +1106,30 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
992 offset = xstate_offsets[i]; 1106 offset = xstate_offsets[i];
993 size = xstate_sizes[i]; 1107 size = xstate_sizes[i];
994 1108
995 ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count); 1109 /* The next component has to fit fully into the output buffer: */
1110 if (offset + size > size_total)
1111 break;
996 1112
1113 ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
997 if (ret) 1114 if (ret)
998 return ret; 1115 return ret;
999
1000 if (offset + size >= count)
1001 break;
1002 } 1116 }
1003 1117
1004 } 1118 }
1005 1119
1120 if (xfeatures_mxcsr_quirk(header.xfeatures)) {
1121 offset = offsetof(struct fxregs_state, mxcsr);
1122 size = MXCSR_AND_FLAGS_SIZE;
1123 __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
1124 }
1125
1006 /* 1126 /*
1007 * Fill xsave->i387.sw_reserved value for ptrace frame: 1127 * Fill xsave->i387.sw_reserved value for ptrace frame:
1008 */ 1128 */
1009 offset = offsetof(struct fxregs_state, sw_reserved); 1129 offset = offsetof(struct fxregs_state, sw_reserved);
1010 size = sizeof(xstate_fx_sw_bytes); 1130 size = sizeof(xstate_fx_sw_bytes);
1011 1131
1012 ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count); 1132 ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
1013
1014 if (ret) 1133 if (ret)
1015 return ret; 1134 return ret;
1016 1135
@@ -1018,55 +1137,98 @@ int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
1018} 1137}
1019 1138
1020/* 1139/*
1021 * Convert from a ptrace standard-format buffer to kernel XSAVES format 1140 * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
1022 * and copy to the target thread. This is called from xstateregs_set() and 1141 * and copy to the target thread. This is called from xstateregs_set().
1023 * there we check the CPU has XSAVES and a whole standard-sized buffer
1024 * exists.
1025 */ 1142 */
1026int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, 1143int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
1027 struct xregs_state *xsave)
1028{ 1144{
1029 unsigned int offset, size; 1145 unsigned int offset, size;
1030 int i; 1146 int i;
1031 u64 xfeatures; 1147 struct xstate_header hdr;
1032 u64 allowed_features;
1033 1148
1034 offset = offsetof(struct xregs_state, header); 1149 offset = offsetof(struct xregs_state, header);
1035 size = sizeof(xfeatures); 1150 size = sizeof(hdr);
1036 1151
1037 if (kbuf) { 1152 memcpy(&hdr, kbuf + offset, size);
1038 memcpy(&xfeatures, kbuf + offset, size); 1153
1039 } else { 1154 if (validate_xstate_header(&hdr))
1040 if (__copy_from_user(&xfeatures, ubuf + offset, size)) 1155 return -EINVAL;
1041 return -EFAULT; 1156
1157 for (i = 0; i < XFEATURE_MAX; i++) {
1158 u64 mask = ((u64)1 << i);
1159
1160 if (hdr.xfeatures & mask) {
1161 void *dst = __raw_xsave_addr(xsave, 1 << i);
1162
1163 offset = xstate_offsets[i];
1164 size = xstate_sizes[i];
1165
1166 memcpy(dst, kbuf + offset, size);
1167 }
1168 }
1169
1170 if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
1171 offset = offsetof(struct fxregs_state, mxcsr);
1172 size = MXCSR_AND_FLAGS_SIZE;
1173 memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
1042 } 1174 }
1043 1175
1044 /* 1176 /*
1045 * Reject if the user sets any disabled or supervisor features: 1177 * The state that came in from userspace was user-state only.
1178 * Mask all the user states out of 'xfeatures':
1179 */
1180 xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
1181
1182 /*
1183 * Add back in the features that came in from userspace:
1046 */ 1184 */
1047 allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR; 1185 xsave->header.xfeatures |= hdr.xfeatures;
1048 1186
1049 if (xfeatures & ~allowed_features) 1187 return 0;
1188}
1189
1190/*
1191 * Convert from a ptrace or sigreturn standard-format user-space buffer to
1192 * kernel XSAVES format and copy to the target thread. This is called from
1193 * xstateregs_set(), as well as potentially from the sigreturn() and
1194 * rt_sigreturn() system calls.
1195 */
1196int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
1197{
1198 unsigned int offset, size;
1199 int i;
1200 struct xstate_header hdr;
1201
1202 offset = offsetof(struct xregs_state, header);
1203 size = sizeof(hdr);
1204
1205 if (__copy_from_user(&hdr, ubuf + offset, size))
1206 return -EFAULT;
1207
1208 if (validate_xstate_header(&hdr))
1050 return -EINVAL; 1209 return -EINVAL;
1051 1210
1052 for (i = 0; i < XFEATURE_MAX; i++) { 1211 for (i = 0; i < XFEATURE_MAX; i++) {
1053 u64 mask = ((u64)1 << i); 1212 u64 mask = ((u64)1 << i);
1054 1213
1055 if (xfeatures & mask) { 1214 if (hdr.xfeatures & mask) {
1056 void *dst = __raw_xsave_addr(xsave, 1 << i); 1215 void *dst = __raw_xsave_addr(xsave, 1 << i);
1057 1216
1058 offset = xstate_offsets[i]; 1217 offset = xstate_offsets[i];
1059 size = xstate_sizes[i]; 1218 size = xstate_sizes[i];
1060 1219
1061 if (kbuf) { 1220 if (__copy_from_user(dst, ubuf + offset, size))
1062 memcpy(dst, kbuf + offset, size); 1221 return -EFAULT;
1063 } else {
1064 if (__copy_from_user(dst, ubuf + offset, size))
1065 return -EFAULT;
1066 }
1067 } 1222 }
1068 } 1223 }
1069 1224
1225 if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
1226 offset = offsetof(struct fxregs_state, mxcsr);
1227 size = MXCSR_AND_FLAGS_SIZE;
1228 if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
1229 return -EFAULT;
1230 }
1231
1070 /* 1232 /*
1071 * The state that came in from userspace was user-state only. 1233 * The state that came in from userspace was user-state only.
1072 * Mask all the user states out of 'xfeatures': 1234 * Mask all the user states out of 'xfeatures':
@@ -1076,7 +1238,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
1076 /* 1238 /*
1077 * Add back in the features that came in from userspace: 1239 * Add back in the features that came in from userspace:
1078 */ 1240 */
1079 xsave->header.xfeatures |= xfeatures; 1241 xsave->header.xfeatures |= hdr.xfeatures;
1080 1242
1081 return 0; 1243 return 0;
1082} 1244}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index cf2ce063f65a..2902ca4d5993 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
30 30
31asmlinkage __visible void __init i386_start_kernel(void) 31asmlinkage __visible void __init i386_start_kernel(void)
32{ 32{
33 cr4_init_shadow(); 33 /* Make sure IDT is set up before any exception happens */
34
35 idt_setup_early_handler(); 34 idt_setup_early_handler();
36 35
36 cr4_init_shadow();
37
37 sanitize_boot_params(&boot_params); 38 sanitize_boot_params(&boot_params);
38 39
39 x86_early_init_platform_quirks(); 40 x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 1f38d9a4d9de..d4eb450144fd 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -64,7 +64,7 @@ static void call_on_stack(void *func, void *stack)
64 64
65static inline void *current_stack(void) 65static inline void *current_stack(void)
66{ 66{
67 return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); 67 return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
68} 68}
69 69
70static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) 70static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
@@ -88,7 +88,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
88 88
89 /* Save the next esp at the bottom of the stack */ 89 /* Save the next esp at the bottom of the stack */
90 prev_esp = (u32 *)irqstk; 90 prev_esp = (u32 *)irqstk;
91 *prev_esp = current_stack_pointer(); 91 *prev_esp = current_stack_pointer;
92 92
93 if (unlikely(overflow)) 93 if (unlikely(overflow))
94 call_on_stack(print_stack_overflow, isp); 94 call_on_stack(print_stack_overflow, isp);
@@ -139,7 +139,7 @@ void do_softirq_own_stack(void)
139 139
140 /* Push the previous esp onto the stack */ 140 /* Push the previous esp onto the stack */
141 prev_esp = (u32 *)irqstk; 141 prev_esp = (u32 *)irqstk;
142 *prev_esp = current_stack_pointer(); 142 *prev_esp = current_stack_pointer;
143 143
144 call_on_stack(__do_softirq, isp); 144 call_on_stack(__do_softirq, isp);
145} 145}
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index db2182d63ed0..3fc0f9a794cb 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -3,6 +3,15 @@
3 3
4/* Kprobes and Optprobes common header */ 4/* Kprobes and Optprobes common header */
5 5
6#include <asm/asm.h>
7
8#ifdef CONFIG_FRAME_POINTER
9# define SAVE_RBP_STRING " push %" _ASM_BP "\n" \
10 " mov %" _ASM_SP ", %" _ASM_BP "\n"
11#else
12# define SAVE_RBP_STRING " push %" _ASM_BP "\n"
13#endif
14
6#ifdef CONFIG_X86_64 15#ifdef CONFIG_X86_64
7#define SAVE_REGS_STRING \ 16#define SAVE_REGS_STRING \
8 /* Skip cs, ip, orig_ax. */ \ 17 /* Skip cs, ip, orig_ax. */ \
@@ -17,7 +26,7 @@
17 " pushq %r10\n" \ 26 " pushq %r10\n" \
18 " pushq %r11\n" \ 27 " pushq %r11\n" \
19 " pushq %rbx\n" \ 28 " pushq %rbx\n" \
20 " pushq %rbp\n" \ 29 SAVE_RBP_STRING \
21 " pushq %r12\n" \ 30 " pushq %r12\n" \
22 " pushq %r13\n" \ 31 " pushq %r13\n" \
23 " pushq %r14\n" \ 32 " pushq %r14\n" \
@@ -48,7 +57,7 @@
48 " pushl %es\n" \ 57 " pushl %es\n" \
49 " pushl %ds\n" \ 58 " pushl %ds\n" \
50 " pushl %eax\n" \ 59 " pushl %eax\n" \
51 " pushl %ebp\n" \ 60 SAVE_RBP_STRING \
52 " pushl %edi\n" \ 61 " pushl %edi\n" \
53 " pushl %esi\n" \ 62 " pushl %esi\n" \
54 " pushl %edx\n" \ 63 " pushl %edx\n" \
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f0153714ddac..0742491cbb73 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
1080 * raw stack chunk with redzones: 1080 * raw stack chunk with redzones:
1081 */ 1081 */
1082 __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); 1082 __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
1083 regs->flags &= ~X86_EFLAGS_IF;
1084 trace_hardirqs_off();
1085 regs->ip = (unsigned long)(jp->entry); 1083 regs->ip = (unsigned long)(jp->entry);
1086 1084
1087 /* 1085 /*
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
index 4b0592ca9e47..8c1cc08f514f 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -299,7 +299,7 @@ static int __init create_setup_data_nodes(struct kobject *parent)
299 return 0; 299 return 0;
300 300
301out_clean_nodes: 301out_clean_nodes:
302 for (j = i - 1; j > 0; j--) 302 for (j = i - 1; j >= 0; j--)
303 cleanup_setup_data_node(*(kobjp + j)); 303 cleanup_setup_data_node(*(kobjp + j));
304 kfree(kobjp); 304 kfree(kobjp);
305out_setup_data_kobj: 305out_setup_data_kobj:
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index aa60a08b65b1..8bb9594d0761 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
117 return NULL; 117 return NULL;
118} 118}
119 119
120void kvm_async_pf_task_wait(u32 token) 120/*
121 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
122 * (other than user space)?
123 */
124void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
121{ 125{
122 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); 126 u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
123 struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; 127 struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -140,7 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
140 144
141 n.token = token; 145 n.token = token;
142 n.cpu = smp_processor_id(); 146 n.cpu = smp_processor_id();
143 n.halted = is_idle_task(current) || preempt_count() > 1; 147 n.halted = is_idle_task(current) ||
148 (IS_ENABLED(CONFIG_PREEMPT_COUNT)
149 ? preempt_count() > 1 || rcu_preempt_depth()
150 : interrupt_kernel);
144 init_swait_queue_head(&n.wq); 151 init_swait_queue_head(&n.wq);
145 hlist_add_head(&n.link, &b->list); 152 hlist_add_head(&n.link, &b->list);
146 raw_spin_unlock(&b->lock); 153 raw_spin_unlock(&b->lock);
@@ -268,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
268 case KVM_PV_REASON_PAGE_NOT_PRESENT: 275 case KVM_PV_REASON_PAGE_NOT_PRESENT:
269 /* page is swapped out by the host. */ 276 /* page is swapped out by the host. */
270 prev_state = exception_enter(); 277 prev_state = exception_enter();
271 kvm_async_pf_task_wait((u32)read_cr2()); 278 kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
272 exception_exit(prev_state); 279 exception_exit(prev_state);
273 break; 280 break;
274 case KVM_PV_REASON_PAGE_READY: 281 case KVM_PV_REASON_PAGE_READY:
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 54180fa6f66f..add33f600531 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type)
105 load_cr3(initial_page_table); 105 load_cr3(initial_page_table);
106#else 106#else
107 write_cr3(real_mode_header->trampoline_pgd); 107 write_cr3(real_mode_header->trampoline_pgd);
108
109 /* Exiting long mode will fail if CR4.PCIDE is set. */
110 if (static_cpu_has(X86_FEATURE_PCID))
111 cr4_clear_bits(X86_CR4_PCIDE);
108#endif 112#endif
109 113
110 /* Jump to the identity-mapped low memory code */ 114 /* Jump to the identity-mapped low memory code */
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index e04442345fc0..4e188fda5961 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
263 sp = (unsigned long) ka->sa.sa_restorer; 263 sp = (unsigned long) ka->sa.sa_restorer;
264 } 264 }
265 265
266 if (fpu->fpstate_active) { 266 if (fpu->initialized) {
267 sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32), 267 sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
268 &buf_fx, &math_size); 268 &buf_fx, &math_size);
269 *fpstate = (void __user *)sp; 269 *fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
279 return (void __user *)-1L; 279 return (void __user *)-1L;
280 280
281 /* save i387 and extended state */ 281 /* save i387 and extended state */
282 if (fpu->fpstate_active && 282 if (fpu->initialized &&
283 copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0) 283 copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
284 return (void __user *)-1L; 284 return (void __user *)-1L;
285 285
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
755 /* 755 /*
756 * Ensure the signal handler starts with the new fpu state. 756 * Ensure the signal handler starts with the new fpu state.
757 */ 757 */
758 if (fpu->fpstate_active) 758 if (fpu->initialized)
759 fpu__clear(fpu); 759 fpu__clear(fpu);
760 } 760 }
761 signal_setup_done(failed, ksig, stepping); 761 signal_setup_done(failed, ksig, stepping);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 34ea3651362e..67db4f43309e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -142,7 +142,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
142 * from double_fault. 142 * from double_fault.
143 */ 143 */
144 BUG_ON((unsigned long)(current_top_of_stack() - 144 BUG_ON((unsigned long)(current_top_of_stack() -
145 current_stack_pointer()) >= THREAD_SIZE); 145 current_stack_pointer) >= THREAD_SIZE);
146 146
147 preempt_enable_no_resched(); 147 preempt_enable_no_resched();
148} 148}
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index d145a0b1f529..3dc26f95d46e 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state)
44 state->stack_info.type, state->stack_info.next_sp, 44 state->stack_info.type, state->stack_info.next_sp,
45 state->stack_mask, state->graph_idx); 45 state->stack_mask, state->graph_idx);
46 46
47 for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { 47 for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
48 sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
48 if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) 49 if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
49 break; 50 break;
50 51
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state)
174 * This determines if the frame pointer actually contains an encoded pointer to 175 * This determines if the frame pointer actually contains an encoded pointer to
175 * pt_regs on the stack. See ENCODE_FRAME_POINTER. 176 * pt_regs on the stack. See ENCODE_FRAME_POINTER.
176 */ 177 */
178#ifdef CONFIG_X86_64
177static struct pt_regs *decode_frame_pointer(unsigned long *bp) 179static struct pt_regs *decode_frame_pointer(unsigned long *bp)
178{ 180{
179 unsigned long regs = (unsigned long)bp; 181 unsigned long regs = (unsigned long)bp;
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
183 185
184 return (struct pt_regs *)(regs & ~0x1); 186 return (struct pt_regs *)(regs & ~0x1);
185} 187}
188#else
189static struct pt_regs *decode_frame_pointer(unsigned long *bp)
190{
191 unsigned long regs = (unsigned long)bp;
192
193 if (regs & 0x80000000)
194 return NULL;
195
196 return (struct pt_regs *)(regs | 0x80000000);
197}
198#endif
199
200#ifdef CONFIG_X86_32
201#define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long))
202#else
203#define KERNEL_REGS_SIZE (sizeof(struct pt_regs))
204#endif
186 205
187static bool update_stack_state(struct unwind_state *state, 206static bool update_stack_state(struct unwind_state *state,
188 unsigned long *next_bp) 207 unsigned long *next_bp)
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state,
202 regs = decode_frame_pointer(next_bp); 221 regs = decode_frame_pointer(next_bp);
203 if (regs) { 222 if (regs) {
204 frame = (unsigned long *)regs; 223 frame = (unsigned long *)regs;
205 len = regs_size(regs); 224 len = KERNEL_REGS_SIZE;
206 state->got_irq = true; 225 state->got_irq = true;
207 } else { 226 } else {
208 frame = next_bp; 227 frame = next_bp;
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state,
226 frame < prev_frame_end) 245 frame < prev_frame_end)
227 return false; 246 return false;
228 247
248 /*
249 * On 32-bit with user mode regs, make sure the last two regs are safe
250 * to access:
251 */
252 if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) &&
253 !on_stack(info, frame, len + 2*sizeof(long)))
254 return false;
255
229 /* Move state to the next frame: */ 256 /* Move state to the next frame: */
230 if (regs) { 257 if (regs) {
231 state->regs = regs; 258 state->regs = regs;
@@ -328,6 +355,13 @@ bad_address:
328 state->regs->sp < (unsigned long)task_pt_regs(state->task)) 355 state->regs->sp < (unsigned long)task_pt_regs(state->task))
329 goto the_end; 356 goto the_end;
330 357
358 /*
359 * There are some known frame pointer issues on 32-bit. Disable
360 * unwinder warnings on 32-bit until it gets objtool support.
361 */
362 if (IS_ENABLED(CONFIG_X86_32))
363 goto the_end;
364
331 if (state->regs) { 365 if (state->regs) {
332 printk_deferred_once(KERN_WARNING 366 printk_deferred_once(KERN_WARNING
333 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", 367 "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 570b70d3f604..b95007e7c1b3 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip)
86 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; 86 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
87 87
88 if (unlikely((idx >= lookup_num_blocks-1))) { 88 if (unlikely((idx >= lookup_num_blocks-1))) {
89 orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n", 89 orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
90 idx, lookup_num_blocks, ip); 90 idx, lookup_num_blocks, (void *)ip);
91 return NULL; 91 return NULL;
92 } 92 }
93 93
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip)
96 96
97 if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || 97 if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
98 (__start_orc_unwind + stop > __stop_orc_unwind))) { 98 (__start_orc_unwind + stop > __stop_orc_unwind))) {
99 orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n", 99 orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
100 idx, lookup_num_blocks, start, stop, ip); 100 idx, lookup_num_blocks, start, stop, (void *)ip);
101 return NULL; 101 return NULL;
102 } 102 }
103 103
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state)
373 373
374 case ORC_REG_R10: 374 case ORC_REG_R10:
375 if (!state->regs || !state->full_regs) { 375 if (!state->regs || !state->full_regs) {
376 orc_warn("missing regs for base reg R10 at ip %p\n", 376 orc_warn("missing regs for base reg R10 at ip %pB\n",
377 (void *)state->ip); 377 (void *)state->ip);
378 goto done; 378 goto done;
379 } 379 }
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state)
382 382
383 case ORC_REG_R13: 383 case ORC_REG_R13:
384 if (!state->regs || !state->full_regs) { 384 if (!state->regs || !state->full_regs) {
385 orc_warn("missing regs for base reg R13 at ip %p\n", 385 orc_warn("missing regs for base reg R13 at ip %pB\n",
386 (void *)state->ip); 386 (void *)state->ip);
387 goto done; 387 goto done;
388 } 388 }
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state)
391 391
392 case ORC_REG_DI: 392 case ORC_REG_DI:
393 if (!state->regs || !state->full_regs) { 393 if (!state->regs || !state->full_regs) {
394 orc_warn("missing regs for base reg DI at ip %p\n", 394 orc_warn("missing regs for base reg DI at ip %pB\n",
395 (void *)state->ip); 395 (void *)state->ip);
396 goto done; 396 goto done;
397 } 397 }
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state)
400 400
401 case ORC_REG_DX: 401 case ORC_REG_DX:
402 if (!state->regs || !state->full_regs) { 402 if (!state->regs || !state->full_regs) {
403 orc_warn("missing regs for base reg DX at ip %p\n", 403 orc_warn("missing regs for base reg DX at ip %pB\n",
404 (void *)state->ip); 404 (void *)state->ip);
405 goto done; 405 goto done;
406 } 406 }
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state)
408 break; 408 break;
409 409
410 default: 410 default:
411 orc_warn("unknown SP base reg %d for ip %p\n", 411 orc_warn("unknown SP base reg %d for ip %pB\n",
412 orc->sp_reg, (void *)state->ip); 412 orc->sp_reg, (void *)state->ip);
413 goto done; 413 goto done;
414 } 414 }
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state)
436 436
437 case ORC_TYPE_REGS: 437 case ORC_TYPE_REGS:
438 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { 438 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
439 orc_warn("can't dereference registers at %p for ip %p\n", 439 orc_warn("can't dereference registers at %p for ip %pB\n",
440 (void *)sp, (void *)orig_ip); 440 (void *)sp, (void *)orig_ip);
441 goto done; 441 goto done;
442 } 442 }
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state)
448 448
449 case ORC_TYPE_REGS_IRET: 449 case ORC_TYPE_REGS_IRET:
450 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { 450 if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
451 orc_warn("can't dereference iret registers at %p for ip %p\n", 451 orc_warn("can't dereference iret registers at %p for ip %pB\n",
452 (void *)sp, (void *)orig_ip); 452 (void *)sp, (void *)orig_ip);
453 goto done; 453 goto done;
454 } 454 }
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state)
465 break; 465 break;
466 466
467 default: 467 default:
468 orc_warn("unknown .orc_unwind entry type %d\n", orc->type); 468 orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
469 orc->type, (void *)orig_ip);
469 break; 470 break;
470 } 471 }
471 472
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state)
487 break; 488 break;
488 489
489 default: 490 default:
490 orc_warn("unknown BP base reg %d for ip %p\n", 491 orc_warn("unknown BP base reg %d for ip %pB\n",
491 orc->bp_reg, (void *)orig_ip); 492 orc->bp_reg, (void *)orig_ip);
492 goto done; 493 goto done;
493 } 494 }
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state)
496 if (state->stack_info.type == prev_type && 497 if (state->stack_info.type == prev_type &&
497 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && 498 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
498 state->sp <= prev_sp) { 499 state->sp <= prev_sp) {
499 orc_warn("stack going in the wrong direction? ip=%p\n", 500 orc_warn("stack going in the wrong direction? ip=%pB\n",
500 (void *)orig_ip); 501 (void *)orig_ip);
501 goto done; 502 goto done;
502 } 503 }
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3ea624452f93..3c48bc8bf08c 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
23 depends on HIGH_RES_TIMERS 23 depends on HIGH_RES_TIMERS
24 # for TASKSTATS/TASK_DELAY_ACCT: 24 # for TASKSTATS/TASK_DELAY_ACCT:
25 depends on NET && MULTIUSER 25 depends on NET && MULTIUSER
26 depends on X86_LOCAL_APIC
26 select PREEMPT_NOTIFIERS 27 select PREEMPT_NOTIFIERS
27 select MMU_NOTIFIER 28 select MMU_NOTIFIER
28 select ANON_INODES 29 select ANON_INODES
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a36254cbf776..d90cdc77e077 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
425 #op " %al \n\t" \ 425 #op " %al \n\t" \
426 FOP_RET 426 FOP_RET
427 427
428asm(".global kvm_fastop_exception \n" 428asm(".pushsection .fixup, \"ax\"\n"
429 "kvm_fastop_exception: xor %esi, %esi; ret"); 429 ".global kvm_fastop_exception \n"
430 "kvm_fastop_exception: xor %esi, %esi; ret\n"
431 ".popsection");
430 432
431FOP_START(setcc) 433FOP_START(setcc)
432FOP_SETCC(seto) 434FOP_SETCC(seto)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca30c1eb1d9..7a69cf053711 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3837 case KVM_PV_REASON_PAGE_NOT_PRESENT: 3837 case KVM_PV_REASON_PAGE_NOT_PRESENT:
3838 vcpu->arch.apf.host_apf_reason = 0; 3838 vcpu->arch.apf.host_apf_reason = 0;
3839 local_irq_disable(); 3839 local_irq_disable();
3840 kvm_async_pf_task_wait(fault_address); 3840 kvm_async_pf_task_wait(fault_address, 0);
3841 local_irq_enable(); 3841 local_irq_enable();
3842 break; 3842 break;
3843 case KVM_PV_REASON_PAGE_READY: 3843 case KVM_PV_REASON_PAGE_READY:
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
3974 unsigned level, unsigned gpte) 3974 unsigned level, unsigned gpte)
3975{ 3975{
3976 /* 3976 /*
3977 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
3978 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3979 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3980 */
3981 gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
3982
3983 /*
3984 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. 3977 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3985 * If it is clear, there are no large pages at this level, so clear 3978 * If it is clear, there are no large pages at this level, so clear
3986 * PT_PAGE_SIZE_MASK in gpte if that is the case. 3979 * PT_PAGE_SIZE_MASK in gpte if that is the case.
3987 */ 3980 */
3988 gpte &= level - mmu->last_nonleaf_level; 3981 gpte &= level - mmu->last_nonleaf_level;
3989 3982
3983 /*
3984 * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
3985 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3986 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3987 */
3988 gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
3989
3990 return gpte & PT_PAGE_SIZE_MASK; 3990 return gpte & PT_PAGE_SIZE_MASK;
3991} 3991}
3992 3992
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4555 4555
4556 update_permission_bitmask(vcpu, context, true); 4556 update_permission_bitmask(vcpu, context, true);
4557 update_pkru_bitmask(vcpu, context, true); 4557 update_pkru_bitmask(vcpu, context, true);
4558 update_last_nonleaf_level(vcpu, context);
4558 reset_rsvds_bits_mask_ept(vcpu, context, execonly); 4559 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4559 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); 4560 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4560} 4561}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc5a649..f18d1f8d332b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ retry_walk:
334 --walker->level; 334 --walker->level;
335 335
336 index = PT_INDEX(addr, walker->level); 336 index = PT_INDEX(addr, walker->level);
337
338 table_gfn = gpte_to_gfn(pte); 337 table_gfn = gpte_to_gfn(pte);
339 offset = index * sizeof(pt_element_t); 338 offset = index * sizeof(pt_element_t);
340 pte_gpa = gfn_to_gpa(table_gfn) + offset; 339 pte_gpa = gfn_to_gpa(table_gfn) + offset;
340
341 BUG_ON(walker->level < 1);
341 walker->table_gfn[walker->level - 1] = table_gfn; 342 walker->table_gfn[walker->level - 1] = table_gfn;
342 walker->pte_gpa[walker->level - 1] = pte_gpa; 343 walker->pte_gpa[walker->level - 1] = pte_gpa;
343 344
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6970249c09fc..95a01609d7ee 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -200,6 +200,8 @@ struct loaded_vmcs {
200 int cpu; 200 int cpu;
201 bool launched; 201 bool launched;
202 bool nmi_known_unmasked; 202 bool nmi_known_unmasked;
203 unsigned long vmcs_host_cr3; /* May not match real cr3 */
204 unsigned long vmcs_host_cr4; /* May not match real cr4 */
203 struct list_head loaded_vmcss_on_cpu_link; 205 struct list_head loaded_vmcss_on_cpu_link;
204}; 206};
205 207
@@ -600,8 +602,6 @@ struct vcpu_vmx {
600 int gs_ldt_reload_needed; 602 int gs_ldt_reload_needed;
601 int fs_reload_needed; 603 int fs_reload_needed;
602 u64 msr_host_bndcfgs; 604 u64 msr_host_bndcfgs;
603 unsigned long vmcs_host_cr3; /* May not match real cr3 */
604 unsigned long vmcs_host_cr4; /* May not match real cr4 */
605 } host_state; 605 } host_state;
606 struct { 606 struct {
607 int vm86_active; 607 int vm86_active;
@@ -2202,46 +2202,44 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2202 struct pi_desc old, new; 2202 struct pi_desc old, new;
2203 unsigned int dest; 2203 unsigned int dest;
2204 2204
2205 if (!kvm_arch_has_assigned_device(vcpu->kvm) || 2205 /*
2206 !irq_remapping_cap(IRQ_POSTING_CAP) || 2206 * In case of hot-plug or hot-unplug, we may have to undo
2207 !kvm_vcpu_apicv_active(vcpu)) 2207 * vmx_vcpu_pi_put even if there is no assigned device. And we
2208 * always keep PI.NDST up to date for simplicity: it makes the
2209 * code easier, and CPU migration is not a fast path.
2210 */
2211 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2208 return; 2212 return;
2209 2213
2214 /*
2215 * First handle the simple case where no cmpxchg is necessary; just
2216 * allow posting non-urgent interrupts.
2217 *
2218 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2219 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2220 * expects the VCPU to be on the blocked_vcpu_list that matches
2221 * PI.NDST.
2222 */
2223 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2224 vcpu->cpu == cpu) {
2225 pi_clear_sn(pi_desc);
2226 return;
2227 }
2228
2229 /* The full case. */
2210 do { 2230 do {
2211 old.control = new.control = pi_desc->control; 2231 old.control = new.control = pi_desc->control;
2212 2232
2213 /* 2233 dest = cpu_physical_id(cpu);
2214 * If 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
2215 * are two possible cases:
2216 * 1. After running 'pre_block', context switch
2217 * happened. For this case, 'sn' was set in
2218 * vmx_vcpu_put(), so we need to clear it here.
2219 * 2. After running 'pre_block', we were blocked,
2220 * and woken up by some other guy. For this case,
2221 * we don't need to do anything, 'pi_post_block'
2222 * will do everything for us. However, we cannot
2223 * check whether it is case #1 or case #2 here
2224 * (maybe, not needed), so we also clear sn here,
2225 * I think it is not a big deal.
2226 */
2227 if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
2228 if (vcpu->cpu != cpu) {
2229 dest = cpu_physical_id(cpu);
2230
2231 if (x2apic_enabled())
2232 new.ndst = dest;
2233 else
2234 new.ndst = (dest << 8) & 0xFF00;
2235 }
2236 2234
2237 /* set 'NV' to 'notification vector' */ 2235 if (x2apic_enabled())
2238 new.nv = POSTED_INTR_VECTOR; 2236 new.ndst = dest;
2239 } 2237 else
2238 new.ndst = (dest << 8) & 0xFF00;
2240 2239
2241 /* Allow posting non-urgent interrupts */
2242 new.sn = 0; 2240 new.sn = 0;
2243 } while (cmpxchg(&pi_desc->control, old.control, 2241 } while (cmpxchg64(&pi_desc->control, old.control,
2244 new.control) != old.control); 2242 new.control) != old.control);
2245} 2243}
2246 2244
2247static void decache_tsc_multiplier(struct vcpu_vmx *vmx) 2245static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
@@ -5178,12 +5176,12 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
5178 */ 5176 */
5179 cr3 = __read_cr3(); 5177 cr3 = __read_cr3();
5180 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */ 5178 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
5181 vmx->host_state.vmcs_host_cr3 = cr3; 5179 vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
5182 5180
5183 /* Save the most likely value for this task's CR4 in the VMCS. */ 5181 /* Save the most likely value for this task's CR4 in the VMCS. */
5184 cr4 = cr4_read_shadow(); 5182 cr4 = cr4_read_shadow();
5185 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */ 5183 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
5186 vmx->host_state.vmcs_host_cr4 = cr4; 5184 vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
5187 5185
5188 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 5186 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
5189#ifdef CONFIG_X86_64 5187#ifdef CONFIG_X86_64
@@ -9273,15 +9271,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9273 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 9271 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
9274 9272
9275 cr3 = __get_current_cr3_fast(); 9273 cr3 = __get_current_cr3_fast();
9276 if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) { 9274 if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
9277 vmcs_writel(HOST_CR3, cr3); 9275 vmcs_writel(HOST_CR3, cr3);
9278 vmx->host_state.vmcs_host_cr3 = cr3; 9276 vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
9279 } 9277 }
9280 9278
9281 cr4 = cr4_read_shadow(); 9279 cr4 = cr4_read_shadow();
9282 if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { 9280 if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
9283 vmcs_writel(HOST_CR4, cr4); 9281 vmcs_writel(HOST_CR4, cr4);
9284 vmx->host_state.vmcs_host_cr4 = cr4; 9282 vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
9285 } 9283 }
9286 9284
9287 /* When single-stepping over STI and MOV SS, we must clear the 9285 /* When single-stepping over STI and MOV SS, we must clear the
@@ -9591,6 +9589,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
9591 9589
9592 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; 9590 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
9593 9591
9592 /*
9593 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
9594 * or POSTED_INTR_WAKEUP_VECTOR.
9595 */
9596 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
9597 vmx->pi_desc.sn = 1;
9598
9594 return &vmx->vcpu; 9599 return &vmx->vcpu;
9595 9600
9596free_vmcs: 9601free_vmcs:
@@ -9839,7 +9844,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
9839 9844
9840 WARN_ON(!is_guest_mode(vcpu)); 9845 WARN_ON(!is_guest_mode(vcpu));
9841 9846
9842 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code)) { 9847 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
9848 !to_vmx(vcpu)->nested.nested_run_pending) {
9843 vmcs12->vm_exit_intr_error_code = fault->error_code; 9849 vmcs12->vm_exit_intr_error_code = fault->error_code;
9844 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 9850 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
9845 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION | 9851 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
@@ -11291,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
11291 11297
11292 /* Same as above - no reason to call set_cr4_guest_host_mask(). */ 11298 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
11293 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 11299 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
11294 kvm_set_cr4(vcpu, vmcs12->host_cr4); 11300 vmx_set_cr4(vcpu, vmcs12->host_cr4);
11295 11301
11296 nested_ept_uninit_mmu_context(vcpu); 11302 nested_ept_uninit_mmu_context(vcpu);
11297 11303
@@ -11704,6 +11710,37 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
11704 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask); 11710 kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
11705} 11711}
11706 11712
11713static void __pi_post_block(struct kvm_vcpu *vcpu)
11714{
11715 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
11716 struct pi_desc old, new;
11717 unsigned int dest;
11718
11719 do {
11720 old.control = new.control = pi_desc->control;
11721 WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
11722 "Wakeup handler not enabled while the VCPU is blocked\n");
11723
11724 dest = cpu_physical_id(vcpu->cpu);
11725
11726 if (x2apic_enabled())
11727 new.ndst = dest;
11728 else
11729 new.ndst = (dest << 8) & 0xFF00;
11730
11731 /* set 'NV' to 'notification vector' */
11732 new.nv = POSTED_INTR_VECTOR;
11733 } while (cmpxchg64(&pi_desc->control, old.control,
11734 new.control) != old.control);
11735
11736 if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
11737 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
11738 list_del(&vcpu->blocked_vcpu_list);
11739 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
11740 vcpu->pre_pcpu = -1;
11741 }
11742}
11743
11707/* 11744/*
11708 * This routine does the following things for vCPU which is going 11745 * This routine does the following things for vCPU which is going
11709 * to be blocked if VT-d PI is enabled. 11746 * to be blocked if VT-d PI is enabled.
@@ -11719,7 +11756,6 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
11719 */ 11756 */
11720static int pi_pre_block(struct kvm_vcpu *vcpu) 11757static int pi_pre_block(struct kvm_vcpu *vcpu)
11721{ 11758{
11722 unsigned long flags;
11723 unsigned int dest; 11759 unsigned int dest;
11724 struct pi_desc old, new; 11760 struct pi_desc old, new;
11725 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 11761 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
@@ -11729,34 +11765,20 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
11729 !kvm_vcpu_apicv_active(vcpu)) 11765 !kvm_vcpu_apicv_active(vcpu))
11730 return 0; 11766 return 0;
11731 11767
11732 vcpu->pre_pcpu = vcpu->cpu; 11768 WARN_ON(irqs_disabled());
11733 spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock, 11769 local_irq_disable();
11734 vcpu->pre_pcpu), flags); 11770 if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
11735 list_add_tail(&vcpu->blocked_vcpu_list, 11771 vcpu->pre_pcpu = vcpu->cpu;
11736 &per_cpu(blocked_vcpu_on_cpu, 11772 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
11737 vcpu->pre_pcpu)); 11773 list_add_tail(&vcpu->blocked_vcpu_list,
11738 spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock, 11774 &per_cpu(blocked_vcpu_on_cpu,
11739 vcpu->pre_pcpu), flags); 11775 vcpu->pre_pcpu));
11776 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
11777 }
11740 11778
11741 do { 11779 do {
11742 old.control = new.control = pi_desc->control; 11780 old.control = new.control = pi_desc->control;
11743 11781
11744 /*
11745 * We should not block the vCPU if
11746 * an interrupt is posted for it.
11747 */
11748 if (pi_test_on(pi_desc) == 1) {
11749 spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
11750 vcpu->pre_pcpu), flags);
11751 list_del(&vcpu->blocked_vcpu_list);
11752 spin_unlock_irqrestore(
11753 &per_cpu(blocked_vcpu_on_cpu_lock,
11754 vcpu->pre_pcpu), flags);
11755 vcpu->pre_pcpu = -1;
11756
11757 return 1;
11758 }
11759
11760 WARN((pi_desc->sn == 1), 11782 WARN((pi_desc->sn == 1),
11761 "Warning: SN field of posted-interrupts " 11783 "Warning: SN field of posted-interrupts "
11762 "is set before blocking\n"); 11784 "is set before blocking\n");
@@ -11778,10 +11800,15 @@ static int pi_pre_block(struct kvm_vcpu *vcpu)
11778 11800
11779 /* set 'NV' to 'wakeup vector' */ 11801 /* set 'NV' to 'wakeup vector' */
11780 new.nv = POSTED_INTR_WAKEUP_VECTOR; 11802 new.nv = POSTED_INTR_WAKEUP_VECTOR;
11781 } while (cmpxchg(&pi_desc->control, old.control, 11803 } while (cmpxchg64(&pi_desc->control, old.control,
11782 new.control) != old.control); 11804 new.control) != old.control);
11783 11805
11784 return 0; 11806 /* We should not block the vCPU if an interrupt is posted for it. */
11807 if (pi_test_on(pi_desc) == 1)
11808 __pi_post_block(vcpu);
11809
11810 local_irq_enable();
11811 return (vcpu->pre_pcpu == -1);
11785} 11812}
11786 11813
11787static int vmx_pre_block(struct kvm_vcpu *vcpu) 11814static int vmx_pre_block(struct kvm_vcpu *vcpu)
@@ -11797,44 +11824,13 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
11797 11824
11798static void pi_post_block(struct kvm_vcpu *vcpu) 11825static void pi_post_block(struct kvm_vcpu *vcpu)
11799{ 11826{
11800 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); 11827 if (vcpu->pre_pcpu == -1)
11801 struct pi_desc old, new;
11802 unsigned int dest;
11803 unsigned long flags;
11804
11805 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
11806 !irq_remapping_cap(IRQ_POSTING_CAP) ||
11807 !kvm_vcpu_apicv_active(vcpu))
11808 return; 11828 return;
11809 11829
11810 do { 11830 WARN_ON(irqs_disabled());
11811 old.control = new.control = pi_desc->control; 11831 local_irq_disable();
11812 11832 __pi_post_block(vcpu);
11813 dest = cpu_physical_id(vcpu->cpu); 11833 local_irq_enable();
11814
11815 if (x2apic_enabled())
11816 new.ndst = dest;
11817 else
11818 new.ndst = (dest << 8) & 0xFF00;
11819
11820 /* Allow posting non-urgent interrupts */
11821 new.sn = 0;
11822
11823 /* set 'NV' to 'notification vector' */
11824 new.nv = POSTED_INTR_VECTOR;
11825 } while (cmpxchg(&pi_desc->control, old.control,
11826 new.control) != old.control);
11827
11828 if(vcpu->pre_pcpu != -1) {
11829 spin_lock_irqsave(
11830 &per_cpu(blocked_vcpu_on_cpu_lock,
11831 vcpu->pre_pcpu), flags);
11832 list_del(&vcpu->blocked_vcpu_list);
11833 spin_unlock_irqrestore(
11834 &per_cpu(blocked_vcpu_on_cpu_lock,
11835 vcpu->pre_pcpu), flags);
11836 vcpu->pre_pcpu = -1;
11837 }
11838} 11834}
11839 11835
11840static void vmx_post_block(struct kvm_vcpu *vcpu) 11836static void vmx_post_block(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd17b7d9a107..03869eb7fcd6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7225,7 +7225,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7225 int r; 7225 int r;
7226 sigset_t sigsaved; 7226 sigset_t sigsaved;
7227 7227
7228 fpu__activate_curr(fpu); 7228 fpu__initialize(fpu);
7229 7229
7230 if (vcpu->sigset_active) 7230 if (vcpu->sigset_active)
7231 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 7231 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index d4a7df2205b8..220638a4cb94 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -114,7 +114,7 @@ void math_emulate(struct math_emu_info *info)
114 struct desc_struct code_descriptor; 114 struct desc_struct code_descriptor;
115 struct fpu *fpu = &current->thread.fpu; 115 struct fpu *fpu = &current->thread.fpu;
116 116
117 fpu__activate_curr(fpu); 117 fpu__initialize(fpu);
118 118
119#ifdef RE_ENTRANT_CHECKING 119#ifdef RE_ENTRANT_CHECKING
120 if (emulating) { 120 if (emulating) {
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 72bf8c01c6e3..e1f095884386 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,12 @@
1# Kernel does not boot with instrumentation of tlb.c. 1# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c
2KCOV_INSTRUMENT_tlb.o := n 2KCOV_INSTRUMENT_tlb.o := n
3KCOV_INSTRUMENT_mem_encrypt.o := n
4
5KASAN_SANITIZE_mem_encrypt.o := n
6
7ifdef CONFIG_FUNCTION_TRACER
8CFLAGS_REMOVE_mem_encrypt.o = -pg
9endif
3 10
4obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 11obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
5 pat.o pgtable.o physaddr.o setup_nx.o tlb.o 12 pat.o pgtable.o physaddr.o setup_nx.o tlb.o
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index c076f710de4c..c3521e2be396 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -2,6 +2,7 @@
2#include <linux/uaccess.h> 2#include <linux/uaccess.h>
3#include <linux/sched/debug.h> 3#include <linux/sched/debug.h>
4 4
5#include <asm/fpu/internal.h>
5#include <asm/traps.h> 6#include <asm/traps.h>
6#include <asm/kdebug.h> 7#include <asm/kdebug.h>
7 8
@@ -78,6 +79,29 @@ bool ex_handler_refcount(const struct exception_table_entry *fixup,
78} 79}
79EXPORT_SYMBOL_GPL(ex_handler_refcount); 80EXPORT_SYMBOL_GPL(ex_handler_refcount);
80 81
82/*
83 * Handler for when we fail to restore a task's FPU state. We should never get
84 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
85 * should always be valid. However, past bugs have allowed userspace to set
86 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
87 * These caused XRSTOR to fail when switching to the task, leaking the FPU
88 * registers of the task previously executing on the CPU. Mitigate this class
89 * of vulnerability by restoring from the initial state (essentially, zeroing
90 * out all the FPU registers) if we can't restore from the task's FPU state.
91 */
92bool ex_handler_fprestore(const struct exception_table_entry *fixup,
93 struct pt_regs *regs, int trapnr)
94{
95 regs->ip = ex_fixup_addr(fixup);
96
97 WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
98 (void *)instruction_pointer(regs));
99
100 __copy_kernel_to_fpregs(&init_fpstate, -1);
101 return true;
102}
103EXPORT_SYMBOL_GPL(ex_handler_fprestore);
104
81bool ex_handler_ext(const struct exception_table_entry *fixup, 105bool ex_handler_ext(const struct exception_table_entry *fixup,
82 struct pt_regs *regs, int trapnr) 106 struct pt_regs *regs, int trapnr)
83{ 107{
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 39567b5c33da..7101c281c7ce 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -192,8 +192,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
192 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really 192 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
193 * faulted on a pte with its pkey=4. 193 * faulted on a pte with its pkey=4.
194 */ 194 */
195static void fill_sig_info_pkey(int si_code, siginfo_t *info, 195static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
196 struct vm_area_struct *vma)
197{ 196{
198 /* This is effectively an #ifdef */ 197 /* This is effectively an #ifdef */
199 if (!boot_cpu_has(X86_FEATURE_OSPKE)) 198 if (!boot_cpu_has(X86_FEATURE_OSPKE))
@@ -209,7 +208,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
209 * valid VMA, so we should never reach this without a 208 * valid VMA, so we should never reach this without a
210 * valid VMA. 209 * valid VMA.
211 */ 210 */
212 if (!vma) { 211 if (!pkey) {
213 WARN_ONCE(1, "PKU fault with no VMA passed in"); 212 WARN_ONCE(1, "PKU fault with no VMA passed in");
214 info->si_pkey = 0; 213 info->si_pkey = 0;
215 return; 214 return;
@@ -219,13 +218,12 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info,
219 * absolutely guranteed to be 100% accurate because of 218 * absolutely guranteed to be 100% accurate because of
220 * the race explained above. 219 * the race explained above.
221 */ 220 */
222 info->si_pkey = vma_pkey(vma); 221 info->si_pkey = *pkey;
223} 222}
224 223
225static void 224static void
226force_sig_info_fault(int si_signo, int si_code, unsigned long address, 225force_sig_info_fault(int si_signo, int si_code, unsigned long address,
227 struct task_struct *tsk, struct vm_area_struct *vma, 226 struct task_struct *tsk, u32 *pkey, int fault)
228 int fault)
229{ 227{
230 unsigned lsb = 0; 228 unsigned lsb = 0;
231 siginfo_t info; 229 siginfo_t info;
@@ -240,7 +238,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
240 lsb = PAGE_SHIFT; 238 lsb = PAGE_SHIFT;
241 info.si_addr_lsb = lsb; 239 info.si_addr_lsb = lsb;
242 240
243 fill_sig_info_pkey(si_code, &info, vma); 241 fill_sig_info_pkey(si_code, &info, pkey);
244 242
245 force_sig_info(si_signo, &info, tsk); 243 force_sig_info(si_signo, &info, tsk);
246} 244}
@@ -762,8 +760,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
762 struct task_struct *tsk = current; 760 struct task_struct *tsk = current;
763 unsigned long flags; 761 unsigned long flags;
764 int sig; 762 int sig;
765 /* No context means no VMA to pass down */
766 struct vm_area_struct *vma = NULL;
767 763
768 /* Are we prepared to handle this kernel fault? */ 764 /* Are we prepared to handle this kernel fault? */
769 if (fixup_exception(regs, X86_TRAP_PF)) { 765 if (fixup_exception(regs, X86_TRAP_PF)) {
@@ -788,7 +784,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
788 784
789 /* XXX: hwpoison faults will set the wrong code. */ 785 /* XXX: hwpoison faults will set the wrong code. */
790 force_sig_info_fault(signal, si_code, address, 786 force_sig_info_fault(signal, si_code, address,
791 tsk, vma, 0); 787 tsk, NULL, 0);
792 } 788 }
793 789
794 /* 790 /*
@@ -896,8 +892,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
896 892
897static void 893static void
898__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 894__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
899 unsigned long address, struct vm_area_struct *vma, 895 unsigned long address, u32 *pkey, int si_code)
900 int si_code)
901{ 896{
902 struct task_struct *tsk = current; 897 struct task_struct *tsk = current;
903 898
@@ -945,7 +940,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
945 tsk->thread.error_code = error_code; 940 tsk->thread.error_code = error_code;
946 tsk->thread.trap_nr = X86_TRAP_PF; 941 tsk->thread.trap_nr = X86_TRAP_PF;
947 942
948 force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0); 943 force_sig_info_fault(SIGSEGV, si_code, address, tsk, pkey, 0);
949 944
950 return; 945 return;
951 } 946 }
@@ -958,9 +953,9 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
958 953
959static noinline void 954static noinline void
960bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, 955bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
961 unsigned long address, struct vm_area_struct *vma) 956 unsigned long address, u32 *pkey)
962{ 957{
963 __bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR); 958 __bad_area_nosemaphore(regs, error_code, address, pkey, SEGV_MAPERR);
964} 959}
965 960
966static void 961static void
@@ -968,6 +963,10 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
968 unsigned long address, struct vm_area_struct *vma, int si_code) 963 unsigned long address, struct vm_area_struct *vma, int si_code)
969{ 964{
970 struct mm_struct *mm = current->mm; 965 struct mm_struct *mm = current->mm;
966 u32 pkey;
967
968 if (vma)
969 pkey = vma_pkey(vma);
971 970
972 /* 971 /*
973 * Something tried to access memory that isn't in our memory map.. 972 * Something tried to access memory that isn't in our memory map..
@@ -975,7 +974,8 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
975 */ 974 */
976 up_read(&mm->mmap_sem); 975 up_read(&mm->mmap_sem);
977 976
978 __bad_area_nosemaphore(regs, error_code, address, vma, si_code); 977 __bad_area_nosemaphore(regs, error_code, address,
978 (vma) ? &pkey : NULL, si_code);
979} 979}
980 980
981static noinline void 981static noinline void
@@ -1018,7 +1018,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
1018 1018
1019static void 1019static void
1020do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, 1020do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
1021 struct vm_area_struct *vma, unsigned int fault) 1021 u32 *pkey, unsigned int fault)
1022{ 1022{
1023 struct task_struct *tsk = current; 1023 struct task_struct *tsk = current;
1024 int code = BUS_ADRERR; 1024 int code = BUS_ADRERR;
@@ -1045,13 +1045,12 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
1045 code = BUS_MCEERR_AR; 1045 code = BUS_MCEERR_AR;
1046 } 1046 }
1047#endif 1047#endif
1048 force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault); 1048 force_sig_info_fault(SIGBUS, code, address, tsk, pkey, fault);
1049} 1049}
1050 1050
1051static noinline void 1051static noinline void
1052mm_fault_error(struct pt_regs *regs, unsigned long error_code, 1052mm_fault_error(struct pt_regs *regs, unsigned long error_code,
1053 unsigned long address, struct vm_area_struct *vma, 1053 unsigned long address, u32 *pkey, unsigned int fault)
1054 unsigned int fault)
1055{ 1054{
1056 if (fatal_signal_pending(current) && !(error_code & PF_USER)) { 1055 if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
1057 no_context(regs, error_code, address, 0, 0); 1056 no_context(regs, error_code, address, 0, 0);
@@ -1075,9 +1074,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
1075 } else { 1074 } else {
1076 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 1075 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
1077 VM_FAULT_HWPOISON_LARGE)) 1076 VM_FAULT_HWPOISON_LARGE))
1078 do_sigbus(regs, error_code, address, vma, fault); 1077 do_sigbus(regs, error_code, address, pkey, fault);
1079 else if (fault & VM_FAULT_SIGSEGV) 1078 else if (fault & VM_FAULT_SIGSEGV)
1080 bad_area_nosemaphore(regs, error_code, address, vma); 1079 bad_area_nosemaphore(regs, error_code, address, pkey);
1081 else 1080 else
1082 BUG(); 1081 BUG();
1083 } 1082 }
@@ -1267,6 +1266,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
1267 struct mm_struct *mm; 1266 struct mm_struct *mm;
1268 int fault, major = 0; 1267 int fault, major = 0;
1269 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 1268 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1269 u32 pkey;
1270 1270
1271 tsk = current; 1271 tsk = current;
1272 mm = tsk->mm; 1272 mm = tsk->mm;
@@ -1440,7 +1440,17 @@ good_area:
1440 * make sure we exit gracefully rather than endlessly redo 1440 * make sure we exit gracefully rather than endlessly redo
1441 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if 1441 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
1442 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. 1442 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
1443 *
1444 * Note that handle_userfault() may also release and reacquire mmap_sem
1445 * (and not return with VM_FAULT_RETRY), when returning to userland to
1446 * repeat the page fault later with a VM_FAULT_NOPAGE retval
1447 * (potentially after handling any pending signal during the return to
1448 * userland). The return to userland is identified whenever
1449 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
1450 * Thus we have to be careful about not touching vma after handling the
1451 * fault, so we read the pkey beforehand.
1443 */ 1452 */
1453 pkey = vma_pkey(vma);
1444 fault = handle_mm_fault(vma, address, flags); 1454 fault = handle_mm_fault(vma, address, flags);
1445 major |= fault & VM_FAULT_MAJOR; 1455 major |= fault & VM_FAULT_MAJOR;
1446 1456
@@ -1469,7 +1479,7 @@ good_area:
1469 1479
1470 up_read(&mm->mmap_sem); 1480 up_read(&mm->mmap_sem);
1471 if (unlikely(fault & VM_FAULT_ERROR)) { 1481 if (unlikely(fault & VM_FAULT_ERROR)) {
1472 mm_fault_error(regs, error_code, address, vma, fault); 1482 mm_fault_error(regs, error_code, address, &pkey, fault);
1473 return; 1483 return;
1474 } 1484 }
1475 1485
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3fcc8e01683b..16c5f37933a2 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -10,6 +10,8 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#define DISABLE_BRANCH_PROFILING
14
13#include <linux/linkage.h> 15#include <linux/linkage.h>
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/mm.h> 17#include <linux/mm.h>
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 2dab69a706ec..d7bc0eea20a5 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -18,7 +18,6 @@
18 18
19#include <asm/cpufeature.h> /* boot_cpu_has, ... */ 19#include <asm/cpufeature.h> /* boot_cpu_has, ... */
20#include <asm/mmu_context.h> /* vma_pkey() */ 20#include <asm/mmu_context.h> /* vma_pkey() */
21#include <asm/fpu/internal.h> /* fpregs_active() */
22 21
23int __execute_only_pkey(struct mm_struct *mm) 22int __execute_only_pkey(struct mm_struct *mm)
24{ 23{
@@ -45,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
45 */ 44 */
46 preempt_disable(); 45 preempt_disable();
47 if (!need_to_set_mm_pkey && 46 if (!need_to_set_mm_pkey &&
48 fpregs_active() && 47 current->thread.fpu.initialized &&
49 !__pkru_allows_read(read_pkru(), execute_only_pkey)) { 48 !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
50 preempt_enable(); 49 preempt_enable();
51 return execute_only_pkey; 50 return execute_only_pkey;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 93fe97cce581..0f3d0cea4d00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,6 +30,7 @@
30 30
31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); 31atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
32 32
33
33static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, 34static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
34 u16 *new_asid, bool *need_flush) 35 u16 *new_asid, bool *need_flush)
35{ 36{
@@ -80,7 +81,7 @@ void leave_mm(int cpu)
80 return; 81 return;
81 82
82 /* Warn if we're not lazy. */ 83 /* Warn if we're not lazy. */
83 WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))); 84 WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
84 85
85 switch_mm(NULL, &init_mm, NULL); 86 switch_mm(NULL, &init_mm, NULL);
86} 87}
@@ -142,45 +143,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
142 __flush_tlb_all(); 143 __flush_tlb_all();
143 } 144 }
144#endif 145#endif
146 this_cpu_write(cpu_tlbstate.is_lazy, false);
145 147
146 if (real_prev == next) { 148 if (real_prev == next) {
147 VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != 149 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
148 next->context.ctx_id); 150 next->context.ctx_id);
149
150 if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
151 /*
152 * There's nothing to do: we weren't lazy, and we
153 * aren't changing our mm. We don't need to flush
154 * anything, nor do we need to update CR3, CR4, or
155 * LDTR.
156 */
157 return;
158 }
159
160 /* Resume remote flushes and then read tlb_gen. */
161 cpumask_set_cpu(cpu, mm_cpumask(next));
162 next_tlb_gen = atomic64_read(&next->context.tlb_gen);
163
164 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) <
165 next_tlb_gen) {
166 /*
167 * Ideally, we'd have a flush_tlb() variant that
168 * takes the known CR3 value as input. This would
169 * be faster on Xen PV and on hypothetical CPUs
170 * on which INVPCID is fast.
171 */
172 this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
173 next_tlb_gen);
174 write_cr3(build_cr3(next, prev_asid));
175 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
176 TLB_FLUSH_ALL);
177 }
178 151
179 /* 152 /*
180 * We just exited lazy mode, which means that CR4 and/or LDTR 153 * We don't currently support having a real mm loaded without
181 * may be stale. (Changes to the required CR4 and LDTR states 154 * our cpu set in mm_cpumask(). We have all the bookkeeping
182 * are not reflected in tlb_gen.) 155 * in place to figure out whether we would need to flush
156 * if our cpu were cleared in mm_cpumask(), but we don't
157 * currently use it.
183 */ 158 */
159 if (WARN_ON_ONCE(real_prev != &init_mm &&
160 !cpumask_test_cpu(cpu, mm_cpumask(next))))
161 cpumask_set_cpu(cpu, mm_cpumask(next));
162
163 return;
184 } else { 164 } else {
185 u16 new_asid; 165 u16 new_asid;
186 bool need_flush; 166 bool need_flush;
@@ -191,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
191 * mapped in the new pgd, we'll double-fault. Forcibly 171 * mapped in the new pgd, we'll double-fault. Forcibly
192 * map it. 172 * map it.
193 */ 173 */
194 unsigned int index = pgd_index(current_stack_pointer()); 174 unsigned int index = pgd_index(current_stack_pointer);
195 pgd_t *pgd = next->pgd + index; 175 pgd_t *pgd = next->pgd + index;
196 176
197 if (unlikely(pgd_none(*pgd))) 177 if (unlikely(pgd_none(*pgd)))
@@ -199,10 +179,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
199 } 179 }
200 180
201 /* Stop remote flushes for the previous mm */ 181 /* Stop remote flushes for the previous mm */
202 if (cpumask_test_cpu(cpu, mm_cpumask(real_prev))) 182 VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
203 cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); 183 real_prev != &init_mm);
204 184 cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
205 VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
206 185
207 /* 186 /*
208 * Start remote flushes and then read tlb_gen. 187 * Start remote flushes and then read tlb_gen.
@@ -233,6 +212,40 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
233} 212}
234 213
235/* 214/*
215 * Please ignore the name of this function. It should be called
216 * switch_to_kernel_thread().
217 *
218 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
219 * kernel thread or other context without an mm. Acceptable implementations
220 * include doing nothing whatsoever, switching to init_mm, or various clever
221 * lazy tricks to try to minimize TLB flushes.
222 *
223 * The scheduler reserves the right to call enter_lazy_tlb() several times
224 * in a row. It will notify us that we're going back to a real mm by
225 * calling switch_mm_irqs_off().
226 */
227void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
228{
229 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
230 return;
231
232 if (tlb_defer_switch_to_init_mm()) {
233 /*
234 * There's a significant optimization that may be possible
235 * here. We have accurate enough TLB flush tracking that we
236 * don't need to maintain coherence of TLB per se when we're
237 * lazy. We do, however, need to maintain coherence of
238 * paging-structure caches. We could, in principle, leave our
239 * old mm loaded and only switch to init_mm when
240 * tlb_remove_page() happens.
241 */
242 this_cpu_write(cpu_tlbstate.is_lazy, true);
243 } else {
244 switch_mm(NULL, &init_mm, NULL);
245 }
246}
247
248/*
236 * Call this when reinitializing a CPU. It fixes the following potential 249 * Call this when reinitializing a CPU. It fixes the following potential
237 * problems: 250 * problems:
238 * 251 *
@@ -303,16 +316,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
303 /* This code cannot presently handle being reentered. */ 316 /* This code cannot presently handle being reentered. */
304 VM_WARN_ON(!irqs_disabled()); 317 VM_WARN_ON(!irqs_disabled());
305 318
319 if (unlikely(loaded_mm == &init_mm))
320 return;
321
306 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != 322 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
307 loaded_mm->context.ctx_id); 323 loaded_mm->context.ctx_id);
308 324
309 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) { 325 if (this_cpu_read(cpu_tlbstate.is_lazy)) {
310 /* 326 /*
311 * We're in lazy mode -- don't flush. We can get here on 327 * We're in lazy mode. We need to at least flush our
312 * remote flushes due to races and on local flushes if a 328 * paging-structure cache to avoid speculatively reading
313 * kernel thread coincidentally flushes the mm it's lazily 329 * garbage into our TLB. Since switching to init_mm is barely
314 * still using. 330 * slower than a minimal flush, just switch to init_mm.
315 */ 331 */
332 switch_mm_irqs_off(NULL, &init_mm, NULL);
316 return; 333 return;
317 } 334 }
318 335
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 8c9573660d51..0554e8aef4d5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog)
284 /* if (index >= array->map.max_entries) 284 /* if (index >= array->map.max_entries)
285 * goto out; 285 * goto out;
286 */ 286 */
287 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 287 EMIT2(0x89, 0xD2); /* mov edx, edx */
288 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
288 offsetof(struct bpf_array, map.max_entries)); 289 offsetof(struct bpf_array, map.max_entries));
289 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
290#define OFFSET1 43 /* number of bytes to jump */ 290#define OFFSET1 43 /* number of bytes to jump */
291 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 291 EMIT2(X86_JBE, OFFSET1); /* jbe out */
292 label1 = cnt; 292 label1 = cnt;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0e7ef69e8531..d669e9d89001 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
93 int rc; 93 int rc;
94 94
95 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, 95 rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE,
96 "x86/xen/hvm_guest:prepare", 96 "x86/xen/guest:prepare",
97 cpu_up_prepare_cb, cpu_dead_cb); 97 cpu_up_prepare_cb, cpu_dead_cb);
98 if (rc >= 0) { 98 if (rc >= 0) {
99 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 99 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
100 "x86/xen/hvm_guest:online", 100 "x86/xen/guest:online",
101 xen_cpu_up_online, NULL); 101 xen_cpu_up_online, NULL);
102 if (rc < 0) 102 if (rc < 0)
103 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); 103 cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 7330cb3b2283..71495f1a86d7 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1238,21 +1238,16 @@ static void __init xen_pagetable_cleanhighmap(void)
1238 * from _brk_limit way up to the max_pfn_mapped (which is the end of 1238 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1239 * the ramdisk). We continue on, erasing PMD entries that point to page 1239 * the ramdisk). We continue on, erasing PMD entries that point to page
1240 * tables - do note that they are accessible at this stage via __va. 1240 * tables - do note that they are accessible at this stage via __va.
1241 * For good measure we also round up to the PMD - which means that if 1241 * As Xen is aligning the memory end to a 4MB boundary, for good
1242 * measure we also round up to PMD_SIZE * 2 - which means that if
1242 * anybody is using __ka address to the initial boot-stack - and try 1243 * anybody is using __ka address to the initial boot-stack - and try
1243 * to use it - they are going to crash. The xen_start_info has been 1244 * to use it - they are going to crash. The xen_start_info has been
1244 * taken care of already in xen_setup_kernel_pagetable. */ 1245 * taken care of already in xen_setup_kernel_pagetable. */
1245 addr = xen_start_info->pt_base; 1246 addr = xen_start_info->pt_base;
1246 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); 1247 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1247 1248
1248 xen_cleanhighmap(addr, addr + size); 1249 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1249 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); 1250 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1250#ifdef DEBUG
1251 /* This is superfluous and is not necessary, but you know what
1252 * lets do it. The MODULES_VADDR -> MODULES_END should be clear of
1253 * anything at this stage. */
1254 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1255#endif
1256} 1251}
1257#endif 1252#endif
1258 1253
diff --git a/block/bio.c b/block/bio.c
index b38e962fa83e..101c2a9b5481 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1239 */ 1239 */
1240 bmd->is_our_pages = map_data ? 0 : 1; 1240 bmd->is_our_pages = map_data ? 0 : 1;
1241 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); 1241 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
1242 iov_iter_init(&bmd->iter, iter->type, bmd->iov, 1242 bmd->iter = *iter;
1243 iter->nr_segs, iter->count); 1243 bmd->iter.iov = bmd->iov;
1244 1244
1245 ret = -ENOMEM; 1245 ret = -ENOMEM;
1246 bio = bio_kmalloc(gfp_mask, nr_pages); 1246 bio = bio_kmalloc(gfp_mask, nr_pages);
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1331 int ret, offset; 1331 int ret, offset;
1332 struct iov_iter i; 1332 struct iov_iter i;
1333 struct iovec iov; 1333 struct iovec iov;
1334 struct bio_vec *bvec;
1334 1335
1335 iov_for_each(iov, i, *iter) { 1336 iov_for_each(iov, i, *iter) {
1336 unsigned long uaddr = (unsigned long) iov.iov_base; 1337 unsigned long uaddr = (unsigned long) iov.iov_base;
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1375 ret = get_user_pages_fast(uaddr, local_nr_pages, 1376 ret = get_user_pages_fast(uaddr, local_nr_pages,
1376 (iter->type & WRITE) != WRITE, 1377 (iter->type & WRITE) != WRITE,
1377 &pages[cur_page]); 1378 &pages[cur_page]);
1378 if (ret < local_nr_pages) { 1379 if (unlikely(ret < local_nr_pages)) {
1380 for (j = cur_page; j < page_limit; j++) {
1381 if (!pages[j])
1382 break;
1383 put_page(pages[j]);
1384 }
1379 ret = -EFAULT; 1385 ret = -EFAULT;
1380 goto out_unmap; 1386 goto out_unmap;
1381 } 1387 }
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1383 offset = offset_in_page(uaddr); 1389 offset = offset_in_page(uaddr);
1384 for (j = cur_page; j < page_limit; j++) { 1390 for (j = cur_page; j < page_limit; j++) {
1385 unsigned int bytes = PAGE_SIZE - offset; 1391 unsigned int bytes = PAGE_SIZE - offset;
1392 unsigned short prev_bi_vcnt = bio->bi_vcnt;
1386 1393
1387 if (len <= 0) 1394 if (len <= 0)
1388 break; 1395 break;
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1397 bytes) 1404 bytes)
1398 break; 1405 break;
1399 1406
1407 /*
1408 * check if vector was merged with previous
1409 * drop page reference if needed
1410 */
1411 if (bio->bi_vcnt == prev_bi_vcnt)
1412 put_page(pages[j]);
1413
1400 len -= bytes; 1414 len -= bytes;
1401 offset = 0; 1415 offset = 0;
1402 } 1416 }
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
1423 return bio; 1437 return bio;
1424 1438
1425 out_unmap: 1439 out_unmap:
1426 for (j = 0; j < nr_pages; j++) { 1440 bio_for_each_segment_all(bvec, bio, j) {
1427 if (!pages[j]) 1441 put_page(bvec->bv_page);
1428 break;
1429 put_page(pages[j]);
1430 } 1442 }
1431 out: 1443 out:
1432 kfree(pages); 1444 kfree(pages);
diff --git a/block/blk-core.c b/block/blk-core.c
index aebe676225e6..048be4aa6024 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -854,6 +854,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
854 854
855 kobject_init(&q->kobj, &blk_queue_ktype); 855 kobject_init(&q->kobj, &blk_queue_ktype);
856 856
857#ifdef CONFIG_BLK_DEV_IO_TRACE
858 mutex_init(&q->blk_trace_mutex);
859#endif
857 mutex_init(&q->sysfs_lock); 860 mutex_init(&q->sysfs_lock);
858 spin_lock_init(&q->__queue_lock); 861 spin_lock_init(&q->__queue_lock);
859 862
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 980e73095643..de294d775acf 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
815 goto err; 815 goto err;
816 816
817 /* 817 /*
818 * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir 818 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
819 * didn't exist yet (because we don't know what to name the directory 819 * didn't exist yet (because we don't know what to name the directory
820 * until the queue is registered to a gendisk). 820 * until the queue is registered to a gendisk).
821 */ 821 */
822 if (q->elevator && !q->sched_debugfs_dir)
823 blk_mq_debugfs_register_sched(q);
824
825 /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
822 queue_for_each_hw_ctx(q, hctx, i) { 826 queue_for_each_hw_ctx(q, hctx, i) {
823 if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) 827 if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
824 goto err; 828 goto err;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0fea76aa0f3f..17816a028dcb 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
1911 1911
1912 tg->disptime = jiffies - 1; 1912 tg->disptime = jiffies - 1;
1913 throtl_select_dispatch(sq); 1913 throtl_select_dispatch(sq);
1914 throtl_schedule_next_dispatch(sq, false); 1914 throtl_schedule_next_dispatch(sq, true);
1915 } 1915 }
1916 rcu_read_unlock(); 1916 rcu_read_unlock();
1917 throtl_select_dispatch(&td->service_queue); 1917 throtl_select_dispatch(&td->service_queue);
1918 throtl_schedule_next_dispatch(&td->service_queue, false); 1918 throtl_schedule_next_dispatch(&td->service_queue, true);
1919 queue_work(kthrotld_workqueue, &td->dispatch_work); 1919 queue_work(kthrotld_workqueue, &td->dispatch_work);
1920} 1920}
1921 1921
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index c82408c7cc3c..15d25ccd51a5 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -154,7 +154,6 @@ static int bsg_prepare_job(struct device *dev, struct request *req)
154failjob_rls_rqst_payload: 154failjob_rls_rqst_payload:
155 kfree(job->request_payload.sg_list); 155 kfree(job->request_payload.sg_list);
156failjob_rls_job: 156failjob_rls_job:
157 kfree(job);
158 return -ENOMEM; 157 return -ENOMEM;
159} 158}
160 159
@@ -208,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
208 struct bsg_job *job = blk_mq_rq_to_pdu(req); 207 struct bsg_job *job = blk_mq_rq_to_pdu(req);
209 struct scsi_request *sreq = &job->sreq; 208 struct scsi_request *sreq = &job->sreq;
210 209
210 /* called right after the request is allocated for the request_queue */
211
212 sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
213 if (!sreq->sense)
214 return -ENOMEM;
215
216 return 0;
217}
218
219static void bsg_initialize_rq(struct request *req)
220{
221 struct bsg_job *job = blk_mq_rq_to_pdu(req);
222 struct scsi_request *sreq = &job->sreq;
223 void *sense = sreq->sense;
224
225 /* called right before the request is given to the request_queue user */
226
211 memset(job, 0, sizeof(*job)); 227 memset(job, 0, sizeof(*job));
212 228
213 scsi_req_init(sreq); 229 scsi_req_init(sreq);
230
231 sreq->sense = sense;
214 sreq->sense_len = SCSI_SENSE_BUFFERSIZE; 232 sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
215 sreq->sense = kzalloc(sreq->sense_len, gfp);
216 if (!sreq->sense)
217 return -ENOMEM;
218 233
219 job->req = req; 234 job->req = req;
220 job->reply = sreq->sense; 235 job->reply = sense;
221 job->reply_len = sreq->sense_len; 236 job->reply_len = sreq->sense_len;
222 job->dd_data = job + 1; 237 job->dd_data = job + 1;
223
224 return 0;
225} 238}
226 239
227static void bsg_exit_rq(struct request_queue *q, struct request *req) 240static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -252,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
252 q->cmd_size = sizeof(struct bsg_job) + dd_job_size; 265 q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
253 q->init_rq_fn = bsg_init_rq; 266 q->init_rq_fn = bsg_init_rq;
254 q->exit_rq_fn = bsg_exit_rq; 267 q->exit_rq_fn = bsg_exit_rq;
268 q->initialize_rq_fn = bsg_initialize_rq;
255 q->request_fn = bsg_request_fn; 269 q->request_fn = bsg_request_fn;
256 270
257 ret = blk_init_allocated_queue(q); 271 ret = blk_init_allocated_queue(q);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 86e8fe1adcdb..88c555db4e5d 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -112,7 +112,7 @@ ssize_t part_stat_show(struct device *dev,
112 struct device_attribute *attr, char *buf) 112 struct device_attribute *attr, char *buf)
113{ 113{
114 struct hd_struct *p = dev_to_part(dev); 114 struct hd_struct *p = dev_to_part(dev);
115 struct request_queue *q = dev_to_disk(dev)->queue; 115 struct request_queue *q = part_to_disk(p)->queue;
116 unsigned int inflight[2]; 116 unsigned int inflight[2];
117 int cpu; 117 int cpu;
118 118
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index e4b0ed386bc8..39aecad286fe 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
57 char *req, *p; 57 char *req, *p;
58 int len; 58 int len;
59 59
60 BUG_ON(!id_0 && !id_1);
61
60 if (id_0) { 62 if (id_0) {
61 lookup = id_0->data; 63 lookup = id_0->data;
62 len = id_0->len; 64 len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
105 if (id_0 && id_1) { 107 if (id_0 && id_1) {
106 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); 108 const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
107 109
108 if (!kids->id[0]) { 110 if (!kids->id[1]) {
109 pr_debug("First ID matches, but second is missing\n"); 111 pr_debug("First ID matches, but second is missing\n");
110 goto reject; 112 goto reject;
111 } 113 }
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd8649117..d140d8bb2c96 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
88 bool want = false; 88 bool want = false;
89 89
90 sinfo = msg->signed_infos; 90 sinfo = msg->signed_infos;
91 if (!sinfo)
92 goto inconsistent;
93
91 if (sinfo->authattrs) { 94 if (sinfo->authattrs) {
92 want = true; 95 want = true;
93 msg->have_authattrs = true; 96 msg->have_authattrs = true;
diff --git a/crypto/shash.c b/crypto/shash.c
index 5e31c8d776df..325a14da5827 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
41 int err; 41 int err;
42 42
43 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); 43 absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
44 buffer = kmalloc(absize, GFP_KERNEL); 44 buffer = kmalloc(absize, GFP_ATOMIC);
45 if (!buffer) 45 if (!buffer)
46 return -ENOMEM; 46 return -ENOMEM;
47 47
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req)
275 275
276int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) 276int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
277{ 277{
278 struct scatterlist *sg = req->src;
279 unsigned int offset = sg->offset;
280 unsigned int nbytes = req->nbytes; 278 unsigned int nbytes = req->nbytes;
279 struct scatterlist *sg;
280 unsigned int offset;
281 int err; 281 int err;
282 282
283 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { 283 if (nbytes &&
284 (sg = req->src, offset = sg->offset,
285 nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
284 void *data; 286 void *data;
285 287
286 data = kmap_atomic(sg_page(sg)); 288 data = kmap_atomic(sg_page(sg));
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4faa0fd53b0c..d5692e35fab1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
426 426
427static int skcipher_walk_first(struct skcipher_walk *walk) 427static int skcipher_walk_first(struct skcipher_walk *walk)
428{ 428{
429 walk->nbytes = 0;
430
431 if (WARN_ON_ONCE(in_irq())) 429 if (WARN_ON_ONCE(in_irq()))
432 return -EDEADLK; 430 return -EDEADLK;
433 431
434 if (unlikely(!walk->total))
435 return 0;
436
437 walk->buffer = NULL; 432 walk->buffer = NULL;
438 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { 433 if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
439 int err = skcipher_copy_iv(walk); 434 int err = skcipher_copy_iv(walk);
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
452{ 447{
453 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); 448 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
454 449
450 walk->total = req->cryptlen;
451 walk->nbytes = 0;
452
453 if (unlikely(!walk->total))
454 return 0;
455
455 scatterwalk_start(&walk->in, req->src); 456 scatterwalk_start(&walk->in, req->src);
456 scatterwalk_start(&walk->out, req->dst); 457 scatterwalk_start(&walk->out, req->dst);
457 458
458 walk->total = req->cryptlen;
459 walk->iv = req->iv; 459 walk->iv = req->iv;
460 walk->oiv = req->iv; 460 walk->oiv = req->iv;
461 461
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
509 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 509 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
510 int err; 510 int err;
511 511
512 walk->nbytes = 0;
513
514 if (unlikely(!walk->total))
515 return 0;
516
512 walk->flags &= ~SKCIPHER_WALK_PHYS; 517 walk->flags &= ~SKCIPHER_WALK_PHYS;
513 518
514 scatterwalk_start(&walk->in, req->src); 519 scatterwalk_start(&walk->in, req->src);
diff --git a/crypto/xts.c b/crypto/xts.c
index d86c11a8c882..e31828ed0046 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
554 ctx->name[len - 1] = 0; 554 ctx->name[len - 1] = 0;
555 555
556 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, 556 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
557 "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) 557 "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
558 return -ENAMETOOLONG; 558 err = -ENAMETOOLONG;
559 goto err_drop_spawn;
560 }
559 } else 561 } else
560 goto err_drop_spawn; 562 goto err_drop_spawn;
561 563
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 077f9bad6f44..3c3a37b8503b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
743 } 743 }
744 ghes_do_proc(ghes, ghes->estatus); 744 ghes_do_proc(ghes, ghes->estatus);
745 745
746out:
747 ghes_clear_estatus(ghes);
748
749 if (rc == -ENOENT)
750 return rc;
751
746 /* 752 /*
747 * GHESv2 type HEST entries introduce support for error acknowledgment, 753 * GHESv2 type HEST entries introduce support for error acknowledgment,
748 * so only acknowledge the error if this support is present. 754 * so only acknowledge the error if this support is present.
749 */ 755 */
750 if (is_hest_type_generic_v2(ghes)) { 756 if (is_hest_type_generic_v2(ghes))
751 rc = ghes_ack_error(ghes->generic_v2); 757 return ghes_ack_error(ghes->generic_v2);
752 if (rc) 758
753 return rc;
754 }
755out:
756 ghes_clear_estatus(ghes);
757 return rc; 759 return rc;
758} 760}
759 761
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9565d572f8dd..de56394dd161 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1178,12 +1178,44 @@ dev_put:
1178 return ret; 1178 return ret;
1179} 1179}
1180 1180
1181static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
1182{
1183 if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1184 struct acpi_iort_node *parent;
1185 struct acpi_iort_id_mapping *map;
1186 int i;
1187
1188 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1189 iort_node->mapping_offset);
1190
1191 for (i = 0; i < iort_node->mapping_count; i++, map++) {
1192 if (!map->output_reference)
1193 continue;
1194
1195 parent = ACPI_ADD_PTR(struct acpi_iort_node,
1196 iort_table, map->output_reference);
1197 /*
1198 * If we detect a RC->SMMU mapping, make sure
1199 * we enable ACS on the system.
1200 */
1201 if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1202 (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1203 pci_request_acs();
1204 return true;
1205 }
1206 }
1207 }
1208
1209 return false;
1210}
1211
1181static void __init iort_init_platform_devices(void) 1212static void __init iort_init_platform_devices(void)
1182{ 1213{
1183 struct acpi_iort_node *iort_node, *iort_end; 1214 struct acpi_iort_node *iort_node, *iort_end;
1184 struct acpi_table_iort *iort; 1215 struct acpi_table_iort *iort;
1185 struct fwnode_handle *fwnode; 1216 struct fwnode_handle *fwnode;
1186 int i, ret; 1217 int i, ret;
1218 bool acs_enabled = false;
1187 1219
1188 /* 1220 /*
1189 * iort_table and iort both point to the start of IORT table, but 1221 * iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
1203 return; 1235 return;
1204 } 1236 }
1205 1237
1238 if (!acs_enabled)
1239 acs_enabled = iort_enable_acs(iort_node);
1240
1206 if ((iort_node->type == ACPI_IORT_NODE_SMMU) || 1241 if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
1207 (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { 1242 (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
1208 1243
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 3fb8ff513461..e26ea209b63e 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
571 * } 571 * }
572 * } 572 * }
573 * 573 *
574 * Calling this function with index %2 return %-ENOENT and with index %3 574 * Calling this function with index %2 or index %3 return %-ENOENT. If the
575 * returns the last entry. If the property does not contain any more values 575 * property does not contain any more values %-ENOENT is returned. The NULL
576 * %-ENODATA is returned. The NULL entry must be single integer and 576 * entry must be single integer and preferably contain value %0.
577 * preferably contain value %0.
578 * 577 *
579 * Return: %0 on success, negative error code on failure. 578 * Return: %0 on success, negative error code on failure.
580 */ 579 */
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
590 589
591 data = acpi_device_data_of_node(fwnode); 590 data = acpi_device_data_of_node(fwnode);
592 if (!data) 591 if (!data)
593 return -EINVAL; 592 return -ENOENT;
594 593
595 ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); 594 ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
596 if (ret) 595 if (ret)
597 return ret; 596 return ret == -EINVAL ? -ENOENT : -EINVAL;
598 597
599 /* 598 /*
600 * The simplest case is when the value is a single reference. Just 599 * The simplest case is when the value is a single reference. Just
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
606 605
607 ret = acpi_bus_get_device(obj->reference.handle, &device); 606 ret = acpi_bus_get_device(obj->reference.handle, &device);
608 if (ret) 607 if (ret)
609 return ret; 608 return ret == -ENODEV ? -EINVAL : ret;
610 609
611 args->adev = device; 610 args->adev = device;
612 args->nargs = 0; 611 args->nargs = 0;
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
622 * The index argument is then used to determine which reference 621 * The index argument is then used to determine which reference
623 * the caller wants (along with the arguments). 622 * the caller wants (along with the arguments).
624 */ 623 */
625 if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) 624 if (obj->type != ACPI_TYPE_PACKAGE)
626 return -EPROTO; 625 return -EINVAL;
626 if (index >= obj->package.count)
627 return -ENOENT;
627 628
628 element = obj->package.elements; 629 element = obj->package.elements;
629 end = element + obj->package.count; 630 end = element + obj->package.count;
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
635 ret = acpi_bus_get_device(element->reference.handle, 636 ret = acpi_bus_get_device(element->reference.handle,
636 &device); 637 &device);
637 if (ret) 638 if (ret)
638 return -ENODEV; 639 return -EINVAL;
639 640
640 nargs = 0; 641 nargs = 0;
641 element++; 642 element++;
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
649 else if (type == ACPI_TYPE_LOCAL_REFERENCE) 650 else if (type == ACPI_TYPE_LOCAL_REFERENCE)
650 break; 651 break;
651 else 652 else
652 return -EPROTO; 653 return -EINVAL;
653 } 654 }
654 655
655 if (nargs > MAX_ACPI_REFERENCE_ARGS) 656 if (nargs > MAX_ACPI_REFERENCE_ARGS)
656 return -EPROTO; 657 return -EINVAL;
657 658
658 if (idx == index) { 659 if (idx == index) {
659 args->adev = device; 660 args->adev = device;
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
670 return -ENOENT; 671 return -ENOENT;
671 element++; 672 element++;
672 } else { 673 } else {
673 return -EPROTO; 674 return -EINVAL;
674 } 675 }
675 676
676 idx++; 677 idx++;
677 } 678 }
678 679
679 return -ENODATA; 680 return -ENOENT;
680} 681}
681EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); 682EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
682 683
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d055b3f2a207..fddf76ef5bd6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
2217 debug_id, (u64)fda->num_fds); 2217 debug_id, (u64)fda->num_fds);
2218 continue; 2218 continue;
2219 } 2219 }
2220 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 2220 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2221 for (fd_index = 0; fd_index < fda->num_fds; fd_index++) 2221 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2222 task_close_fd(proc, fd_array[fd_index]); 2222 task_close_fd(proc, fd_array[fd_index]);
2223 } break; 2223 } break;
@@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
2326 (u64)node->ptr); 2326 (u64)node->ptr);
2327 binder_node_unlock(node); 2327 binder_node_unlock(node);
2328 } else { 2328 } else {
2329 int ret;
2330 struct binder_ref_data dest_rdata; 2329 struct binder_ref_data dest_rdata;
2331 2330
2332 binder_node_unlock(node); 2331 binder_node_unlock(node);
@@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2442 */ 2441 */
2443 parent_buffer = parent->buffer - 2442 parent_buffer = parent->buffer -
2444 binder_alloc_get_user_buffer_offset(&target_proc->alloc); 2443 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2445 fd_array = (u32 *)(parent_buffer + fda->parent_offset); 2444 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2446 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { 2445 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2447 binder_user_error("%d:%d parent offset not aligned correctly.\n", 2446 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2448 proc->pid, thread->pid); 2447 proc->pid, thread->pid);
@@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
2508 proc->pid, thread->pid); 2507 proc->pid, thread->pid);
2509 return -EINVAL; 2508 return -EINVAL;
2510 } 2509 }
2511 parent_buffer = (u8 *)(parent->buffer - 2510 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2512 binder_alloc_get_user_buffer_offset( 2511 binder_alloc_get_user_buffer_offset(
2513 &target_proc->alloc)); 2512 &target_proc->alloc));
2514 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; 2513 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -2583,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
2583 return true; 2582 return true;
2584} 2583}
2585 2584
2585/**
2586 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2587 * @node: struct binder_node for which to get refs
2588 * @proc: returns @node->proc if valid
2589 * @error: if no @proc then returns BR_DEAD_REPLY
2590 *
2591 * User-space normally keeps the node alive when creating a transaction
2592 * since it has a reference to the target. The local strong ref keeps it
2593 * alive if the sending process dies before the target process processes
2594 * the transaction. If the source process is malicious or has a reference
2595 * counting bug, relying on the local strong ref can fail.
2596 *
2597 * Since user-space can cause the local strong ref to go away, we also take
2598 * a tmpref on the node to ensure it survives while we are constructing
2599 * the transaction. We also need a tmpref on the proc while we are
2600 * constructing the transaction, so we take that here as well.
2601 *
2602 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2603 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2604 * target proc has died, @error is set to BR_DEAD_REPLY
2605 */
2606static struct binder_node *binder_get_node_refs_for_txn(
2607 struct binder_node *node,
2608 struct binder_proc **procp,
2609 uint32_t *error)
2610{
2611 struct binder_node *target_node = NULL;
2612
2613 binder_node_inner_lock(node);
2614 if (node->proc) {
2615 target_node = node;
2616 binder_inc_node_nilocked(node, 1, 0, NULL);
2617 binder_inc_node_tmpref_ilocked(node);
2618 node->proc->tmp_ref++;
2619 *procp = node->proc;
2620 } else
2621 *error = BR_DEAD_REPLY;
2622 binder_node_inner_unlock(node);
2623
2624 return target_node;
2625}
2626
2586static void binder_transaction(struct binder_proc *proc, 2627static void binder_transaction(struct binder_proc *proc,
2587 struct binder_thread *thread, 2628 struct binder_thread *thread,
2588 struct binder_transaction_data *tr, int reply, 2629 struct binder_transaction_data *tr, int reply,
@@ -2686,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc,
2686 ref = binder_get_ref_olocked(proc, tr->target.handle, 2727 ref = binder_get_ref_olocked(proc, tr->target.handle,
2687 true); 2728 true);
2688 if (ref) { 2729 if (ref) {
2689 binder_inc_node(ref->node, 1, 0, NULL); 2730 target_node = binder_get_node_refs_for_txn(
2690 target_node = ref->node; 2731 ref->node, &target_proc,
2691 } 2732 &return_error);
2692 binder_proc_unlock(proc); 2733 } else {
2693 if (target_node == NULL) {
2694 binder_user_error("%d:%d got transaction to invalid handle\n", 2734 binder_user_error("%d:%d got transaction to invalid handle\n",
2695 proc->pid, thread->pid); 2735 proc->pid, thread->pid);
2696 return_error = BR_FAILED_REPLY; 2736 return_error = BR_FAILED_REPLY;
2697 return_error_param = -EINVAL;
2698 return_error_line = __LINE__;
2699 goto err_invalid_target_handle;
2700 } 2737 }
2738 binder_proc_unlock(proc);
2701 } else { 2739 } else {
2702 mutex_lock(&context->context_mgr_node_lock); 2740 mutex_lock(&context->context_mgr_node_lock);
2703 target_node = context->binder_context_mgr_node; 2741 target_node = context->binder_context_mgr_node;
2704 if (target_node == NULL) { 2742 if (target_node)
2743 target_node = binder_get_node_refs_for_txn(
2744 target_node, &target_proc,
2745 &return_error);
2746 else
2705 return_error = BR_DEAD_REPLY; 2747 return_error = BR_DEAD_REPLY;
2706 mutex_unlock(&context->context_mgr_node_lock);
2707 return_error_line = __LINE__;
2708 goto err_no_context_mgr_node;
2709 }
2710 binder_inc_node(target_node, 1, 0, NULL);
2711 mutex_unlock(&context->context_mgr_node_lock); 2748 mutex_unlock(&context->context_mgr_node_lock);
2712 } 2749 }
2713 e->to_node = target_node->debug_id; 2750 if (!target_node) {
2714 binder_node_lock(target_node); 2751 /*
2715 target_proc = target_node->proc; 2752 * return_error is set above
2716 if (target_proc == NULL) { 2753 */
2717 binder_node_unlock(target_node); 2754 return_error_param = -EINVAL;
2718 return_error = BR_DEAD_REPLY;
2719 return_error_line = __LINE__; 2755 return_error_line = __LINE__;
2720 goto err_dead_binder; 2756 goto err_dead_binder;
2721 } 2757 }
2722 binder_inner_proc_lock(target_proc); 2758 e->to_node = target_node->debug_id;
2723 target_proc->tmp_ref++;
2724 binder_inner_proc_unlock(target_proc);
2725 binder_node_unlock(target_node);
2726 if (security_binder_transaction(proc->tsk, 2759 if (security_binder_transaction(proc->tsk,
2727 target_proc->tsk) < 0) { 2760 target_proc->tsk) < 0) {
2728 return_error = BR_FAILED_REPLY; 2761 return_error = BR_FAILED_REPLY;
@@ -3072,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc,
3072 if (target_thread) 3105 if (target_thread)
3073 binder_thread_dec_tmpref(target_thread); 3106 binder_thread_dec_tmpref(target_thread);
3074 binder_proc_dec_tmpref(target_proc); 3107 binder_proc_dec_tmpref(target_proc);
3108 if (target_node)
3109 binder_dec_node_tmpref(target_node);
3075 /* 3110 /*
3076 * write barrier to synchronize with initialization 3111 * write barrier to synchronize with initialization
3077 * of log entry 3112 * of log entry
@@ -3083,6 +3118,7 @@ static void binder_transaction(struct binder_proc *proc,
3083err_dead_proc_or_thread: 3118err_dead_proc_or_thread:
3084 return_error = BR_DEAD_REPLY; 3119 return_error = BR_DEAD_REPLY;
3085 return_error_line = __LINE__; 3120 return_error_line = __LINE__;
3121 binder_dequeue_work(proc, tcomplete);
3086err_translate_failed: 3122err_translate_failed:
3087err_bad_object_type: 3123err_bad_object_type:
3088err_bad_offset: 3124err_bad_offset:
@@ -3090,6 +3126,8 @@ err_bad_parent:
3090err_copy_data_failed: 3126err_copy_data_failed:
3091 trace_binder_transaction_failed_buffer_release(t->buffer); 3127 trace_binder_transaction_failed_buffer_release(t->buffer);
3092 binder_transaction_buffer_release(target_proc, t->buffer, offp); 3128 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3129 if (target_node)
3130 binder_dec_node_tmpref(target_node);
3093 target_node = NULL; 3131 target_node = NULL;
3094 t->buffer->transaction = NULL; 3132 t->buffer->transaction = NULL;
3095 binder_alloc_free_buf(&target_proc->alloc, t->buffer); 3133 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3104,13 +3142,14 @@ err_bad_call_stack:
3104err_empty_call_stack: 3142err_empty_call_stack:
3105err_dead_binder: 3143err_dead_binder:
3106err_invalid_target_handle: 3144err_invalid_target_handle:
3107err_no_context_mgr_node:
3108 if (target_thread) 3145 if (target_thread)
3109 binder_thread_dec_tmpref(target_thread); 3146 binder_thread_dec_tmpref(target_thread);
3110 if (target_proc) 3147 if (target_proc)
3111 binder_proc_dec_tmpref(target_proc); 3148 binder_proc_dec_tmpref(target_proc);
3112 if (target_node) 3149 if (target_node) {
3113 binder_dec_node(target_node, 1, 0); 3150 binder_dec_node(target_node, 1, 0);
3151 binder_dec_node_tmpref(target_node);
3152 }
3114 3153
3115 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 3154 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3116 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", 3155 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@@ -3623,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
3623 } 3662 }
3624} 3663}
3625 3664
3626static int binder_has_thread_work(struct binder_thread *thread)
3627{
3628 return !binder_worklist_empty(thread->proc, &thread->todo) ||
3629 thread->looper_need_return;
3630}
3631
3632static int binder_put_node_cmd(struct binder_proc *proc, 3665static int binder_put_node_cmd(struct binder_proc *proc,
3633 struct binder_thread *thread, 3666 struct binder_thread *thread,
3634 void __user **ptrp, 3667 void __user **ptrp,
@@ -4258,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
4258 4291
4259 binder_inner_proc_unlock(thread->proc); 4292 binder_inner_proc_unlock(thread->proc);
4260 4293
4261 if (binder_has_work(thread, wait_for_proc_work))
4262 return POLLIN;
4263
4264 poll_wait(filp, &thread->wait, wait); 4294 poll_wait(filp, &thread->wait, wait);
4265 4295
4266 if (binder_has_thread_work(thread)) 4296 if (binder_has_work(thread, wait_for_proc_work))
4267 return POLLIN; 4297 return POLLIN;
4268 4298
4269 return 0; 4299 return 0;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8fe165844e47..c2819a3d58a6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
215 } 215 }
216 } 216 }
217 217
218 if (!vma && need_mm) 218 if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
219 mm = get_task_mm(alloc->tsk); 219 mm = alloc->vma_vm_mm;
220 220
221 if (mm) { 221 if (mm) {
222 down_write(&mm->mmap_sem); 222 down_write(&mm->mmap_sem);
223 vma = alloc->vma; 223 vma = alloc->vma;
224 if (vma && mm != alloc->vma_vm_mm) {
225 pr_err("%d: vma mm and task mm mismatch\n",
226 alloc->pid);
227 vma = NULL;
228 }
229 } 224 }
230 225
231 if (!vma && need_mm) { 226 if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
565 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 560 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
566 "%d: merge free, buffer %pK do not share page with %pK or %pK\n", 561 "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
567 alloc->pid, buffer->data, 562 alloc->pid, buffer->data,
568 prev->data, next->data); 563 prev->data, next ? next->data : NULL);
569 binder_update_page_range(alloc, 0, buffer_start_page(buffer), 564 binder_update_page_range(alloc, 0, buffer_start_page(buffer),
570 buffer_start_page(buffer) + PAGE_SIZE, 565 buffer_start_page(buffer) + PAGE_SIZE,
571 NULL); 566 NULL);
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
720 barrier(); 715 barrier();
721 alloc->vma = vma; 716 alloc->vma = vma;
722 alloc->vma_vm_mm = vma->vm_mm; 717 alloc->vma_vm_mm = vma->vm_mm;
718 mmgrab(alloc->vma_vm_mm);
723 719
724 return 0; 720 return 0;
725 721
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
795 vfree(alloc->buffer); 791 vfree(alloc->buffer);
796 } 792 }
797 mutex_unlock(&alloc->mutex); 793 mutex_unlock(&alloc->mutex);
794 if (alloc->vma_vm_mm)
795 mmdrop(alloc->vma_vm_mm);
798 796
799 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, 797 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
800 "%s: %d buffers %d, pages %d\n", 798 "%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
889void binder_alloc_vma_close(struct binder_alloc *alloc) 887void binder_alloc_vma_close(struct binder_alloc *alloc)
890{ 888{
891 WRITE_ONCE(alloc->vma, NULL); 889 WRITE_ONCE(alloc->vma, NULL);
892 WRITE_ONCE(alloc->vma_vm_mm, NULL);
893} 890}
894 891
895/** 892/**
@@ -913,6 +910,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
913 struct binder_alloc *alloc; 910 struct binder_alloc *alloc;
914 uintptr_t page_addr; 911 uintptr_t page_addr;
915 size_t index; 912 size_t index;
913 struct vm_area_struct *vma;
916 914
917 alloc = page->alloc; 915 alloc = page->alloc;
918 if (!mutex_trylock(&alloc->mutex)) 916 if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +921,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
923 921
924 index = page - alloc->pages; 922 index = page - alloc->pages;
925 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 923 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
926 if (alloc->vma) { 924 vma = alloc->vma;
927 mm = get_task_mm(alloc->tsk); 925 if (vma) {
928 if (!mm) 926 if (!mmget_not_zero(alloc->vma_vm_mm))
929 goto err_get_task_mm_failed; 927 goto err_mmget;
928 mm = alloc->vma_vm_mm;
930 if (!down_write_trylock(&mm->mmap_sem)) 929 if (!down_write_trylock(&mm->mmap_sem))
931 goto err_down_write_mmap_sem_failed; 930 goto err_down_write_mmap_sem_failed;
931 }
932
933 list_lru_isolate(lru, item);
934 spin_unlock(lock);
932 935
936 if (vma) {
933 trace_binder_unmap_user_start(alloc, index); 937 trace_binder_unmap_user_start(alloc, index);
934 938
935 zap_page_range(alloc->vma, 939 zap_page_range(vma,
936 page_addr + alloc->user_buffer_offset, 940 page_addr + alloc->user_buffer_offset,
937 PAGE_SIZE); 941 PAGE_SIZE);
938 942
@@ -950,14 +954,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
950 954
951 trace_binder_unmap_kernel_end(alloc, index); 955 trace_binder_unmap_kernel_end(alloc, index);
952 956
953 list_lru_isolate(lru, item); 957 spin_lock(lock);
954
955 mutex_unlock(&alloc->mutex); 958 mutex_unlock(&alloc->mutex);
956 return LRU_REMOVED; 959 return LRU_REMOVED_RETRY;
957 960
958err_down_write_mmap_sem_failed: 961err_down_write_mmap_sem_failed:
959 mmput(mm); 962 mmput_async(mm);
960err_get_task_mm_failed: 963err_mmget:
961err_page_already_freed: 964err_page_already_freed:
962 mutex_unlock(&alloc->mutex); 965 mutex_unlock(&alloc->mutex);
963err_get_alloc_mutex_failed: 966err_get_alloc_mutex_failed:
@@ -996,7 +999,6 @@ struct shrinker binder_shrinker = {
996 */ 999 */
997void binder_alloc_init(struct binder_alloc *alloc) 1000void binder_alloc_init(struct binder_alloc *alloc)
998{ 1001{
999 alloc->tsk = current->group_leader;
1000 alloc->pid = current->group_leader->pid; 1002 alloc->pid = current->group_leader->pid;
1001 mutex_init(&alloc->mutex); 1003 mutex_init(&alloc->mutex);
1002 INIT_LIST_HEAD(&alloc->buffers); 1004 INIT_LIST_HEAD(&alloc->buffers);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index a3a3602c689c..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,6 @@ struct binder_lru_page {
100 */ 100 */
101struct binder_alloc { 101struct binder_alloc {
102 struct mutex mutex; 102 struct mutex mutex;
103 struct task_struct *tsk;
104 struct vm_area_struct *vma; 103 struct vm_area_struct *vma;
105 struct mm_struct *vma_vm_mm; 104 struct mm_struct *vma_vm_mm;
106 void *buffer; 105 void *buffer;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cb9b0e9090e3..9f78bb03bb76 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
621static int ahci_pci_reset_controller(struct ata_host *host) 621static int ahci_pci_reset_controller(struct ata_host *host)
622{ 622{
623 struct pci_dev *pdev = to_pci_dev(host->dev); 623 struct pci_dev *pdev = to_pci_dev(host->dev);
624 int rc;
624 625
625 ahci_reset_controller(host); 626 rc = ahci_reset_controller(host);
627 if (rc)
628 return rc;
626 629
627 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 630 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
628 struct ahci_host_priv *hpriv = host->private_data; 631 struct ahci_host_priv *hpriv = host->private_data;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 8401c3b5be92..b702c20fbc2b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = {
492 { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ 492 { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */
493 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ 493 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */
494 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ 494 { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
495 { 0x24CA, 0x10CF, 0x11AB }, /* ICH4M on Fujitsu-Siemens Lifebook S6120 */
495 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ 496 { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
496 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ 497 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
497 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ 498 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1945a8ea2099..ee4c1ec9dca0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = {
3234}; 3234};
3235 3235
3236#define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3236#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3237#define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3237#define EZ(v, unit) ((v)?ENOUGH(((v) * 1000), unit):0)
3238 3238
3239static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3239static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3240{ 3240{
3241 q->setup = EZ(t->setup * 1000, T); 3241 q->setup = EZ(t->setup, T);
3242 q->act8b = EZ(t->act8b * 1000, T); 3242 q->act8b = EZ(t->act8b, T);
3243 q->rec8b = EZ(t->rec8b * 1000, T); 3243 q->rec8b = EZ(t->rec8b, T);
3244 q->cyc8b = EZ(t->cyc8b * 1000, T); 3244 q->cyc8b = EZ(t->cyc8b, T);
3245 q->active = EZ(t->active * 1000, T); 3245 q->active = EZ(t->active, T);
3246 q->recover = EZ(t->recover * 1000, T); 3246 q->recover = EZ(t->recover, T);
3247 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3247 q->dmack_hold = EZ(t->dmack_hold, T);
3248 q->cycle = EZ(t->cycle * 1000, T); 3248 q->cycle = EZ(t->cycle, T);
3249 q->udma = EZ(t->udma * 1000, UT); 3249 q->udma = EZ(t->udma, UT);
3250} 3250}
3251 3251
3252void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3252void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index cfeb049a01ef..642afd88870b 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
647static int charlcd_open(struct inode *inode, struct file *file) 647static int charlcd_open(struct inode *inode, struct file *file)
648{ 648{
649 struct charlcd_priv *priv = to_priv(the_charlcd); 649 struct charlcd_priv *priv = to_priv(the_charlcd);
650 int ret;
650 651
652 ret = -EBUSY;
651 if (!atomic_dec_and_test(&charlcd_available)) 653 if (!atomic_dec_and_test(&charlcd_available))
652 return -EBUSY; /* open only once at a time */ 654 goto fail; /* open only once at a time */
653 655
656 ret = -EPERM;
654 if (file->f_mode & FMODE_READ) /* device is write-only */ 657 if (file->f_mode & FMODE_READ) /* device is write-only */
655 return -EPERM; 658 goto fail;
656 659
657 if (priv->must_clear) { 660 if (priv->must_clear) {
658 charlcd_clear_display(&priv->lcd); 661 charlcd_clear_display(&priv->lcd);
659 priv->must_clear = false; 662 priv->must_clear = false;
660 } 663 }
661 return nonseekable_open(inode, file); 664 return nonseekable_open(inode, file);
665
666 fail:
667 atomic_inc(&charlcd_available);
668 return ret;
662} 669}
663 670
664static int charlcd_release(struct inode *inode, struct file *file) 671static int charlcd_release(struct inode *inode, struct file *file)
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index df126dcdaf18..6911acd896d9 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
1105 1105
1106static int keypad_open(struct inode *inode, struct file *file) 1106static int keypad_open(struct inode *inode, struct file *file)
1107{ 1107{
1108 int ret;
1109
1110 ret = -EBUSY;
1108 if (!atomic_dec_and_test(&keypad_available)) 1111 if (!atomic_dec_and_test(&keypad_available))
1109 return -EBUSY; /* open only once at a time */ 1112 goto fail; /* open only once at a time */
1110 1113
1114 ret = -EPERM;
1111 if (file->f_mode & FMODE_WRITE) /* device is read-only */ 1115 if (file->f_mode & FMODE_WRITE) /* device is read-only */
1112 return -EPERM; 1116 goto fail;
1113 1117
1114 keypad_buflen = 0; /* flush the buffer on opening */ 1118 keypad_buflen = 0; /* flush the buffer on opening */
1115 return 0; 1119 return 0;
1120 fail:
1121 atomic_inc(&keypad_available);
1122 return ret;
1116} 1123}
1117 1124
1118static int keypad_release(struct inode *inode, struct file *file) 1125static int keypad_release(struct inode *inode, struct file *file)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 41be9ff7d70a..6df7d6676a48 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
166} 166}
167 167
168#ifdef CONFIG_CPU_FREQ 168#ifdef CONFIG_CPU_FREQ
169static cpumask_var_t cpus_to_visit; 169static cpumask_var_t cpus_to_visit __initdata;
170static void parsing_done_workfn(struct work_struct *work); 170static void __init parsing_done_workfn(struct work_struct *work);
171static DECLARE_WORK(parsing_done_work, parsing_done_workfn); 171static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
172 172
173static int 173static int __init
174init_cpu_capacity_callback(struct notifier_block *nb, 174init_cpu_capacity_callback(struct notifier_block *nb,
175 unsigned long val, 175 unsigned long val,
176 void *data) 176 void *data)
@@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
206 return 0; 206 return 0;
207} 207}
208 208
209static struct notifier_block init_cpu_capacity_notifier = { 209static struct notifier_block init_cpu_capacity_notifier __initdata = {
210 .notifier_call = init_cpu_capacity_callback, 210 .notifier_call = init_cpu_capacity_callback,
211}; 211};
212 212
@@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void)
232} 232}
233core_initcall(register_cpufreq_notifier); 233core_initcall(register_cpufreq_notifier);
234 234
235static void parsing_done_workfn(struct work_struct *work) 235static void __init parsing_done_workfn(struct work_struct *work)
236{ 236{
237 cpufreq_unregister_notifier(&init_cpu_capacity_notifier, 237 cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
238 CPUFREQ_POLICY_NOTIFIER); 238 CPUFREQ_POLICY_NOTIFIER);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3855902f2c5b..aae2402f3791 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {
27 27
28static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) 28static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
29{ 29{
30 ssize_t n;
31 cpumask_var_t mask;
30 struct node *node_dev = to_node(dev); 32 struct node *node_dev = to_node(dev);
31 const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
32 33
33 /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ 34 /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
34 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); 35 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
35 36
36 return cpumap_print_to_pagebuf(list, buf, mask); 37 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
38 return 0;
39
40 cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
41 n = cpumap_print_to_pagebuf(list, buf, mask);
42 free_cpumask_var(mask);
43
44 return n;
37} 45}
38 46
39static inline ssize_t node_read_cpumask(struct device *dev, 47static inline ssize_t node_read_cpumask(struct device *dev,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index d1bd99271066..9045c5f3734e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
868 struct platform_device *pdev = to_platform_device(dev); 868 struct platform_device *pdev = to_platform_device(dev);
869 char *driver_override, *old, *cp; 869 char *driver_override, *old, *cp;
870 870
871 if (count > PATH_MAX) 871 /* We need to keep extra room for a newline */
872 if (count >= (PAGE_SIZE - 1))
872 return -EINVAL; 873 return -EINVAL;
873 874
874 driver_override = kstrndup(buf, count, GFP_KERNEL); 875 driver_override = kstrndup(buf, count, GFP_KERNEL);
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index a8cc14fd8ae4..a6de32530693 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
1581 1581
1582 opp->available = availability_req; 1582 opp->available = availability_req;
1583 1583
1584 dev_pm_opp_get(opp);
1585 mutex_unlock(&opp_table->lock);
1586
1584 /* Notify the change of the OPP availability */ 1587 /* Notify the change of the OPP availability */
1585 if (availability_req) 1588 if (availability_req)
1586 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, 1589 blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
1589 blocking_notifier_call_chain(&opp_table->head, 1592 blocking_notifier_call_chain(&opp_table->head,
1590 OPP_EVENT_DISABLE, opp); 1593 OPP_EVENT_DISABLE, opp);
1591 1594
1595 dev_pm_opp_put(opp);
1596 goto put_table;
1597
1592unlock: 1598unlock:
1593 mutex_unlock(&opp_table->lock); 1599 mutex_unlock(&opp_table->lock);
1600put_table:
1594 dev_pm_opp_put_opp_table(opp_table); 1601 dev_pm_opp_put_opp_table(opp_table);
1595 return r; 1602 return r;
1596} 1603}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index d0b65bbe7e15..7ed99c1b2a8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -21,6 +21,7 @@
21#include <linux/phy.h> 21#include <linux/phy.h>
22 22
23struct property_set { 23struct property_set {
24 struct device *dev;
24 struct fwnode_handle fwnode; 25 struct fwnode_handle fwnode;
25 const struct property_entry *properties; 26 const struct property_entry *properties;
26}; 27};
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string);
682 * Caller is responsible to call fwnode_handle_put() on the returned 683 * Caller is responsible to call fwnode_handle_put() on the returned
683 * args->fwnode pointer. 684 * args->fwnode pointer.
684 * 685 *
686 * Returns: %0 on success
687 * %-ENOENT when the index is out of bounds, the index has an empty
688 * reference or the property was not found
689 * %-EINVAL on parse error
685 */ 690 */
686int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, 691int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
687 const char *prop, const char *nargs_prop, 692 const char *prop, const char *nargs_prop,
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
891void device_remove_properties(struct device *dev) 896void device_remove_properties(struct device *dev)
892{ 897{
893 struct fwnode_handle *fwnode; 898 struct fwnode_handle *fwnode;
899 struct property_set *pset;
894 900
895 fwnode = dev_fwnode(dev); 901 fwnode = dev_fwnode(dev);
896 if (!fwnode) 902 if (!fwnode)
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev)
900 * the pset. If there is no real firmware node (ACPI/DT) primary 906 * the pset. If there is no real firmware node (ACPI/DT) primary
901 * will hold the pset. 907 * will hold the pset.
902 */ 908 */
903 if (is_pset_node(fwnode)) { 909 pset = to_pset_node(fwnode);
910 if (pset) {
904 set_primary_fwnode(dev, NULL); 911 set_primary_fwnode(dev, NULL);
905 pset_free_set(to_pset_node(fwnode));
906 } else { 912 } else {
907 fwnode = fwnode->secondary; 913 pset = to_pset_node(fwnode->secondary);
908 if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { 914 if (pset && dev == pset->dev)
909 set_secondary_fwnode(dev, NULL); 915 set_secondary_fwnode(dev, NULL);
910 pset_free_set(to_pset_node(fwnode));
911 }
912 } 916 }
917 if (pset && dev == pset->dev)
918 pset_free_set(pset);
913} 919}
914EXPORT_SYMBOL_GPL(device_remove_properties); 920EXPORT_SYMBOL_GPL(device_remove_properties);
915 921
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev,
938 944
939 p->fwnode.ops = &pset_fwnode_ops; 945 p->fwnode.ops = &pset_fwnode_ops;
940 set_secondary_fwnode(dev, &p->fwnode); 946 set_secondary_fwnode(dev, &p->fwnode);
947 p->dev = dev;
941 return 0; 948 return 0;
942} 949}
943EXPORT_SYMBOL_GPL(device_add_properties); 950EXPORT_SYMBOL_GPL(device_add_properties);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4a438b8abe27..2dfe99b328f8 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
17 17
18config BLK_DEV_NULL_BLK 18config BLK_DEV_NULL_BLK
19 tristate "Null test block driver" 19 tristate "Null test block driver"
20 depends on CONFIGFS_FS 20 select CONFIGFS_FS
21 21
22config BLK_DEV_FD 22config BLK_DEV_FD
23 tristate "Normal floppy disk support" 23 tristate "Normal floppy disk support"
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bbd0d186cfc0..2d7178f7754e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
342 342
343 if (!brd) 343 if (!brd)
344 return -ENODEV; 344 return -ENODEV;
345 page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512); 345 page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
346 if (!page) 346 if (!page)
347 return -ENOSPC; 347 return -ENOSPC;
348 *kaddr = page_address(page); 348 *kaddr = page_address(page);
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index f68c1d50802f..1f3956702993 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -67,10 +67,8 @@ struct loop_device {
67struct loop_cmd { 67struct loop_cmd {
68 struct kthread_work work; 68 struct kthread_work work;
69 struct request *rq; 69 struct request *rq;
70 union { 70 bool use_aio; /* use AIO interface to handle I/O */
71 bool use_aio; /* use AIO interface to handle I/O */ 71 atomic_t ref; /* only for aio */
72 atomic_t ref; /* only for aio */
73 };
74 long ret; 72 long ret;
75 struct kiocb iocb; 73 struct kiocb iocb;
76 struct bio_vec *bvec; 74 struct bio_vec *bvec;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2aa87cbdede0..9adfb5445f8d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
243 struct nbd_config *config = nbd->config; 243 struct nbd_config *config = nbd->config;
244 config->blksize = blocksize; 244 config->blksize = blocksize;
245 config->bytesize = blocksize * nr_blocks; 245 config->bytesize = blocksize * nr_blocks;
246 nbd_size_update(nbd);
247} 246}
248 247
249static void nbd_complete_rq(struct request *req) 248static void nbd_complete_rq(struct request *req)
@@ -387,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
387 return result; 386 return result;
388} 387}
389 388
389/*
390 * Different settings for sk->sk_sndtimeo can result in different return values
391 * if there is a signal pending when we enter sendmsg, because reasons?
392 */
393static inline int was_interrupted(int result)
394{
395 return result == -ERESTARTSYS || result == -EINTR;
396}
397
390/* always call with the tx_lock held */ 398/* always call with the tx_lock held */
391static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 399static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
392{ 400{
@@ -459,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
459 result = sock_xmit(nbd, index, 1, &from, 467 result = sock_xmit(nbd, index, 1, &from,
460 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); 468 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
461 if (result <= 0) { 469 if (result <= 0) {
462 if (result == -ERESTARTSYS) { 470 if (was_interrupted(result)) {
463 /* If we havne't sent anything we can just return BUSY, 471 /* If we havne't sent anything we can just return BUSY,
464 * however if we have sent something we need to make 472 * however if we have sent something we need to make
465 * sure we only allow this req to be sent until we are 473 * sure we only allow this req to be sent until we are
@@ -503,7 +511,7 @@ send_pages:
503 } 511 }
504 result = sock_xmit(nbd, index, 1, &from, flags, &sent); 512 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
505 if (result <= 0) { 513 if (result <= 0) {
506 if (result == -ERESTARTSYS) { 514 if (was_interrupted(result)) {
507 /* We've already sent the header, we 515 /* We've already sent the header, we
508 * have no choice but to set pending and 516 * have no choice but to set pending and
509 * return BUSY. 517 * return BUSY.
@@ -820,9 +828,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
820 * appropriate. 828 * appropriate.
821 */ 829 */
822 ret = nbd_handle_cmd(cmd, hctx->queue_num); 830 ret = nbd_handle_cmd(cmd, hctx->queue_num);
831 if (ret < 0)
832 ret = BLK_STS_IOERR;
833 else if (!ret)
834 ret = BLK_STS_OK;
823 complete(&cmd->send_complete); 835 complete(&cmd->send_complete);
824 836
825 return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK; 837 return ret;
826} 838}
827 839
828static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, 840static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -1090,6 +1102,7 @@ static int nbd_start_device(struct nbd_device *nbd)
1090 args->index = i; 1102 args->index = i;
1091 queue_work(recv_workqueue, &args->work); 1103 queue_work(recv_workqueue, &args->work);
1092 } 1104 }
1105 nbd_size_update(nbd);
1093 return error; 1106 return error;
1094} 1107}
1095 1108
@@ -1194,6 +1207,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1194 if (!capable(CAP_SYS_ADMIN)) 1207 if (!capable(CAP_SYS_ADMIN))
1195 return -EPERM; 1208 return -EPERM;
1196 1209
1210 /* The block layer will pass back some non-nbd ioctls in case we have
1211 * special handling for them, but we don't so just return an error.
1212 */
1213 if (_IOC_TYPE(cmd) != 0xab)
1214 return -EINVAL;
1215
1197 mutex_lock(&nbd->config_lock); 1216 mutex_lock(&nbd->config_lock);
1198 1217
1199 /* Don't allow ioctl operations on a nbd device that was created with 1218 /* Don't allow ioctl operations on a nbd device that was created with
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2604 return NULL; 2604 return NULL;
2605 *dma_handle = dma_map_single(dev, buf, s->size, dir); 2605 *dma_handle = dma_map_single(dev, buf, s->size, dir);
2606 if (dma_mapping_error(dev, *dma_handle)) { 2606 if (dma_mapping_error(dev, *dma_handle)) {
2607 kfree(buf); 2607 kmem_cache_free(s, buf);
2608 buf = NULL; 2608 buf = NULL;
2609 } 2609 }
2610 return buf; 2610 return buf;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 34e17ee799be..68846897d213 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
593 return blk_mq_virtio_map_queues(set, vblk->vdev, 0); 593 return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
594} 594}
595 595
596#ifdef CONFIG_VIRTIO_BLK_SCSI
597static void virtblk_initialize_rq(struct request *req)
598{
599 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
600
601 scsi_req_init(&vbr->sreq);
602}
603#endif
604
596static const struct blk_mq_ops virtio_mq_ops = { 605static const struct blk_mq_ops virtio_mq_ops = {
597 .queue_rq = virtio_queue_rq, 606 .queue_rq = virtio_queue_rq,
598 .complete = virtblk_request_done, 607 .complete = virtblk_request_done,
599 .init_request = virtblk_init_request, 608 .init_request = virtblk_init_request,
609#ifdef CONFIG_VIRTIO_BLK_SCSI
610 .initialize_rq_fn = virtblk_initialize_rq,
611#endif
600 .map_queues = virtblk_map_queues, 612 .map_queues = virtblk_map_queues,
601}; 613};
602 614
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2981c27d3aae..f149d3e61234 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
766 bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); 766 bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
767} 767}
768 768
769static bool zram_same_page_read(struct zram *zram, u32 index,
770 struct page *page,
771 unsigned int offset, unsigned int len)
772{
773 zram_slot_lock(zram, index);
774 if (unlikely(!zram_get_handle(zram, index) ||
775 zram_test_flag(zram, index, ZRAM_SAME))) {
776 void *mem;
777
778 zram_slot_unlock(zram, index);
779 mem = kmap_atomic(page);
780 zram_fill_page(mem + offset, len,
781 zram_get_element(zram, index));
782 kunmap_atomic(mem);
783 return true;
784 }
785 zram_slot_unlock(zram, index);
786
787 return false;
788}
789
790static void zram_meta_free(struct zram *zram, u64 disksize) 769static void zram_meta_free(struct zram *zram, u64 disksize)
791{ 770{
792 size_t num_pages = disksize >> PAGE_SHIFT; 771 size_t num_pages = disksize >> PAGE_SHIFT;
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
884 zram_slot_unlock(zram, index); 863 zram_slot_unlock(zram, index);
885 } 864 }
886 865
887 if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
888 return 0;
889
890 zram_slot_lock(zram, index); 866 zram_slot_lock(zram, index);
891 handle = zram_get_handle(zram, index); 867 handle = zram_get_handle(zram, index);
868 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
869 unsigned long value;
870 void *mem;
871
872 value = handle ? zram_get_element(zram, index) : 0;
873 mem = kmap_atomic(page);
874 zram_fill_page(mem, PAGE_SIZE, value);
875 kunmap_atomic(mem);
876 zram_slot_unlock(zram, index);
877 return 0;
878 }
879
892 size = zram_get_obj_size(zram, index); 880 size = zram_get_obj_size(zram, index);
893 881
894 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); 882 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f396903184..70db4d5638a6 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
720 if (mbus->hw_io_coherency) 720 if (mbus->hw_io_coherency)
721 w->mbus_attr |= ATTR_HW_COHERENCY; 721 w->mbus_attr |= ATTR_HW_COHERENCY;
722 w->base = base & DDR_BASE_CS_LOW_MASK; 722 w->base = base & DDR_BASE_CS_LOW_MASK;
723 w->size = (size | ~DDR_SIZE_MASK) + 1; 723 w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
724 } 724 }
725 } 725 }
726 mvebu_mbus_dram_info.num_cs = cs; 726 mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index c834f5abfc49..4c10456f8a32 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -105,6 +105,7 @@ err:
105 105
106 return ret; 106 return ret;
107} 107}
108EXPORT_SYMBOL_GPL(clk_bulk_prepare);
108 109
109#endif /* CONFIG_HAVE_CLK_PREPARE */ 110#endif /* CONFIG_HAVE_CLK_PREPARE */
110 111
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 62d7854e4b87..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
315 RK2928_CLKGATE_CON(10), 8, GFLAGS), 315 RK2928_CLKGATE_CON(10), 8, GFLAGS),
316 316
317 GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, 317 GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
318 RK2928_CLKGATE_CON(10), 8, GFLAGS), 318 RK2928_CLKGATE_CON(10), 0, GFLAGS),
319 GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, 319 GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
320 RK2928_CLKGATE_CON(10), 8, GFLAGS), 320 RK2928_CLKGATE_CON(10), 1, GFLAGS),
321 GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, 321 GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
322 RK2928_CLKGATE_CON(10), 8, GFLAGS), 322 RK2928_CLKGATE_CON(10), 2, GFLAGS),
323 GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, 323 GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
324 RK2928_CLKGATE_CON(10), 8, GFLAGS), 324 RK2928_CLKGATE_CON(2), 15, GFLAGS),
325 325
326 COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, 326 COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
327 RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, 327 RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
541 GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), 541 GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
542 GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), 542 GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
543 543
544 GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS), 544 GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
545 GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), 545 GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
546 546
547 /* PD_MMC */ 547 /* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
577 "aclk_peri", 577 "aclk_peri",
578 "hclk_peri", 578 "hclk_peri",
579 "pclk_peri", 579 "pclk_peri",
580 "pclk_pmu",
581 "sclk_timer5",
580}; 582};
581 583
582static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) 584static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index e40b77583c47..d8d3cb67b402 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
294#define PLL_ENABLED (1 << 31) 294#define PLL_ENABLED (1 << 31)
295#define PLL_LOCKED (1 << 29) 295#define PLL_LOCKED (1 << 29)
296 296
297static void exynos4_clk_enable_pll(u32 reg)
298{
299 u32 pll_con = readl(reg_base + reg);
300 pll_con |= PLL_ENABLED;
301 writel(pll_con, reg_base + reg);
302
303 while (!(pll_con & PLL_LOCKED)) {
304 cpu_relax();
305 pll_con = readl(reg_base + reg);
306 }
307}
308
297static void exynos4_clk_wait_for_pll(u32 reg) 309static void exynos4_clk_wait_for_pll(u32 reg)
298{ 310{
299 u32 pll_con; 311 u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
315 samsung_clk_save(reg_base, exynos4_save_pll, 327 samsung_clk_save(reg_base, exynos4_save_pll,
316 ARRAY_SIZE(exynos4_clk_pll_regs)); 328 ARRAY_SIZE(exynos4_clk_pll_regs));
317 329
330 exynos4_clk_enable_pll(EPLL_CON0);
331 exynos4_clk_enable_pll(VPLL_CON0);
332
318 if (exynos4_soc == EXYNOS4210) { 333 if (exynos4_soc == EXYNOS4210) {
319 samsung_clk_save(reg_base, exynos4_save_soc, 334 samsung_clk_save(reg_base, exynos4_save_soc,
320 ARRAY_SIZE(exynos4210_clk_save)); 335 ARRAY_SIZE(exynos4210_clk_save));
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index a1df588343f2..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
117 /* Turn off the clock (and clear the event) */ 117 /* Turn off the clock (and clear the event) */
118 disable_timer(cs5535_event_clock); 118 disable_timer(cs5535_event_clock);
119 119
120 if (clockevent_state_shutdown(&cs5535_clockevent)) 120 if (clockevent_state_detached(&cs5535_clockevent) ||
121 clockevent_state_shutdown(&cs5535_clockevent))
121 return IRQ_HANDLED; 122 return IRQ_HANDLED;
122 123
123 /* Clear the counter */ 124 /* Clear the counter */
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c
index 6a20dc8b253f..9a7d7f0f23fe 100644
--- a/drivers/clocksource/numachip.c
+++ b/drivers/clocksource/numachip.c
@@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi
43 return 0; 43 return 0;
44} 44}
45 45
46static struct clock_event_device numachip2_clockevent = { 46static const struct clock_event_device numachip2_clockevent __initconst = {
47 .name = "numachip2", 47 .name = "numachip2",
48 .rating = 400, 48 .rating = 400,
49 .set_next_event = numachip2_set_next_event, 49 .set_next_event = numachip2_set_next_event,
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 430edadca527..a753c50e9e41 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -118,6 +118,10 @@ static const struct of_device_id blacklist[] __initconst = {
118 118
119 { .compatible = "sigma,tango4", }, 119 { .compatible = "sigma,tango4", },
120 120
121 { .compatible = "ti,am33xx", },
122 { .compatible = "ti,am43", },
123 { .compatible = "ti,dra7", },
124
121 { } 125 { }
122}; 126};
123 127
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index d9fbbf01062b..0f9754e07719 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx {
349/* The crypto framework makes it hard to avoid this global. */ 349/* The crypto framework makes it hard to avoid this global. */
350static struct device *artpec6_crypto_dev; 350static struct device *artpec6_crypto_dev;
351 351
352static struct dentry *dbgfs_root;
353
354#ifdef CONFIG_FAULT_INJECTION 352#ifdef CONFIG_FAULT_INJECTION
355static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); 353static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
356static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); 354static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 {
2984 char *desc; 2982 char *desc;
2985}; 2983};
2986 2984
2985static struct dentry *dbgfs_root;
2986
2987static void artpec6_crypto_init_debugfs(void) 2987static void artpec6_crypto_init_debugfs(void)
2988{ 2988{
2989 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); 2989 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index b585ce54a802..4835dd4a9e50 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
553{ 553{
554 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); 554 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
555 struct scatterlist sg[1], *tsg; 555 struct scatterlist sg[1], *tsg;
556 int err = 0, len = 0, reg, ncp; 556 int err = 0, len = 0, reg, ncp = 0;
557 unsigned int i; 557 unsigned int i;
558 const u32 *buffer = (const u32 *)rctx->buffer; 558 u32 *buffer = (void *)rctx->buffer;
559 559
560 rctx->sg = hdev->req->src; 560 rctx->sg = hdev->req->src;
561 rctx->total = hdev->req->nbytes; 561 rctx->total = hdev->req->nbytes;
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
620 reg |= HASH_CR_DMAA; 620 reg |= HASH_CR_DMAA;
621 stm32_hash_write(hdev, HASH_CR, reg); 621 stm32_hash_write(hdev, HASH_CR, reg);
622 622
623 for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) 623 if (ncp) {
624 stm32_hash_write(hdev, HASH_DIN, buffer[i]); 624 memset(buffer + ncp, 0,
625 625 DIV_ROUND_UP(ncp, sizeof(u32)) - ncp);
626 stm32_hash_set_nblw(hdev, ncp); 626 writesl(hdev->io_base + HASH_DIN, buffer,
627 DIV_ROUND_UP(ncp, sizeof(u32)));
628 }
629 stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32)));
627 reg = stm32_hash_read(hdev, HASH_STR); 630 reg = stm32_hash_read(hdev, HASH_STR);
628 reg |= HASH_STR_DCAL; 631 reg |= HASH_STR_DCAL;
629 stm32_hash_write(hdev, HASH_STR, reg); 632 stm32_hash_write(hdev, HASH_STR, reg);
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 66fb40d0ebdb..03830634e141 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -383,7 +383,7 @@ err_put_fd:
383 return err; 383 return err;
384} 384}
385 385
386static void sync_fill_fence_info(struct dma_fence *fence, 386static int sync_fill_fence_info(struct dma_fence *fence,
387 struct sync_fence_info *info) 387 struct sync_fence_info *info)
388{ 388{
389 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), 389 strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence,
399 test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? 399 test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
400 ktime_to_ns(fence->timestamp) : 400 ktime_to_ns(fence->timestamp) :
401 ktime_set(0, 0); 401 ktime_set(0, 0);
402
403 return info->status;
402} 404}
403 405
404static long sync_file_ioctl_fence_info(struct sync_file *sync_file, 406static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
424 * sync_fence_info and return the actual number of fences on 426 * sync_fence_info and return the actual number of fences on
425 * info->num_fences. 427 * info->num_fences.
426 */ 428 */
427 if (!info.num_fences) 429 if (!info.num_fences) {
430 info.status = dma_fence_is_signaled(sync_file->fence);
428 goto no_fences; 431 goto no_fences;
432 } else {
433 info.status = 1;
434 }
429 435
430 if (info.num_fences < num_fences) 436 if (info.num_fences < num_fences)
431 return -EINVAL; 437 return -EINVAL;
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
435 if (!fence_info) 441 if (!fence_info)
436 return -ENOMEM; 442 return -ENOMEM;
437 443
438 for (i = 0; i < num_fences; i++) 444 for (i = 0; i < num_fences; i++) {
439 sync_fill_fence_info(fences[i], &fence_info[i]); 445 int status = sync_fill_fence_info(fences[i], &fence_info[i]);
446 info.status = info.status <= 0 ? info.status : status;
447 }
440 448
441 if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, 449 if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
442 size)) { 450 size)) {
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
446 454
447no_fences: 455no_fences:
448 sync_file_get_name(sync_file, info.name, sizeof(info.name)); 456 sync_file_get_name(sync_file, info.name, sizeof(info.name));
449 info.status = dma_fence_is_signaled(sync_file->fence);
450 info.num_fences = num_fences; 457 info.num_fences = num_fences;
451 458
452 if (copy_to_user((void __user *)arg, &info, sizeof(info))) 459 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 32905d5606ac..55f9c62ee54b 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
212static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) 212static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
213{ 213{
214 struct msgdma_sw_desc *desc; 214 struct msgdma_sw_desc *desc;
215 unsigned long flags;
215 216
216 spin_lock_bh(&mdev->lock); 217 spin_lock_irqsave(&mdev->lock, flags);
217 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); 218 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
218 list_del(&desc->node); 219 list_del(&desc->node);
219 spin_unlock_bh(&mdev->lock); 220 spin_unlock_irqrestore(&mdev->lock, flags);
220 221
221 INIT_LIST_HEAD(&desc->tx_list); 222 INIT_LIST_HEAD(&desc->tx_list);
222 223
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
306 struct msgdma_device *mdev = to_mdev(tx->chan); 307 struct msgdma_device *mdev = to_mdev(tx->chan);
307 struct msgdma_sw_desc *new; 308 struct msgdma_sw_desc *new;
308 dma_cookie_t cookie; 309 dma_cookie_t cookie;
310 unsigned long flags;
309 311
310 new = tx_to_desc(tx); 312 new = tx_to_desc(tx);
311 spin_lock_bh(&mdev->lock); 313 spin_lock_irqsave(&mdev->lock, flags);
312 cookie = dma_cookie_assign(tx); 314 cookie = dma_cookie_assign(tx);
313 315
314 list_add_tail(&new->node, &mdev->pending_list); 316 list_add_tail(&new->node, &mdev->pending_list);
315 spin_unlock_bh(&mdev->lock); 317 spin_unlock_irqrestore(&mdev->lock, flags);
316 318
317 return cookie; 319 return cookie;
318} 320}
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
336 struct msgdma_extended_desc *desc; 338 struct msgdma_extended_desc *desc;
337 size_t copy; 339 size_t copy;
338 u32 desc_cnt; 340 u32 desc_cnt;
341 unsigned long irqflags;
339 342
340 desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); 343 desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
341 344
342 spin_lock_bh(&mdev->lock); 345 spin_lock_irqsave(&mdev->lock, irqflags);
343 if (desc_cnt > mdev->desc_free_cnt) { 346 if (desc_cnt > mdev->desc_free_cnt) {
344 spin_unlock_bh(&mdev->lock); 347 spin_unlock_irqrestore(&mdev->lock, irqflags);
345 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 348 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
346 return NULL; 349 return NULL;
347 } 350 }
348 mdev->desc_free_cnt -= desc_cnt; 351 mdev->desc_free_cnt -= desc_cnt;
349 spin_unlock_bh(&mdev->lock); 352 spin_unlock_irqrestore(&mdev->lock, irqflags);
350 353
351 do { 354 do {
352 /* Allocate and populate the descriptor */ 355 /* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
397 u32 desc_cnt = 0, i; 400 u32 desc_cnt = 0, i;
398 struct scatterlist *sg; 401 struct scatterlist *sg;
399 u32 stride; 402 u32 stride;
403 unsigned long irqflags;
400 404
401 for_each_sg(sgl, sg, sg_len, i) 405 for_each_sg(sgl, sg, sg_len, i)
402 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); 406 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
403 407
404 spin_lock_bh(&mdev->lock); 408 spin_lock_irqsave(&mdev->lock, irqflags);
405 if (desc_cnt > mdev->desc_free_cnt) { 409 if (desc_cnt > mdev->desc_free_cnt) {
406 spin_unlock_bh(&mdev->lock); 410 spin_unlock_irqrestore(&mdev->lock, irqflags);
407 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); 411 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
408 return NULL; 412 return NULL;
409 } 413 }
410 mdev->desc_free_cnt -= desc_cnt; 414 mdev->desc_free_cnt -= desc_cnt;
411 spin_unlock_bh(&mdev->lock); 415 spin_unlock_irqrestore(&mdev->lock, irqflags);
412 416
413 avail = sg_dma_len(sgl); 417 avail = sg_dma_len(sgl);
414 418
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
566static void msgdma_issue_pending(struct dma_chan *chan) 570static void msgdma_issue_pending(struct dma_chan *chan)
567{ 571{
568 struct msgdma_device *mdev = to_mdev(chan); 572 struct msgdma_device *mdev = to_mdev(chan);
573 unsigned long flags;
569 574
570 spin_lock_bh(&mdev->lock); 575 spin_lock_irqsave(&mdev->lock, flags);
571 msgdma_start_transfer(mdev); 576 msgdma_start_transfer(mdev);
572 spin_unlock_bh(&mdev->lock); 577 spin_unlock_irqrestore(&mdev->lock, flags);
573} 578}
574 579
575/** 580/**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
634static void msgdma_free_chan_resources(struct dma_chan *dchan) 639static void msgdma_free_chan_resources(struct dma_chan *dchan)
635{ 640{
636 struct msgdma_device *mdev = to_mdev(dchan); 641 struct msgdma_device *mdev = to_mdev(dchan);
642 unsigned long flags;
637 643
638 spin_lock_bh(&mdev->lock); 644 spin_lock_irqsave(&mdev->lock, flags);
639 msgdma_free_descriptors(mdev); 645 msgdma_free_descriptors(mdev);
640 spin_unlock_bh(&mdev->lock); 646 spin_unlock_irqrestore(&mdev->lock, flags);
641 kfree(mdev->sw_desq); 647 kfree(mdev->sw_desq);
642} 648}
643 649
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
682 u32 count; 688 u32 count;
683 u32 __maybe_unused size; 689 u32 __maybe_unused size;
684 u32 __maybe_unused status; 690 u32 __maybe_unused status;
691 unsigned long flags;
685 692
686 spin_lock(&mdev->lock); 693 spin_lock_irqsave(&mdev->lock, flags);
687 694
688 /* Read number of responses that are available */ 695 /* Read number of responses that are available */
689 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); 696 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
698 * bits. So we need to just drop these values. 705 * bits. So we need to just drop these values.
699 */ 706 */
700 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); 707 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
701 status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); 708 status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
702 709
703 msgdma_complete_descriptor(mdev); 710 msgdma_complete_descriptor(mdev);
704 msgdma_chan_desc_cleanup(mdev); 711 msgdma_chan_desc_cleanup(mdev);
705 } 712 }
706 713
707 spin_unlock(&mdev->lock); 714 spin_unlock_irqrestore(&mdev->lock, flags);
708} 715}
709 716
710/** 717/**
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80a4815..a7ea20e7b8e9 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1143 struct edma_desc *edesc; 1143 struct edma_desc *edesc;
1144 struct device *dev = chan->device->dev; 1144 struct device *dev = chan->device->dev;
1145 struct edma_chan *echan = to_edma_chan(chan); 1145 struct edma_chan *echan = to_edma_chan(chan);
1146 unsigned int width, pset_len; 1146 unsigned int width, pset_len, array_size;
1147 1147
1148 if (unlikely(!echan || !len)) 1148 if (unlikely(!echan || !len))
1149 return NULL; 1149 return NULL;
1150 1150
1151 /* Align the array size (acnt block) with the transfer properties */
1152 switch (__ffs((src | dest | len))) {
1153 case 0:
1154 array_size = SZ_32K - 1;
1155 break;
1156 case 1:
1157 array_size = SZ_32K - 2;
1158 break;
1159 default:
1160 array_size = SZ_32K - 4;
1161 break;
1162 }
1163
1151 if (len < SZ_64K) { 1164 if (len < SZ_64K) {
1152 /* 1165 /*
1153 * Transfer size less than 64K can be handled with one paRAM 1166 * Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1169 * When the full_length is multibple of 32767 one slot can be 1182 * When the full_length is multibple of 32767 one slot can be
1170 * used to complete the transfer. 1183 * used to complete the transfer.
1171 */ 1184 */
1172 width = SZ_32K - 1; 1185 width = array_size;
1173 pset_len = rounddown(len, width); 1186 pset_len = rounddown(len, width);
1174 /* One slot is enough for lengths multiple of (SZ_32K -1) */ 1187 /* One slot is enough for lengths multiple of (SZ_32K -1) */
1175 if (unlikely(pset_len == len)) 1188 if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1217 } 1230 }
1218 dest += pset_len; 1231 dest += pset_len;
1219 src += pset_len; 1232 src += pset_len;
1220 pset_len = width = len % (SZ_32K - 1); 1233 pset_len = width = len % array_size;
1221 1234
1222 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, 1235 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1223 width, pset_len, DMA_MEM_TO_MEM); 1236 width, pset_len, DMA_MEM_TO_MEM);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 2f65a8fde21d..f1d04b70ee67 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
262 mutex_lock(&xbar->mutex); 262 mutex_lock(&xbar->mutex);
263 map->xbar_out = find_first_zero_bit(xbar->dma_inuse, 263 map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
264 xbar->dma_requests); 264 xbar->dma_requests);
265 mutex_unlock(&xbar->mutex);
266 if (map->xbar_out == xbar->dma_requests) { 265 if (map->xbar_out == xbar->dma_requests) {
266 mutex_unlock(&xbar->mutex);
267 dev_err(&pdev->dev, "Run out of free DMA requests\n"); 267 dev_err(&pdev->dev, "Run out of free DMA requests\n");
268 kfree(map); 268 kfree(map);
269 return ERR_PTR(-ENOMEM); 269 return ERR_PTR(-ENOMEM);
270 } 270 }
271 set_bit(map->xbar_out, xbar->dma_inuse); 271 set_bit(map->xbar_out, xbar->dma_inuse);
272 mutex_unlock(&xbar->mutex);
272 273
273 map->xbar_in = (u16)dma_spec->args[0]; 274 map->xbar_in = (u16)dma_spec->args[0];
274 275
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 1cb2d1c070c3..a94601d5939e 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
238 238
239 efi_random_get_seed(sys_table); 239 efi_random_get_seed(sys_table);
240 240
241 if (!nokaslr()) { 241 /* hibernation expects the runtime regions to stay in the same place */
242 if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
242 /* 243 /*
243 * Randomize the base of the UEFI runtime services region. 244 * Randomize the base of the UEFI runtime services region.
244 * Preserve the 2 MB alignment of the region by taking a 245 * Preserve the 2 MB alignment of the region by taking a
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 08129b7b80ab..41c48a1e8baa 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
593 if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps))) 593 if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
594 return -EFAULT; 594 return -EFAULT;
595 595
596 if (qcaps.capsule_count == ULONG_MAX)
597 return -EINVAL;
598
596 capsules = kcalloc(qcaps.capsule_count + 1, 599 capsules = kcalloc(qcaps.capsule_count + 1,
597 sizeof(efi_capsule_header_t), GFP_KERNEL); 600 sizeof(efi_capsule_header_t), GFP_KERNEL);
598 if (!capsules) 601 if (!capsules)
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 08629ee69d11..00e73d28077c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = {
361 .write_complete = altera_cvp_write_complete, 361 .write_complete = altera_cvp_write_complete,
362}; 362};
363 363
364static ssize_t show_chkcfg(struct device_driver *dev, char *buf) 364static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
365{ 365{
366 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); 366 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
367} 367}
368 368
369static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, 369static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
370 size_t count) 370 size_t count)
371{ 371{
372 int ret; 372 int ret;
@@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
378 return count; 378 return count;
379} 379}
380 380
381static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg); 381static DRIVER_ATTR_RW(chkcfg);
382 382
383static int altera_cvp_probe(struct pci_dev *pdev, 383static int altera_cvp_probe(struct pci_dev *pdev,
384 const struct pci_device_id *dev_id); 384 const struct pci_device_id *dev_id);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3388d54ba114..3f80f167ed56 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -453,7 +453,8 @@ config GPIO_TS4800
453config GPIO_THUNDERX 453config GPIO_THUNDERX
454 tristate "Cavium ThunderX/OCTEON-TX GPIO" 454 tristate "Cavium ThunderX/OCTEON-TX GPIO"
455 depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) 455 depends on ARCH_THUNDER || (64BIT && COMPILE_TEST)
456 depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY 456 depends on PCI_MSI
457 select IRQ_DOMAIN_HIERARCHY
457 select IRQ_FASTEOI_HIERARCHY_HANDLERS 458 select IRQ_FASTEOI_HIERARCHY_HANDLERS
458 help 459 help
459 Say yes here to support the on-chip GPIO lines on the ThunderX 460 Say yes here to support the on-chip GPIO lines on the ThunderX
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dbf869fb63ce..3233b72b6828 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
518 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 518 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
519 irq_set_handler_locked(d, handle_level_irq); 519 irq_set_handler_locked(d, handle_level_irq);
520 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 520 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
521 irq_set_handler_locked(d, handle_edge_irq); 521 /*
522 * Edge IRQs are already cleared/acked in irq_handler and
523 * not need to be masked, as result handle_edge_irq()
524 * logic is excessed here and may cause lose of interrupts.
525 * So just use handle_simple_irq.
526 */
527 irq_set_handler_locked(d, handle_simple_irq);
522 528
523 return 0; 529 return 0;
524 530
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
678static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) 684static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
679{ 685{
680 void __iomem *isr_reg = NULL; 686 void __iomem *isr_reg = NULL;
681 u32 isr; 687 u32 enabled, isr, level_mask;
682 unsigned int bit; 688 unsigned int bit;
683 struct gpio_bank *bank = gpiobank; 689 struct gpio_bank *bank = gpiobank;
684 unsigned long wa_lock_flags; 690 unsigned long wa_lock_flags;
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
691 pm_runtime_get_sync(bank->chip.parent); 697 pm_runtime_get_sync(bank->chip.parent);
692 698
693 while (1) { 699 while (1) {
694 u32 isr_saved, level_mask = 0;
695 u32 enabled;
696
697 raw_spin_lock_irqsave(&bank->lock, lock_flags); 700 raw_spin_lock_irqsave(&bank->lock, lock_flags);
698 701
699 enabled = omap_get_gpio_irqbank_mask(bank); 702 enabled = omap_get_gpio_irqbank_mask(bank);
700 isr_saved = isr = readl_relaxed(isr_reg) & enabled; 703 isr = readl_relaxed(isr_reg) & enabled;
701 704
702 if (bank->level_mask) 705 if (bank->level_mask)
703 level_mask = bank->level_mask & enabled; 706 level_mask = bank->level_mask & enabled;
707 else
708 level_mask = 0;
704 709
705 /* clear edge sensitive interrupts before handler(s) are 710 /* clear edge sensitive interrupts before handler(s) are
706 called so that we don't miss any interrupt occurred while 711 called so that we don't miss any interrupt occurred while
707 executing them */ 712 executing them */
708 omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); 713 if (isr & ~level_mask)
709 omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); 714 omap_clear_gpio_irqbank(bank, isr & ~level_mask);
710 omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
711 715
712 raw_spin_unlock_irqrestore(&bank->lock, lock_flags); 716 raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
713 717
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1010 1014
1011/*---------------------------------------------------------------------*/ 1015/*---------------------------------------------------------------------*/
1012 1016
1013static void __init omap_gpio_show_rev(struct gpio_bank *bank) 1017static void omap_gpio_show_rev(struct gpio_bank *bank)
1014{ 1018{
1015 static bool called; 1019 static bool called;
1016 u32 rev; 1020 u32 rev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4d2113530735..eb4528c87c0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
203 203
204 if (pin <= 255) { 204 if (pin <= 255) {
205 char ev_name[5]; 205 char ev_name[5];
206 sprintf(ev_name, "_%c%02X", 206 sprintf(ev_name, "_%c%02hhX",
207 agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', 207 agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
208 pin); 208 pin);
209 if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) 209 if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28a34d9..bc746131987f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
834 placement.busy_placement = &placements; 834 placement.busy_placement = &placements;
835 placements.fpfn = 0; 835 placements.fpfn = 0;
836 placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; 836 placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
837 placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 837 placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
838 838
839 r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); 839 r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
840 if (unlikely(r)) 840 if (unlikely(r))
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d228f5a99044..dbbe986f90f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
636 NUM_BANKS(ADDR_SURF_2_BANK); 636 NUM_BANKS(ADDR_SURF_2_BANK);
637 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 637 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
638 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); 638 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
639 } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { 639 } else if (adev->asic_type == CHIP_OLAND) {
640 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
641 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
642 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
643 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
644 NUM_BANKS(ADDR_SURF_16_BANK) |
645 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
646 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
647 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
648 tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
649 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
650 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
651 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
652 NUM_BANKS(ADDR_SURF_16_BANK) |
653 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
654 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
655 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
656 tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
657 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
658 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
659 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
660 NUM_BANKS(ADDR_SURF_16_BANK) |
661 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
662 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
663 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
664 tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
665 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
666 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
667 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
668 NUM_BANKS(ADDR_SURF_16_BANK) |
669 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
672 tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
673 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
674 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
675 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
676 NUM_BANKS(ADDR_SURF_16_BANK) |
677 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
678 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
679 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
680 tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
681 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
682 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
683 TILE_SPLIT(split_equal_to_row_size) |
684 NUM_BANKS(ADDR_SURF_16_BANK) |
685 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
686 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
687 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
688 tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
689 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
690 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
691 TILE_SPLIT(split_equal_to_row_size) |
692 NUM_BANKS(ADDR_SURF_16_BANK) |
693 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
694 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
695 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
696 tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
697 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
698 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
699 TILE_SPLIT(split_equal_to_row_size) |
700 NUM_BANKS(ADDR_SURF_16_BANK) |
701 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
702 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
703 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
704 tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
705 ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
706 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
707 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
708 NUM_BANKS(ADDR_SURF_16_BANK) |
709 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
710 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
711 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
712 tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
713 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
714 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
715 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
716 NUM_BANKS(ADDR_SURF_16_BANK) |
717 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
718 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
719 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
720 tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
721 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
722 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
723 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
724 NUM_BANKS(ADDR_SURF_16_BANK) |
725 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
726 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
727 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
728 tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
729 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
730 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
731 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
732 NUM_BANKS(ADDR_SURF_16_BANK) |
733 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
734 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
735 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
736 tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
737 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
738 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
739 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
740 NUM_BANKS(ADDR_SURF_16_BANK) |
741 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
742 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
743 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
744 tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
745 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
746 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
747 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
748 NUM_BANKS(ADDR_SURF_16_BANK) |
749 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
750 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
751 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
752 tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
753 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
754 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
755 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
756 NUM_BANKS(ADDR_SURF_16_BANK) |
757 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
758 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
759 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
760 tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
761 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
762 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
763 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
764 NUM_BANKS(ADDR_SURF_16_BANK) |
765 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
766 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
767 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
768 tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
769 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
770 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
771 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
772 NUM_BANKS(ADDR_SURF_16_BANK) |
773 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
774 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
775 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
776 tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
777 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
778 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
779 TILE_SPLIT(split_equal_to_row_size) |
780 NUM_BANKS(ADDR_SURF_16_BANK) |
781 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
782 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
783 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
784 tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
785 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
786 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
787 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
788 NUM_BANKS(ADDR_SURF_16_BANK) |
789 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
790 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
791 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
792 tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
793 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
794 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
795 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
796 NUM_BANKS(ADDR_SURF_16_BANK) |
797 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
798 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
799 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
800 tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
801 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
802 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
803 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
804 NUM_BANKS(ADDR_SURF_16_BANK) |
805 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
806 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
807 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
808 tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
809 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
810 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
811 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
812 NUM_BANKS(ADDR_SURF_16_BANK) |
813 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
816 tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
817 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
818 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
819 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
820 NUM_BANKS(ADDR_SURF_8_BANK) |
821 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
822 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
823 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
824 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
825 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
826 } else if (adev->asic_type == CHIP_HAINAN) {
640 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | 827 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
641 ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 828 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
642 PIPE_CONFIG(ADDR_SURF_P2) | 829 PIPE_CONFIG(ADDR_SURF_P2) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..430a6b4dfac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
225 if (r) 225 if (r)
226 return r; 226 return r;
227 227
228 /* Skip this for APU for now */ 228 return amdgpu_uvd_suspend(adev);
229 if (!(adev->flags & AMD_IS_APU))
230 r = amdgpu_uvd_suspend(adev);
231
232 return r;
233} 229}
234 230
235static int uvd_v6_0_resume(void *handle) 231static int uvd_v6_0_resume(void *handle)
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
237 int r; 233 int r;
238 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 234 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
239 235
240 /* Skip this for APU for now */ 236 r = amdgpu_uvd_resume(adev);
241 if (!(adev->flags & AMD_IS_APU)) { 237 if (r)
242 r = amdgpu_uvd_resume(adev); 238 return r;
243 if (r) 239
244 return r;
245 }
246 return uvd_v6_0_hw_init(adev); 240 return uvd_v6_0_hw_init(adev);
247} 241}
248 242
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index e4a8c2e52cb2..660b3fbade41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
892 int err = 0; 892 int err = 0;
893 893
894 dev = kfd_device_by_id(args->gpu_id); 894 dev = kfd_device_by_id(args->gpu_id);
895 if (!dev)
896 return -EINVAL;
895 897
896 dev->kfd2kgd->get_tile_config(dev->kgd, &config); 898 dev->kfd2kgd->get_tile_config(dev->kgd, &config);
897 899
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5979158c3f7b..944abfad39c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd,
292 struct kfd_event *ev) 292 struct kfd_event *ev)
293{ 293{
294 if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { 294 if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
295 pr_warn("Signal event wasn't created because limit was reached\n"); 295 if (!p->signal_event_limit_reached) {
296 pr_warn("Signal event wasn't created because limit was reached\n");
297 p->signal_event_limit_reached = true;
298 }
296 return -ENOMEM; 299 return -ENOMEM;
297 } 300 }
298 301
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 0649dd43e780..ed71ad40e8f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -184,7 +184,7 @@ static void uninitialize(struct kernel_queue *kq)
184 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) 184 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
185 kq->mqd->destroy_mqd(kq->mqd, 185 kq->mqd->destroy_mqd(kq->mqd,
186 kq->queue->mqd, 186 kq->queue->mqd,
187 false, 187 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
188 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, 188 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
189 kq->queue->pipe, 189 kq->queue->pipe,
190 kq->queue->queue); 190 kq->queue->queue);
@@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
210 uint32_t wptr, rptr; 210 uint32_t wptr, rptr;
211 unsigned int *queue_address; 211 unsigned int *queue_address;
212 212
213 /* When rptr == wptr, the buffer is empty.
214 * When rptr == wptr + 1, the buffer is full.
215 * It is always rptr that advances to the position of wptr, rather than
216 * the opposite. So we can only use up to queue_size_dwords - 1 dwords.
217 */
213 rptr = *kq->rptr_kernel; 218 rptr = *kq->rptr_kernel;
214 wptr = *kq->wptr_kernel; 219 wptr = *kq->wptr_kernel;
215 queue_address = (unsigned int *)kq->pq_kernel_addr; 220 queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
219 pr_debug("wptr: %d\n", wptr); 224 pr_debug("wptr: %d\n", wptr);
220 pr_debug("queue_address 0x%p\n", queue_address); 225 pr_debug("queue_address 0x%p\n", queue_address);
221 226
222 available_size = (rptr - 1 - wptr + queue_size_dwords) % 227 available_size = (rptr + queue_size_dwords - 1 - wptr) %
223 queue_size_dwords; 228 queue_size_dwords;
224 229
225 if (packet_size_in_dwords >= queue_size_dwords || 230 if (packet_size_in_dwords > available_size) {
226 packet_size_in_dwords >= available_size) {
227 /* 231 /*
228 * make sure calling functions know 232 * make sure calling functions know
229 * acquire_packet_buffer() failed 233 * acquire_packet_buffer() failed
@@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
233 } 237 }
234 238
235 if (wptr + packet_size_in_dwords >= queue_size_dwords) { 239 if (wptr + packet_size_in_dwords >= queue_size_dwords) {
240 /* make sure after rolling back to position 0, there is
241 * still enough space.
242 */
243 if (packet_size_in_dwords >= rptr) {
244 *buffer_ptr = NULL;
245 return -ENOMEM;
246 }
247 /* fill nops, roll back and start at position 0 */
236 while (wptr > 0) { 248 while (wptr > 0) {
237 queue_address[wptr] = kq->nop_packet; 249 queue_address[wptr] = kq->nop_packet;
238 wptr = (wptr + 1) % queue_size_dwords; 250 wptr = (wptr + 1) % queue_size_dwords;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b397ec726400..b87e96cee5fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -521,6 +521,7 @@ struct kfd_process {
521 struct list_head signal_event_pages; 521 struct list_head signal_event_pages;
522 u32 next_nonsignal_event_id; 522 u32 next_nonsignal_event_id;
523 size_t signal_event_count; 523 size_t signal_event_count;
524 bool signal_event_limit_reached;
524}; 525};
525 526
526/** 527/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c2743233ba10..b526f49be65d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
830{ 830{
831 uint32_t reference_clock, tmp; 831 uint32_t reference_clock, tmp;
832 struct cgs_display_info info = {0}; 832 struct cgs_display_info info = {0};
833 struct cgs_mode_info mode_info; 833 struct cgs_mode_info mode_info = {0};
834 834
835 info.mode_info = &mode_info; 835 info.mode_info = &mode_info;
836 836
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3948 uint32_t ref_clock; 3948 uint32_t ref_clock;
3949 uint32_t refresh_rate = 0; 3949 uint32_t refresh_rate = 0;
3950 struct cgs_display_info info = {0}; 3950 struct cgs_display_info info = {0};
3951 struct cgs_mode_info mode_info; 3951 struct cgs_mode_info mode_info = {0};
3952 3952
3953 info.mode_info = &mode_info; 3953 info.mode_info = &mode_info;
3954
3955 cgs_get_active_displays_info(hwmgr->device, &info); 3954 cgs_get_active_displays_info(hwmgr->device, &info);
3956 num_active_displays = info.display_count; 3955 num_active_displays = info.display_count;
3957 3956
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
3967 frame_time_in_us = 1000000 / refresh_rate; 3966 frame_time_in_us = 1000000 / refresh_rate;
3968 3967
3969 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; 3968 pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
3969
3970 data->frame_time_x2 = frame_time_in_us * 2 / 100; 3970 data->frame_time_x2 = frame_time_in_us * 2 / 100;
3971 3971
3972 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); 3972 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
205 struct amd_sched_entity *entity) 205 struct amd_sched_entity *entity)
206{ 206{
207 struct amd_sched_rq *rq = entity->rq; 207 struct amd_sched_rq *rq = entity->rq;
208 int r;
209 208
210 if (!amd_sched_entity_is_initialized(sched, entity)) 209 if (!amd_sched_entity_is_initialized(sched, entity))
211 return; 210 return;
211
212 /** 212 /**
213 * The client will not queue more IBs during this fini, consume existing 213 * The client will not queue more IBs during this fini, consume existing
214 * queued IBs or discard them on SIGKILL 214 * queued IBs
215 */ 215 */
216 if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) 216 wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
217 r = -ERESTARTSYS;
218 else
219 r = wait_event_killable(sched->job_scheduled,
220 amd_sched_entity_is_idle(entity));
221 amd_sched_rq_remove_entity(rq, entity);
222 if (r) {
223 struct amd_sched_job *job;
224 217
225 /* Park the kernel for a moment to make sure it isn't processing 218 amd_sched_rq_remove_entity(rq, entity);
226 * our enity.
227 */
228 kthread_park(sched->thread);
229 kthread_unpark(sched->thread);
230 while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
231 sched->ops->free_job(job);
232
233 }
234 kfifo_free(&entity->job_queue); 219 kfifo_free(&entity->job_queue);
235} 220}
236 221
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4e53aae9a1fb..0028591f3f95 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2960,6 +2960,7 @@ out:
2960 drm_modeset_backoff(&ctx); 2960 drm_modeset_backoff(&ctx);
2961 } 2961 }
2962 2962
2963 drm_atomic_state_put(state);
2963 drm_modeset_drop_locks(&ctx); 2964 drm_modeset_drop_locks(&ctx);
2964 drm_modeset_acquire_fini(&ctx); 2965 drm_modeset_acquire_fini(&ctx);
2965 2966
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5a634594a6ce..57881167ccd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
551void etnaviv_gem_free_object(struct drm_gem_object *obj) 551void etnaviv_gem_free_object(struct drm_gem_object *obj)
552{ 552{
553 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 553 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
554 struct etnaviv_drm_private *priv = obj->dev->dev_private;
554 struct etnaviv_vram_mapping *mapping, *tmp; 555 struct etnaviv_vram_mapping *mapping, *tmp;
555 556
556 /* object should not be active */ 557 /* object should not be active */
557 WARN_ON(is_active(etnaviv_obj)); 558 WARN_ON(is_active(etnaviv_obj));
558 559
560 mutex_lock(&priv->gem_lock);
559 list_del(&etnaviv_obj->gem_node); 561 list_del(&etnaviv_obj->gem_node);
562 mutex_unlock(&priv->gem_lock);
560 563
561 list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, 564 list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
562 obj_node) { 565 obj_node) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 026ef4e02f85..46dfe0737f43 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
445 cmdbuf->user_size = ALIGN(args->stream_size, 8); 445 cmdbuf->user_size = ALIGN(args->stream_size, 8);
446 446
447 ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); 447 ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
448 if (ret == 0) 448 if (ret)
449 cmdbuf = NULL; 449 goto out;
450
451 cmdbuf = NULL;
450 452
451 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { 453 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
452 /* 454 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e651a58c18cf..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
168static int exynos_drm_suspend(struct device *dev) 168static int exynos_drm_suspend(struct device *dev)
169{ 169{
170 struct drm_device *drm_dev = dev_get_drvdata(dev); 170 struct drm_device *drm_dev = dev_get_drvdata(dev);
171 struct exynos_drm_private *private = drm_dev->dev_private; 171 struct exynos_drm_private *private;
172 172
173 if (pm_runtime_suspended(dev) || !drm_dev) 173 if (pm_runtime_suspended(dev) || !drm_dev)
174 return 0; 174 return 0;
175 175
176 private = drm_dev->dev_private;
177
176 drm_kms_helper_poll_disable(drm_dev); 178 drm_kms_helper_poll_disable(drm_dev);
177 exynos_drm_fbdev_suspend(drm_dev); 179 exynos_drm_fbdev_suspend(drm_dev);
178 private->suspend_state = drm_atomic_helper_suspend(drm_dev); 180 private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
188static int exynos_drm_resume(struct device *dev) 190static int exynos_drm_resume(struct device *dev)
189{ 191{
190 struct drm_device *drm_dev = dev_get_drvdata(dev); 192 struct drm_device *drm_dev = dev_get_drvdata(dev);
191 struct exynos_drm_private *private = drm_dev->dev_private; 193 struct exynos_drm_private *private;
192 194
193 if (pm_runtime_suspended(dev) || !drm_dev) 195 if (pm_runtime_suspended(dev) || !drm_dev)
194 return 0; 196 return 0;
195 197
198 private = drm_dev->dev_private;
196 drm_atomic_helper_resume(drm_dev, private->suspend_state); 199 drm_atomic_helper_resume(drm_dev, private->suspend_state);
197 exynos_drm_fbdev_resume(drm_dev); 200 exynos_drm_fbdev_resume(drm_dev);
198 drm_kms_helper_poll_enable(drm_dev); 201 drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
427 430
428 kfree(drm->dev_private); 431 kfree(drm->dev_private);
429 drm->dev_private = NULL; 432 drm->dev_private = NULL;
433 dev_set_drvdata(dev, NULL);
430 434
431 drm_dev_unref(drm); 435 drm_dev_unref(drm);
432} 436}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..d4726a3358a4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2723 uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; 2723 uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2724 unsigned char *bb_start_sva; 2724 unsigned char *bb_start_sva;
2725 2725
2726 if (!wa_ctx->per_ctx.valid)
2727 return 0;
2728
2726 per_ctx_start[0] = 0x18800001; 2729 per_ctx_start[0] = 0x18800001;
2727 per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; 2730 per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2728 2731
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..e5320b4eb698 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
701 CACHELINE_BYTES; 701 CACHELINE_BYTES;
702 workload->wa_ctx.per_ctx.guest_gma = 702 workload->wa_ctx.per_ctx.guest_gma =
703 per_ctx & PER_CTX_ADDR_MASK; 703 per_ctx & PER_CTX_ADDR_MASK;
704 704 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
705 WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
706 } 705 }
707 706
708 if (emulate_schedule_in) 707 if (emulate_schedule_in)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2294466dd415..a5bed2e71b92 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
1429 return 0; 1429 return 0;
1430} 1430}
1431 1431
1432static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu, 1432static int mmio_read_from_hw(struct intel_vgpu *vgpu,
1433 unsigned int offset, void *p_data, unsigned int bytes)
1434{
1435 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1436
1437 mmio_hw_access_pre(dev_priv);
1438 vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
1439 mmio_hw_access_post(dev_priv);
1440 return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
1441}
1442
1443static int instdone_mmio_read(struct intel_vgpu *vgpu,
1444 unsigned int offset, void *p_data, unsigned int bytes) 1433 unsigned int offset, void *p_data, unsigned int bytes)
1445{ 1434{
1446 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1435 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
1589 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ 1578 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
1590 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ 1579 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
1591 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ 1580 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
1581 if (HAS_BSD2(dev_priv)) \
1582 MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
1592} while (0) 1583} while (0)
1593 1584
1594#define MMIO_RING_D(prefix, d) \ 1585#define MMIO_RING_D(prefix, d) \
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1635#undef RING_REG 1626#undef RING_REG
1636 1627
1637#define RING_REG(base) (base + 0x6c) 1628#define RING_REG(base) (base + 0x6c)
1638 MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL); 1629 MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
1639 MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
1640#undef RING_REG 1630#undef RING_REG
1641 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL); 1631 MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
1642 1632
1643 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); 1633 MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
1644 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); 1634 MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1648 MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1638 MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1649 MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1639 MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
1650 MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1640 MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
1651 MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1641 MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
1652 MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); 1642 MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
1653 1643
1654 /* RING MODE */ 1644 /* RING MODE */
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
1662 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1652 MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
1663 NULL, NULL); 1653 NULL, NULL);
1664 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, 1654 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
1665 ring_timestamp_mmio_read, NULL); 1655 mmio_read_from_hw, NULL);
1666 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, 1656 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
1667 ring_timestamp_mmio_read, NULL); 1657 mmio_read_from_hw, NULL);
1668 1658
1669 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1659 MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
1670 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1660 MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2411 struct drm_i915_private *dev_priv = gvt->dev_priv; 2401 struct drm_i915_private *dev_priv = gvt->dev_priv;
2412 int ret; 2402 int ret;
2413 2403
2414 MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
2415 intel_vgpu_reg_imr_handler);
2416
2417 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); 2404 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
2418 MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); 2405 MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
2419 MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); 2406 MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2476 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, 2463 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
2477 intel_vgpu_reg_master_irq_handler); 2464 intel_vgpu_reg_master_irq_handler);
2478 2465
2479 MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2466 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
2480 F_CMD_ACCESS, NULL, NULL); 2467 mmio_read_from_hw, NULL);
2481 MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2482
2483 MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2484 NULL, NULL);
2485 MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2486 F_CMD_ACCESS, NULL, NULL);
2487 MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2488 MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2489 NULL, NULL);
2490 MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2491 F_CMD_ACCESS, NULL, NULL);
2492 MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2493 F_CMD_ACCESS, NULL, NULL);
2494 MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
2495 ring_mode_mmio_write);
2496 MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2497 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2498 MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
2499 F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2500 MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2501 ring_timestamp_mmio_read, NULL);
2502
2503 MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2504 2468
2505#define RING_REG(base) (base + 0xd0) 2469#define RING_REG(base) (base + 0xd0)
2506 MMIO_RING_F(RING_REG, 4, F_RO, 0, 2470 MMIO_RING_F(RING_REG, 4, F_RO, 0,
2507 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, 2471 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2508 ring_reset_ctl_write); 2472 ring_reset_ctl_write);
2509 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
2510 ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
2511 ring_reset_ctl_write);
2512#undef RING_REG 2473#undef RING_REG
2513 2474
2514#define RING_REG(base) (base + 0x230) 2475#define RING_REG(base) (base + 0x230)
2515 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); 2476 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
2516 MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
2517#undef RING_REG 2477#undef RING_REG
2518 2478
2519#define RING_REG(base) (base + 0x234) 2479#define RING_REG(base) (base + 0x234)
2520 MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, 2480 MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
2521 NULL, NULL); 2481 NULL, NULL);
2522 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
2523 ~0LL, D_BDW_PLUS, NULL, NULL);
2524#undef RING_REG 2482#undef RING_REG
2525 2483
2526#define RING_REG(base) (base + 0x244) 2484#define RING_REG(base) (base + 0x244)
2527 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2485 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
2528 MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
2529 NULL, NULL);
2530#undef RING_REG 2486#undef RING_REG
2531 2487
2532#define RING_REG(base) (base + 0x370) 2488#define RING_REG(base) (base + 0x370)
2533 MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); 2489 MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
2534 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
2535 NULL, NULL);
2536#undef RING_REG 2490#undef RING_REG
2537 2491
2538#define RING_REG(base) (base + 0x3a0) 2492#define RING_REG(base) (base + 0x3a0)
2539 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); 2493 MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2540 MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
2541#undef RING_REG 2494#undef RING_REG
2542 2495
2543 MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); 2496 MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
2557 2510
2558#define RING_REG(base) (base + 0x270) 2511#define RING_REG(base) (base + 0x270)
2559 MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); 2512 MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
2560 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
2561#undef RING_REG 2513#undef RING_REG
2562 2514
2563 MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); 2515 MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
2564 MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
2565 2516
2566 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2517 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
2567 2518
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2849 MMIO_D(0x65f08, D_SKL | D_KBL); 2800 MMIO_D(0x65f08, D_SKL | D_KBL);
2850 MMIO_D(0x320f0, D_SKL | D_KBL); 2801 MMIO_D(0x320f0, D_SKL | D_KBL);
2851 2802
2852 MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
2853 MMIO_D(0x70034, D_SKL_PLUS); 2803 MMIO_D(0x70034, D_SKL_PLUS);
2854 MMIO_D(0x71034, D_SKL_PLUS); 2804 MMIO_D(0x71034, D_SKL_PLUS);
2855 MMIO_D(0x72034, D_SKL_PLUS); 2805 MMIO_D(0x72034, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index fbd023a16f18..7d01c77a0f7a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -54,9 +54,6 @@
54 54
55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) 55#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
56 56
57#define _REG_VECS_EXCC 0x1A028
58#define _REG_VCS2_EXCC 0x1c028
59
60#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) 57#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
61#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) 58#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
62 59
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
308 308
309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) 309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
310{ 310{
311 struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
312 int ring_id;
313
314 kfree(vgpu->sched_data); 311 kfree(vgpu->sched_data);
315 vgpu->sched_data = NULL; 312 vgpu->sched_data = NULL;
316
317 spin_lock_bh(&scheduler->mmio_context_lock);
318 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
319 if (scheduler->engine_owner[ring_id] == vgpu) {
320 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
321 scheduler->engine_owner[ring_id] = NULL;
322 }
323 }
324 spin_unlock_bh(&scheduler->mmio_context_lock);
325} 313}
326 314
327static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 315static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
388{ 376{
389 struct intel_gvt_workload_scheduler *scheduler = 377 struct intel_gvt_workload_scheduler *scheduler =
390 &vgpu->gvt->scheduler; 378 &vgpu->gvt->scheduler;
379 int ring_id;
391 380
392 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); 381 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
393 382
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
401 scheduler->need_reschedule = true; 390 scheduler->need_reschedule = true;
402 scheduler->current_vgpu = NULL; 391 scheduler->current_vgpu = NULL;
403 } 392 }
393
394 spin_lock_bh(&scheduler->mmio_context_lock);
395 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
396 if (scheduler->engine_owner[ring_id] == vgpu) {
397 intel_gvt_switch_mmio(vgpu, NULL, ring_id);
398 scheduler->engine_owner[ring_id] = NULL;
399 }
400 }
401 spin_unlock_bh(&scheduler->mmio_context_lock);
404} 402}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..93a49eb0209e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
68struct shadow_per_ctx { 68struct shadow_per_ctx {
69 unsigned long guest_gma; 69 unsigned long guest_gma;
70 unsigned long shadow_gma; 70 unsigned long shadow_gma;
71 unsigned valid;
71}; 72};
72 73
73struct intel_shadow_wa_ctx { 74struct intel_shadow_wa_ctx {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 19404c96eeb1..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2657 if (READ_ONCE(obj->mm.pages)) 2657 if (READ_ONCE(obj->mm.pages))
2658 return -ENODEV; 2658 return -ENODEV;
2659 2659
2660 if (obj->mm.madv != I915_MADV_WILLNEED)
2661 return -EFAULT;
2662
2660 /* Before the pages are instantiated the object is treated as being 2663 /* Before the pages are instantiated the object is treated as being
2661 * in the CPU domain. The pages will be clflushed as required before 2664 * in the CPU domain. The pages will be clflushed as required before
2662 * use, and we can freely write into the pages directly. If userspace 2665 * use, and we can freely write into the pages directly. If userspace
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3013 3016
3014static void nop_submit_request(struct drm_i915_gem_request *request) 3017static void nop_submit_request(struct drm_i915_gem_request *request)
3015{ 3018{
3019 unsigned long flags;
3020
3016 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); 3021 GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
3017 dma_fence_set_error(&request->fence, -EIO); 3022 dma_fence_set_error(&request->fence, -EIO);
3018 i915_gem_request_submit(request); 3023
3024 spin_lock_irqsave(&request->engine->timeline->lock, flags);
3025 __i915_gem_request_submit(request);
3019 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3026 intel_engine_init_global_seqno(request->engine, request->global_seqno);
3027 spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3020} 3028}
3021 3029
3022static void engine_set_wedged(struct intel_engine_cs *engine) 3030static void engine_set_wedged(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static bool ggtt_is_idle(struct drm_i915_private *dev_priv) 36static bool ggtt_is_idle(struct drm_i915_private *i915)
37{ 37{
38 struct i915_ggtt *ggtt = &dev_priv->ggtt; 38 struct intel_engine_cs *engine;
39 struct intel_engine_cs *engine; 39 enum intel_engine_id id;
40 enum intel_engine_id id;
41 40
42 for_each_engine(engine, dev_priv, id) { 41 if (i915->gt.active_requests)
43 struct intel_timeline *tl; 42 return false;
44 43
45 tl = &ggtt->base.timeline.engine[engine->id]; 44 for_each_engine(engine, i915, id) {
46 if (i915_gem_active_isset(&tl->last_request)) 45 if (engine->last_retired_context != i915->kernel_context)
47 return false; 46 return false;
48 } 47 }
49 48
50 return true; 49 return true;
51} 50}
52 51
53static int ggtt_flush(struct drm_i915_private *i915) 52static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
157 min_size, alignment, cache_level, 156 min_size, alignment, cache_level,
158 start, end, mode); 157 start, end, mode);
159 158
160 /* Retire before we search the active list. Although we have 159 /*
160 * Retire before we search the active list. Although we have
161 * reasonable accuracy in our retirement lists, we may have 161 * reasonable accuracy in our retirement lists, we may have
162 * a stray pin (preventing eviction) that can only be resolved by 162 * a stray pin (preventing eviction) that can only be resolved by
163 * retiring. 163 * retiring.
@@ -182,7 +182,8 @@ search_again:
182 BUG_ON(ret); 182 BUG_ON(ret);
183 } 183 }
184 184
185 /* Can we unpin some objects such as idle hw contents, 185 /*
186 * Can we unpin some objects such as idle hw contents,
186 * or pending flips? But since only the GGTT has global entries 187 * or pending flips? But since only the GGTT has global entries
187 * such as scanouts, rinbuffers and contexts, we can skip the 188 * such as scanouts, rinbuffers and contexts, we can skip the
188 * purge when inspecting per-process local address spaces. 189 * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
190 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) 191 if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
191 return -ENOSPC; 192 return -ENOSPC;
192 193
193 if (ggtt_is_idle(dev_priv)) { 194 /*
194 /* If we still have pending pageflip completions, drop 195 * Not everything in the GGTT is tracked via VMA using
195 * back to userspace to give our workqueues time to 196 * i915_vma_move_to_active(), otherwise we could evict as required
196 * acquire our locks and unpin the old scanouts. 197 * with minimal stalling. Instead we are forced to idle the GPU and
197 */ 198 * explicitly retire outstanding requests which will then remove
198 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; 199 * the pinning for active objects such as contexts and ring,
199 } 200 * enabling us to evict them on the next iteration.
201 *
202 * To ensure that all user contexts are evictable, we perform
203 * a switch to the perma-pinned kernel context. This all also gives
204 * us a termination condition, when the last retired context is
205 * the kernel's there is no more we can evict.
206 */
207 if (!ggtt_is_idle(dev_priv)) {
208 ret = ggtt_flush(dev_priv);
209 if (ret)
210 return ret;
200 211
201 ret = ggtt_flush(dev_priv); 212 goto search_again;
202 if (ret) 213 }
203 return ret;
204 214
205 goto search_again; 215 /*
216 * If we still have pending pageflip completions, drop
217 * back to userspace to give our workqueues time to
218 * acquire our locks and unpin the old scanouts.
219 */
220 return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
206 221
207found: 222found:
208 /* drm_mm doesn't allow any other other operations while 223 /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 94185d610673..370b9d248fed 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
2537 .poll = i915_perf_poll, 2537 .poll = i915_perf_poll,
2538 .read = i915_perf_read, 2538 .read = i915_perf_read,
2539 .unlocked_ioctl = i915_perf_ioctl, 2539 .unlocked_ioctl = i915_perf_ioctl,
2540 /* Our ioctl have no arguments, so it's safe to use the same function
2541 * to handle 32bits compatibility.
2542 */
2543 .compat_ioctl = i915_perf_ioctl,
2540}; 2544};
2541 2545
2542 2546
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
6998 */ 6998 */
6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) 6999#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) 7000#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
7001#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
7001 7002
7002#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 7003#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
7003#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 7004#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
606 connector->encoder->base.id, 606 connector->encoder->base.id,
607 connector->encoder->name); 607 connector->encoder->name);
608 608
609 /* ELD Conn_Type */
610 connector->eld[5] &= ~(3 << 2);
611 if (intel_crtc_has_dp_encoder(crtc_state))
612 connector->eld[5] |= (1 << 2);
613
614 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 609 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
615 610
616 if (dev_priv->display.audio_codec_enable) 611 if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 183e87e8ea31..5d4cd3d00564 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1163 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; 1163 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
1164 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); 1164 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
1165 1165
1166 if (port == PORT_A && is_dvi) {
1167 DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
1168 is_hdmi ? "/HDMI" : "");
1169 is_dvi = false;
1170 is_hdmi = false;
1171 }
1172
1166 info->supports_dvi = is_dvi; 1173 info->supports_dvi = is_dvi;
1167 info->supports_hdmi = is_hdmi; 1174 info->supports_hdmi = is_hdmi;
1168 info->supports_dp = is_dp; 1175 info->supports_dp = is_dp;
@@ -1233,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
1233{ 1240{
1234 enum port port; 1241 enum port port;
1235 1242
1236 if (!HAS_DDI(dev_priv)) 1243 if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1237 return; 1244 return;
1238 1245
1239 if (!dev_priv->vbt.child_dev_num) 1246 if (!dev_priv->vbt.child_dev_num)
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index ff9ecd211abb..b8315bca852b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,7 +74,7 @@
74#define I9XX_CSC_COEFF_1_0 \ 74#define I9XX_CSC_COEFF_1_0 \
75 ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) 75 ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
76 76
77static bool crtc_state_is_legacy(struct drm_crtc_state *state) 77static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
78{ 78{
79 return !state->degamma_lut && 79 return !state->degamma_lut &&
80 !state->ctm && 80 !state->ctm &&
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
288 } 288 }
289 289
290 mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); 290 mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
291 if (!crtc_state_is_legacy(state)) { 291 if (!crtc_state_is_legacy_gamma(state)) {
292 mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | 292 mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
293 (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); 293 (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
294 } 294 }
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
469 struct intel_crtc_state *intel_state = to_intel_crtc_state(state); 469 struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
470 enum pipe pipe = to_intel_crtc(state->crtc)->pipe; 470 enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
471 471
472 if (crtc_state_is_legacy(state)) { 472 if (crtc_state_is_legacy_gamma(state)) {
473 haswell_load_luts(state); 473 haswell_load_luts(state);
474 return; 474 return;
475 } 475 }
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state)
529 529
530 glk_load_degamma_lut(state); 530 glk_load_degamma_lut(state);
531 531
532 if (crtc_state_is_legacy(state)) { 532 if (crtc_state_is_legacy_gamma(state)) {
533 haswell_load_luts(state); 533 haswell_load_luts(state);
534 return; 534 return;
535 } 535 }
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
551 uint32_t i, lut_size; 551 uint32_t i, lut_size;
552 uint32_t word0, word1; 552 uint32_t word0, word1;
553 553
554 if (crtc_state_is_legacy(state)) { 554 if (crtc_state_is_legacy_gamma(state)) {
555 /* Turn off degamma/gamma on CGM block. */ 555 /* Turn off degamma/gamma on CGM block. */
556 I915_WRITE(CGM_PIPE_MODE(pipe), 556 I915_WRITE(CGM_PIPE_MODE(pipe),
557 (state->ctm ? CGM_PIPE_MODE_CSC : 0)); 557 (state->ctm ? CGM_PIPE_MODE_CSC : 0));
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc,
632 return 0; 632 return 0;
633 633
634 /* 634 /*
635 * We also allow no degamma lut and a gamma lut at the legacy 635 * We also allow no degamma lut/ctm and a gamma lut at the legacy
636 * size (256 entries). 636 * size (256 entries).
637 */ 637 */
638 if (!crtc_state->degamma_lut && 638 if (crtc_state_is_legacy_gamma(crtc_state))
639 crtc_state->gamma_lut &&
640 crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
641 return 0; 639 return 0;
642 640
643 return -EINVAL; 641 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..92c1f8e166dc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
216 216
217 mask = DC_STATE_DEBUG_MASK_MEMORY_UP; 217 mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
218 218
219 if (IS_BROXTON(dev_priv)) 219 if (IS_GEN9_LP(dev_priv))
220 mask |= DC_STATE_DEBUG_MASK_CORES; 220 mask |= DC_STATE_DEBUG_MASK_CORES;
221 221
222 /* The below bit doesn't need to be cleared ever afterwards */ 222 /* The below bit doesn't need to be cleared ever afterwards */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4b4fd1f8110b..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
664 int *n_entries) 664 int *n_entries)
665{ 665{
666 if (IS_BROADWELL(dev_priv)) { 666 if (IS_BROADWELL(dev_priv)) {
667 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 667 *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
668 return hsw_ddi_translations_fdi; 668 return bdw_ddi_translations_fdi;
669 } else if (IS_HASWELL(dev_priv)) { 669 } else if (IS_HASWELL(dev_priv)) {
670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); 670 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
671 return hsw_ddi_translations_fdi; 671 return hsw_ddi_translations_fdi;
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1655out: 1655out:
1656 if (ret && IS_GEN9_LP(dev_priv)) { 1656 if (ret && IS_GEN9_LP(dev_priv)) {
1657 tmp = I915_READ(BXT_PHY_CTL(port)); 1657 tmp = I915_READ(BXT_PHY_CTL(port));
1658 if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | 1658 if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
1659 BXT_PHY_LANE_POWERDOWN_ACK |
1659 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) 1660 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
1660 DRM_ERROR("Port %c enabled but PHY powered down? " 1661 DRM_ERROR("Port %c enabled but PHY powered down? "
1661 "(PHY_CTL %08x)\n", port_name(port), tmp); 1662 "(PHY_CTL %08x)\n", port_name(port), tmp);
@@ -2101,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
2101 * register writes. 2102 * register writes.
2102 */ 2103 */
2103 val = I915_READ(DPCLKA_CFGCR0); 2104 val = I915_READ(DPCLKA_CFGCR0);
2104 val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | 2105 val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
2105 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
2106 I915_WRITE(DPCLKA_CFGCR0, val); 2106 I915_WRITE(DPCLKA_CFGCR0, val);
2107 } else if (IS_GEN9_BC(dev_priv)) { 2107 } else if (IS_GEN9_BC(dev_priv)) {
2108 /* DDI -> PLL mapping */ 2108 /* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 00cd17c76fdc..5c7828c52d12 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10245{ 10245{
10246 struct drm_i915_private *dev_priv = to_i915(dev); 10246 struct drm_i915_private *dev_priv = to_i915(dev);
10247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10247 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10248 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10248 enum transcoder cpu_transcoder;
10249 struct drm_display_mode *mode; 10249 struct drm_display_mode *mode;
10250 struct intel_crtc_state *pipe_config; 10250 struct intel_crtc_state *pipe_config;
10251 int htot = I915_READ(HTOTAL(cpu_transcoder)); 10251 u32 htot, hsync, vtot, vsync;
10252 int hsync = I915_READ(HSYNC(cpu_transcoder));
10253 int vtot = I915_READ(VTOTAL(cpu_transcoder));
10254 int vsync = I915_READ(VSYNC(cpu_transcoder));
10255 enum pipe pipe = intel_crtc->pipe; 10252 enum pipe pipe = intel_crtc->pipe;
10256 10253
10257 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10254 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10279 i9xx_crtc_clock_get(intel_crtc, pipe_config); 10276 i9xx_crtc_clock_get(intel_crtc, pipe_config);
10280 10277
10281 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; 10278 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10279
10280 cpu_transcoder = pipe_config->cpu_transcoder;
10281 htot = I915_READ(HTOTAL(cpu_transcoder));
10282 hsync = I915_READ(HSYNC(cpu_transcoder));
10283 vtot = I915_READ(VTOTAL(cpu_transcoder));
10284 vsync = I915_READ(VSYNC(cpu_transcoder));
10285
10282 mode->hdisplay = (htot & 0xffff) + 1; 10286 mode->hdisplay = (htot & 0xffff) + 1;
10283 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 10287 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10284 mode->hsync_start = (hsync & 0xffff) + 1; 10288 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -12359,7 +12363,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12359 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12363 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12360 struct drm_crtc *crtc; 12364 struct drm_crtc *crtc;
12361 struct intel_crtc_state *intel_cstate; 12365 struct intel_crtc_state *intel_cstate;
12362 bool hw_check = intel_state->modeset;
12363 u64 put_domains[I915_MAX_PIPES] = {}; 12366 u64 put_domains[I915_MAX_PIPES] = {};
12364 unsigned crtc_vblank_mask = 0; 12367 unsigned crtc_vblank_mask = 0;
12365 int i; 12368 int i;
@@ -12376,7 +12379,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12376 12379
12377 if (needs_modeset(new_crtc_state) || 12380 if (needs_modeset(new_crtc_state) ||
12378 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12381 to_intel_crtc_state(new_crtc_state)->update_pipe) {
12379 hw_check = true;
12380 12382
12381 put_domains[to_intel_crtc(crtc)->pipe] = 12383 put_domains[to_intel_crtc(crtc)->pipe] =
12382 modeset_get_crtc_power_domains(crtc, 12384 modeset_get_crtc_power_domains(crtc,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 64134947c0aa..203198659ab2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2307 I915_WRITE(pp_ctrl_reg, pp); 2307 I915_WRITE(pp_ctrl_reg, pp);
2308 POSTING_READ(pp_ctrl_reg); 2308 POSTING_READ(pp_ctrl_reg);
2309 2309
2310 intel_dp->panel_power_off_time = ktime_get_boottime();
2311 wait_panel_off(intel_dp); 2310 wait_panel_off(intel_dp);
2311 intel_dp->panel_power_off_time = ktime_get_boottime();
2312 2312
2313 /* We got a reference when we enabled the VDD. */ 2313 /* We got a reference when we enabled the VDD. */
2314 intel_display_power_put(dev_priv, intel_dp->aux_power_domain); 2314 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5273 * seems sufficient to avoid this problem. 5273 * seems sufficient to avoid this problem.
5274 */ 5274 */
5275 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { 5275 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
5276 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); 5276 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
5277 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", 5277 DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
5278 vbt.t11_t12); 5278 vbt.t11_t12);
5279 } 5279 }
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 09b670929786..de38d014ed39 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
208 }, 208 },
209}; 209};
210 210
211static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
212{
213 return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
214 BIT(phy_info->channel[DPIO_CH0].port);
215}
216
217static const struct bxt_ddi_phy_info * 211static const struct bxt_ddi_phy_info *
218bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) 212bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
219{ 213{
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
313 enum dpio_phy phy) 307 enum dpio_phy phy)
314{ 308{
315 const struct bxt_ddi_phy_info *phy_info; 309 const struct bxt_ddi_phy_info *phy_info;
316 enum port port;
317 310
318 phy_info = bxt_get_phy_info(dev_priv, phy); 311 phy_info = bxt_get_phy_info(dev_priv, phy);
319 312
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
335 return false; 328 return false;
336 } 329 }
337 330
338 for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
339 u32 tmp = I915_READ(BXT_PHY_CTL(port));
340
341 if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
342 DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
343 "for port %c powered down "
344 "(PHY_CTL %08x)\n",
345 phy, port_name(port), tmp);
346
347 return false;
348 }
349 }
350
351 return true; 331 return true;
352} 332}
353 333
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1996 1996
1997 /* 3. Configure DPLL_CFGCR0 */ 1997 /* 3. Configure DPLL_CFGCR0 */
1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */ 1998 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
1999 if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { 1999 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2000 val = pll->state.hw_state.cfgcr1; 2000 val = pll->state.hw_state.cfgcr1;
2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); 2001 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
2002 /* 4. Reab back to ensure writes completed */ 2002 /* 4. Reab back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1048 } 1048 }
1049 1049
1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */ 1050 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) 1051 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1052 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | 1052 u32 val = I915_READ(GEN8_L3SQCREG1);
1053 L3_HIGH_PRIO_CREDITS(2)); 1053 val &= ~L3_PRIO_CREDITS_MASK;
1054 val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
1055 I915_WRITE(GEN8_L3SQCREG1, val);
1056 }
1054 1057
1055 /* WaToEnableHwFixForPushConstHWBug:bxt */ 1058 /* WaToEnableHwFixForPushConstHWBug:bxt */
1056 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1059 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
34{
35 u8 conn_type;
36
37 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
38 connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
39 conn_type = DRM_ELD_CONN_TYPE_DP;
40 } else {
41 conn_type = DRM_ELD_CONN_TYPE_HDMI;
42 }
43
44 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
45 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
46}
47
33/** 48/**
34 * intel_connector_update_modes - update connector from edid 49 * intel_connector_update_modes - update connector from edid
35 * @connector: DRM connector device to use 50 * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
44 ret = drm_add_edid_modes(connector, edid); 59 ret = drm_add_edid_modes(connector, edid);
45 drm_edid_to_eld(connector, edid); 60 drm_edid_to_eld(connector, edid);
46 61
62 intel_connector_update_eld_conn_type(connector);
63
47 return ret; 64 return ret;
48} 65}
49 66
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
8245 int high_prio_credits) 8245 int high_prio_credits)
8246{ 8246{
8247 u32 misccpctl; 8247 u32 misccpctl;
8248 u32 val;
8248 8249
8249 /* WaTempDisableDOPClkGating:bdw */ 8250 /* WaTempDisableDOPClkGating:bdw */
8250 misccpctl = I915_READ(GEN7_MISCCPCTL); 8251 misccpctl = I915_READ(GEN7_MISCCPCTL);
8251 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 8252 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
8252 8253
8253 I915_WRITE(GEN8_L3SQCREG1, 8254 val = I915_READ(GEN8_L3SQCREG1);
8254 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 8255 val &= ~L3_PRIO_CREDITS_MASK;
8255 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 8256 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
8257 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
8258 I915_WRITE(GEN8_L3SQCREG1, val);
8256 8259
8257 /* 8260 /*
8258 * Wait at least 100 clocks before re-enabling clock gating. 8261 * Wait at least 100 clocks before re-enabling clock gating.
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b66d8e136aa3..49577eba8e7e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
368{ 368{
369 enum i915_power_well_id id = power_well->id; 369 enum i915_power_well_id id = power_well->id;
370 bool wait_fuses = power_well->hsw.has_fuses; 370 bool wait_fuses = power_well->hsw.has_fuses;
371 enum skl_power_gate pg; 371 enum skl_power_gate uninitialized_var(pg);
372 u32 val; 372 u32 val;
373 373
374 if (wait_fuses) { 374 if (wait_fuses) {
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
2782 2782
2783 /* 6. Enable DBUF */ 2783 /* 6. Enable DBUF */
2784 gen9_dbuf_enable(dev_priv); 2784 gen9_dbuf_enable(dev_priv);
2785
2786 if (resume && dev_priv->csr.dmc_payload)
2787 intel_csr_load_program(dev_priv);
2785} 2788}
2786 2789
2787#undef CNL_PROCMON_IDX 2790#undef CNL_PROCMON_IDX
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index dbb31a014419..deaf869374ea 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -248,7 +248,7 @@ disable_clks:
248 clk_disable_unprepare(ahb_clk); 248 clk_disable_unprepare(ahb_clk);
249disable_gdsc: 249disable_gdsc:
250 regulator_disable(gdsc_reg); 250 regulator_disable(gdsc_reg);
251 pm_runtime_put_autosuspend(dev); 251 pm_runtime_put_sync(dev);
252put_clk: 252put_clk:
253 clk_put(ahb_clk); 253 clk_put(ahb_clk);
254put_gdsc: 254put_gdsc:
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index c2bdad88447e..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
83 .caps = MDP_LM_CAP_WB }, 83 .caps = MDP_LM_CAP_WB },
84 }, 84 },
85 .nb_stages = 5, 85 .nb_stages = 5,
86 .max_width = 2048,
87 .max_height = 0xFFFF,
86 }, 88 },
87 .dspp = { 89 .dspp = {
88 .count = 3, 90 .count = 3,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 6fcb58ab718c..440977677001 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
804 804
805 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); 805 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
806 806
807 pm_runtime_put_autosuspend(&pdev->dev);
808
809set_cursor: 807set_cursor:
810 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); 808 ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
811 if (ret) { 809 if (ret) {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f15821a0d900..ea5bb0e1632c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
610 struct dma_fence *fence; 610 struct dma_fence *fence;
611 int i, ret; 611 int i, ret;
612 612
613 if (!exclusive) {
614 /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
615 * which makes this a slightly strange place to call it. OTOH this
616 * is a convenient can-fail point to hook it in. (And similar to
617 * how etnaviv and nouveau handle this.)
618 */
619 ret = reservation_object_reserve_shared(msm_obj->resv);
620 if (ret)
621 return ret;
622 }
623
624 fobj = reservation_object_get_list(msm_obj->resv); 613 fobj = reservation_object_get_list(msm_obj->resv);
625 if (!fobj || (fobj->shared_count == 0)) { 614 if (!fobj || (fobj->shared_count == 0)) {
626 fence = reservation_object_get_excl(msm_obj->resv); 615 fence = reservation_object_get_excl(msm_obj->resv);
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1045 } 1034 }
1046 1035
1047 vaddr = msm_gem_get_vaddr(obj); 1036 vaddr = msm_gem_get_vaddr(obj);
1048 if (!vaddr) { 1037 if (IS_ERR(vaddr)) {
1049 msm_gem_put_iova(obj, aspace); 1038 msm_gem_put_iova(obj, aspace);
1050 drm_gem_object_unreference(obj); 1039 drm_gem_object_unreference(obj);
1051 return ERR_PTR(-ENOMEM); 1040 return ERR_CAST(vaddr);
1052 } 1041 }
1053 1042
1054 if (bo) 1043 if (bo)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5d0a75d4b249..93535cac0676 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -221,7 +221,7 @@ fail:
221 return ret; 221 return ret;
222} 222}
223 223
224static int submit_fence_sync(struct msm_gem_submit *submit) 224static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
225{ 225{
226 int i, ret = 0; 226 int i, ret = 0;
227 227
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit)
229 struct msm_gem_object *msm_obj = submit->bos[i].obj; 229 struct msm_gem_object *msm_obj = submit->bos[i].obj;
230 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; 230 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
231 231
232 if (!write) {
233 /* NOTE: _reserve_shared() must happen before
234 * _add_shared_fence(), which makes this a slightly
235 * strange place to call it. OTOH this is a
236 * convenient can-fail point to hook it in.
237 */
238 ret = reservation_object_reserve_shared(msm_obj->resv);
239 if (ret)
240 return ret;
241 }
242
243 if (no_implicit)
244 continue;
245
232 ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); 246 ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
233 if (ret) 247 if (ret)
234 break; 248 break;
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
451 if (ret) 465 if (ret)
452 goto out; 466 goto out;
453 467
454 if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { 468 ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
455 ret = submit_fence_sync(submit); 469 if (ret)
456 if (ret) 470 goto out;
457 goto out;
458 }
459 471
460 ret = submit_pin_objects(submit); 472 ret = submit_pin_objects(submit);
461 if (ret) 473 if (ret)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index ffbff27600e0..6a887032c66a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
718 msm_gem_put_iova(gpu->rb->bo, gpu->aspace); 718 msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
719 msm_ringbuffer_destroy(gpu->rb); 719 msm_ringbuffer_destroy(gpu->rb);
720 } 720 }
721 if (gpu->aspace) { 721
722 if (!IS_ERR_OR_NULL(gpu->aspace)) {
722 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, 723 gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
723 NULL, 0); 724 NULL, 0);
724 msm_gem_address_space_put(gpu->aspace); 725 msm_gem_address_space_put(gpu->aspace);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0366b8092f97..ec56794ad039 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
111 111
112 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); 112 wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
113 113
114 /* Note that smp_load_acquire() is not strictly required
115 * as CIRC_SPACE_TO_END() does not access the tail more
116 * than once.
117 */
114 n = min(sz, circ_space_to_end(&rd->fifo)); 118 n = min(sz, circ_space_to_end(&rd->fifo));
115 memcpy(fptr, ptr, n); 119 memcpy(fptr, ptr, n);
116 120
117 fifo->head = (fifo->head + n) & (BUF_SZ - 1); 121 smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
118 sz -= n; 122 sz -= n;
119 ptr += n; 123 ptr += n;
120 124
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf,
145 if (ret) 149 if (ret)
146 goto out; 150 goto out;
147 151
152 /* Note that smp_load_acquire() is not strictly required
153 * as CIRC_CNT_TO_END() does not access the head more than
154 * once.
155 */
148 n = min_t(int, sz, circ_count_to_end(&rd->fifo)); 156 n = min_t(int, sz, circ_count_to_end(&rd->fifo));
149 if (copy_to_user(buf, fptr, n)) { 157 if (copy_to_user(buf, fptr, n)) {
150 ret = -EFAULT; 158 ret = -EFAULT;
151 goto out; 159 goto out;
152 } 160 }
153 161
154 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); 162 smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
155 *ppos += n; 163 *ppos += n;
156 164
157 wake_up_all(&rd->fifo_event); 165 wake_up_all(&rd->fifo_event);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
223nouveau_fbcon_accel_save_disable(struct drm_device *dev) 223nouveau_fbcon_accel_save_disable(struct drm_device *dev)
224{ 224{
225 struct nouveau_drm *drm = nouveau_drm(dev); 225 struct nouveau_drm *drm = nouveau_drm(dev);
226 if (drm->fbcon) { 226 if (drm->fbcon && drm->fbcon->helper.fbdev) {
227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; 227 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 228 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
229 } 229 }
@@ -233,7 +233,7 @@ void
233nouveau_fbcon_accel_restore(struct drm_device *dev) 233nouveau_fbcon_accel_restore(struct drm_device *dev)
234{ 234{
235 struct nouveau_drm *drm = nouveau_drm(dev); 235 struct nouveau_drm *drm = nouveau_drm(dev);
236 if (drm->fbcon) { 236 if (drm->fbcon && drm->fbcon->helper.fbdev) {
237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; 237 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
238 } 238 }
239} 239}
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
245 struct nouveau_fbdev *fbcon = drm->fbcon; 245 struct nouveau_fbdev *fbcon = drm->fbcon;
246 if (fbcon && drm->channel) { 246 if (fbcon && drm->channel) {
247 console_lock(); 247 console_lock();
248 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 248 if (fbcon->helper.fbdev)
249 fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
249 console_unlock(); 250 console_unlock();
250 nouveau_channel_idle(drm->channel); 251 nouveau_channel_idle(drm->channel);
251 nvif_object_fini(&fbcon->twod); 252 nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
3265void 3265void
3266nv50_mstm_service(struct nv50_mstm *mstm) 3266nv50_mstm_service(struct nv50_mstm *mstm)
3267{ 3267{
3268 struct drm_dp_aux *aux = mstm->mgr.aux; 3268 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3269 bool handled = true; 3269 bool handled = true;
3270 int ret; 3270 int ret;
3271 u8 esi[8] = {}; 3271 u8 esi[8] = {};
3272 3272
3273 if (!aux)
3274 return;
3275
3273 while (handled) { 3276 while (handled) {
3274 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); 3277 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3275 if (ret != 8) { 3278 if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) 39g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
40{ 40{
41 return nvkm_xtensa_new_(&g84_bsp, device, index, 41 return nvkm_xtensa_new_(&g84_bsp, device, index,
42 true, 0x103000, pengine); 42 device->chipset != 0x92, 0x103000, pengine);
43} 43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); 241 mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
242 } 242 }
243 243
244 mmu->func->flush(vm);
245
244 nvkm_memory_del(&pgt); 246 nvkm_memory_del(&pgt);
245 } 247 }
246} 248}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 14c5613b4388..afbf50d0c08f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
509 .y2 = qfb->base.height 509 .y2 = qfb->base.height
510 }; 510 };
511 511
512 if (!old_state->fb) { 512 if (old_state->fb) {
513 qxl_io_log(qdev, 513 qfb_old = to_qxl_framebuffer(old_state->fb);
514 "create primary fb: %dx%d,%d,%d\n", 514 bo_old = gem_to_qxl_bo(qfb_old->obj);
515 bo->surf.width, bo->surf.height, 515 } else {
516 bo->surf.stride, bo->surf.format); 516 bo_old = NULL;
517 }
517 518
518 qxl_io_create_primary(qdev, 0, bo); 519 if (bo == bo_old)
519 bo->is_primary = true;
520 return; 520 return;
521 521
522 } else { 522 if (bo_old && bo_old->is_primary) {
523 qfb_old = to_qxl_framebuffer(old_state->fb); 523 qxl_io_destroy_primary(qdev);
524 bo_old = gem_to_qxl_bo(qfb_old->obj);
525 bo_old->is_primary = false; 524 bo_old->is_primary = false;
526 } 525 }
527 526
528 bo->is_primary = true; 527 if (!bo->is_primary) {
528 qxl_io_create_primary(qdev, 0, bo);
529 bo->is_primary = true;
530 }
529 qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); 531 qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
530} 532}
531 533
@@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
534{ 536{
535 struct qxl_device *qdev = plane->dev->dev_private; 537 struct qxl_device *qdev = plane->dev->dev_private;
536 538
537 if (old_state->fb) 539 if (old_state->fb) {
538 { struct qxl_framebuffer *qfb = 540 struct qxl_framebuffer *qfb =
539 to_qxl_framebuffer(old_state->fb); 541 to_qxl_framebuffer(old_state->fb);
540 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); 542 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
541 543
542 qxl_io_destroy_primary(qdev); 544 if (bo->is_primary) {
543 bo->is_primary = false; 545 qxl_io_destroy_primary(qdev);
546 bo->is_primary = false;
547 }
544 } 548 }
545} 549}
546 550
@@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
698 struct drm_gem_object *obj; 702 struct drm_gem_object *obj;
699 struct qxl_bo *user_bo; 703 struct qxl_bo *user_bo;
700 704
701 if (!plane->state->fb) { 705 if (!old_state->fb) {
702 /* we never executed prepare_fb, so there's nothing to 706 /*
707 * we never executed prepare_fb, so there's nothing to
703 * unpin. 708 * unpin.
704 */ 709 */
705 return; 710 return;
706 } 711 }
707 712
708 obj = to_qxl_framebuffer(plane->state->fb)->obj; 713 obj = to_qxl_framebuffer(old_state->fb)->obj;
709 user_bo = gem_to_qxl_bo(obj); 714 user_bo = gem_to_qxl_bo(obj);
710 qxl_bo_unpin(user_bo); 715 qxl_bo_unpin(user_bo);
711} 716}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 997131d58c7f..ffc10cadcf34 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1663 radeon_agp_suspend(rdev); 1663 radeon_agp_suspend(rdev);
1664 1664
1665 pci_save_state(dev->pdev); 1665 pci_save_state(dev->pdev);
1666 if (freeze && rdev->family >= CHIP_CEDAR) { 1666 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1667 rdev->asic->asic_reset(rdev, true); 1667 rdev->asic->asic_reset(rdev, true);
1668 pci_restore_state(dev->pdev); 1668 pci_restore_state(dev->pdev);
1669 } else if (suspend) { 1669 } else if (suspend) {
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 06f05302ee75..882d85db9053 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC
26 bool "Allwinner A10 HDMI CEC Support" 26 bool "Allwinner A10 HDMI CEC Support"
27 depends on DRM_SUN4I_HDMI 27 depends on DRM_SUN4I_HDMI
28 select CEC_CORE 28 select CEC_CORE
29 depends on CEC_PIN 29 select CEC_PIN
30 help 30 help
31 Choose this option if you have an Allwinner SoC with an HDMI 31 Choose this option if you have an Allwinner SoC with an HDMI
32 controller and want to use CEC. 32 controller and want to use CEC.
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 1457750988da..a1f8cba251a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -15,7 +15,7 @@
15#include <drm/drm_connector.h> 15#include <drm/drm_connector.h>
16#include <drm/drm_encoder.h> 16#include <drm/drm_encoder.h>
17 17
18#include <media/cec.h> 18#include <media/cec-pin.h>
19 19
20#define SUN4I_HDMI_CTRL_REG 0x004 20#define SUN4I_HDMI_CTRL_REG 0x004
21#define SUN4I_HDMI_CTRL_ENABLE BIT(31) 21#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..3cf1a6932fac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
302 hdmi->mod_clk = devm_clk_get(dev, "mod"); 302 hdmi->mod_clk = devm_clk_get(dev, "mod");
303 if (IS_ERR(hdmi->mod_clk)) { 303 if (IS_ERR(hdmi->mod_clk)) {
304 dev_err(dev, "Couldn't get the HDMI mod clock\n"); 304 dev_err(dev, "Couldn't get the HDMI mod clock\n");
305 return PTR_ERR(hdmi->mod_clk); 305 ret = PTR_ERR(hdmi->mod_clk);
306 goto err_disable_bus_clk;
306 } 307 }
307 clk_prepare_enable(hdmi->mod_clk); 308 clk_prepare_enable(hdmi->mod_clk);
308 309
309 hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); 310 hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
310 if (IS_ERR(hdmi->pll0_clk)) { 311 if (IS_ERR(hdmi->pll0_clk)) {
311 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); 312 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
312 return PTR_ERR(hdmi->pll0_clk); 313 ret = PTR_ERR(hdmi->pll0_clk);
314 goto err_disable_mod_clk;
313 } 315 }
314 316
315 hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); 317 hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
316 if (IS_ERR(hdmi->pll1_clk)) { 318 if (IS_ERR(hdmi->pll1_clk)) {
317 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); 319 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
318 return PTR_ERR(hdmi->pll1_clk); 320 ret = PTR_ERR(hdmi->pll1_clk);
321 goto err_disable_mod_clk;
319 } 322 }
320 323
321 ret = sun4i_tmds_create(hdmi); 324 ret = sun4i_tmds_create(hdmi);
322 if (ret) { 325 if (ret) {
323 dev_err(dev, "Couldn't create the TMDS clock\n"); 326 dev_err(dev, "Couldn't create the TMDS clock\n");
324 return ret; 327 goto err_disable_mod_clk;
325 } 328 }
326 329
327 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); 330 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
362 ret = sun4i_hdmi_i2c_create(dev, hdmi); 365 ret = sun4i_hdmi_i2c_create(dev, hdmi);
363 if (ret) { 366 if (ret) {
364 dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); 367 dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
365 return ret; 368 goto err_disable_mod_clk;
366 } 369 }
367 370
368 drm_encoder_helper_add(&hdmi->encoder, 371 drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
422 drm_encoder_cleanup(&hdmi->encoder); 425 drm_encoder_cleanup(&hdmi->encoder);
423err_del_i2c_adapter: 426err_del_i2c_adapter:
424 i2c_del_adapter(hdmi->i2c); 427 i2c_del_adapter(hdmi->i2c);
428err_disable_mod_clk:
429 clk_disable_unprepare(hdmi->mod_clk);
430err_disable_bus_clk:
431 clk_disable_unprepare(hdmi->bus_clk);
425 return ret; 432 return ret;
426} 433}
427 434
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
434 drm_connector_cleanup(&hdmi->connector); 441 drm_connector_cleanup(&hdmi->connector);
435 drm_encoder_cleanup(&hdmi->encoder); 442 drm_encoder_cleanup(&hdmi->encoder);
436 i2c_del_adapter(hdmi->i2c); 443 i2c_del_adapter(hdmi->i2c);
444 clk_disable_unprepare(hdmi->mod_clk);
445 clk_disable_unprepare(hdmi->bus_clk);
437} 446}
438 447
439static const struct component_ops sun4i_hdmi_ops = { 448static const struct component_ops sun4i_hdmi_ops = {
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
index e9b7cdad5c4c..5a1ab4046e92 100644
--- a/drivers/gpu/drm/tegra/trace.h
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl,
63 63
64/* This part must be outside protection */ 64/* This part must be outside protection */
65#undef TRACE_INCLUDE_PATH 65#undef TRACE_INCLUDE_PATH
66#define TRACE_INCLUDE_PATH . 66#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
67#define TRACE_INCLUDE_FILE trace 67#define TRACE_INCLUDE_FILE trace
68#include <trace/define_trace.h> 68#include <trace/define_trace.h>
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6a573d21d3cc..658fa2d3e40c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
405 return -EINVAL; 405 return -EINVAL;
406 } 406 }
407 407
408 /*
409 * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
410 * i.MX53 channel arbitration locking doesn't seem to work properly.
411 * Allow enabling the lock feature on IPUv3H / i.MX6 only.
412 */
413 if (bursts && ipu->ipu_type != IPUV3H)
414 return -EINVAL;
415
408 for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { 416 for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
409 if (channel->num == idmac_lock_en_info[i].chnum) 417 if (channel->num == idmac_lock_en_info[i].chnum)
410 break; 418 break;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c35f74c83065..c860a7997cb5 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -73,6 +73,14 @@
73#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) 73#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
74#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) 74#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
75 75
76#define IPU_PRE_STORE_ENG_STATUS 0x120
77#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff
78#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0
79#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff
80#define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16
81#define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30)
82#define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31)
83
76#define IPU_PRE_STORE_ENG_SIZE 0x130 84#define IPU_PRE_STORE_ENG_SIZE 0x130
77#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) 85#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
78#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) 86#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
@@ -93,6 +101,7 @@ struct ipu_pre {
93 dma_addr_t buffer_paddr; 101 dma_addr_t buffer_paddr;
94 void *buffer_virt; 102 void *buffer_virt;
95 bool in_use; 103 bool in_use;
104 unsigned int safe_window_end;
96}; 105};
97 106
98static DEFINE_MUTEX(ipu_pre_list_mutex); 107static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
160 u32 active_bpp = info->cpp[0] >> 1; 169 u32 active_bpp = info->cpp[0] >> 1;
161 u32 val; 170 u32 val;
162 171
172 /* calculate safe window for ctrl register updates */
173 pre->safe_window_end = height - 2;
174
163 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); 175 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
164 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 176 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
165 177
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
199 211
200void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) 212void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
201{ 213{
214 unsigned long timeout = jiffies + msecs_to_jiffies(5);
215 unsigned short current_yblock;
216 u32 val;
217
202 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 218 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
219
220 do {
221 if (time_after(jiffies, timeout)) {
222 dev_warn(pre->dev, "timeout waiting for PRE safe window\n");
223 return;
224 }
225
226 val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS);
227 current_yblock =
228 (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) &
229 IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK;
230 } while (current_yblock == 0 || current_yblock >= pre->safe_window_end);
231
203 writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); 232 writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
204} 233}
205 234
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index ecc9ea44dc50..0013ca9f72c8 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -14,6 +14,7 @@
14#include <drm/drm_fourcc.h> 14#include <drm/drm_fourcc.h>
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/iopoll.h>
17#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
18#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 19#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
19#include <linux/module.h> 20#include <linux/module.h>
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
329 val = IPU_PRG_REG_UPDATE_REG_UPDATE; 330 val = IPU_PRG_REG_UPDATE_REG_UPDATE;
330 writel(val, prg->regs + IPU_PRG_REG_UPDATE); 331 writel(val, prg->regs + IPU_PRG_REG_UPDATE);
331 332
333 /* wait for both double buffers to be filled */
334 readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val,
335 (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) &&
336 (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
337 5, 1000);
338
332 clk_disable_unprepare(prg->clk_ipg); 339 clk_disable_unprepare(prg->clk_ipg);
333 340
334 chan->enabled = true; 341 chan->enabled = true;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 0a3117cc29e7..374301fcbc86 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -281,6 +281,7 @@ config HID_ELECOM
281 Support for ELECOM devices: 281 Support for ELECOM devices:
282 - BM084 Bluetooth Mouse 282 - BM084 Bluetooth Mouse
283 - DEFT Trackball (Wired and wireless) 283 - DEFT Trackball (Wired and wireless)
284 - HUGE Trackball (Wired and wireless)
284 285
285config HID_ELO 286config HID_ELO
286 tristate "ELO USB 4000/4500 touchscreen" 287 tristate "ELO USB 4000/4500 touchscreen"
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9bc91160819b..330ca983828b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
2032 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 2032 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
2033 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 2033 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
2034 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 2034 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
2035 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
2036 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
2035#endif 2037#endif
2036#if IS_ENABLED(CONFIG_HID_ELO) 2038#if IS_ENABLED(CONFIG_HID_ELO)
2037 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, 2039 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e2c7465df69f..54aeea57d209 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -3,6 +3,7 @@
3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> 3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
4 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> 4 * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
5 * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> 5 * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
6 * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
6 */ 7 */
7 8
8/* 9/*
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
32 break; 33 break;
33 case USB_DEVICE_ID_ELECOM_DEFT_WIRED: 34 case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
34 case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: 35 case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
35 /* The DEFT trackball has eight buttons, but its descriptor only 36 case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
36 * reports five, disabling the three Fn buttons on the top of 37 case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
37 * the mouse. 38 /* The DEFT/HUGE trackball has eight buttons, but its descriptor
39 * only reports five, disabling the three Fn buttons on the top
40 * of the mouse.
38 * 41 *
39 * Apply the following diff to the descriptor: 42 * Apply the following diff to the descriptor:
40 * 43 *
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
62 * End Collection, End Collection, 65 * End Collection, End Collection,
63 */ 66 */
64 if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { 67 if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
65 hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); 68 hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
66 rdesc[13] = 8; /* Button/Variable Report Count */ 69 rdesc[13] = 8; /* Button/Variable Report Count */
67 rdesc[21] = 8; /* Button/Variable Usage Maximum */ 70 rdesc[21] = 8; /* Button/Variable Usage Maximum */
68 rdesc[29] = 0; /* Button/Constant Report Count */ 71 rdesc[29] = 0; /* Button/Constant Report Count */
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = {
76 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 79 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
77 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 80 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
78 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 81 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
82 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
83 { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
79 { } 84 { }
80}; 85};
81MODULE_DEVICE_TABLE(hid, elecom_devices); 86MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b397a14ab970..be2e005c3c51 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -368,6 +368,8 @@
368#define USB_DEVICE_ID_ELECOM_BM084 0x0061 368#define USB_DEVICE_ID_ELECOM_BM084 0x0061
369#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe 369#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
370#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff 370#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
371#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
372#define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d
371 373
372#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 374#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
373#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 375#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
@@ -533,6 +535,7 @@
533#define USB_VENDOR_ID_IDEACOM 0x1cb6 535#define USB_VENDOR_ID_IDEACOM 0x1cb6
534#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 536#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
535#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 537#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
538#define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680
536 539
537#define USB_VENDOR_ID_ILITEK 0x222a 540#define USB_VENDOR_ID_ILITEK 0x222a
538#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 541#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -660,6 +663,7 @@
660#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 663#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
661#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 664#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
662#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 665#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
666#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
663 667
664#define USB_VENDOR_ID_LG 0x1fd2 668#define USB_VENDOR_ID_LG 0x1fd2
665#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 669#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 440b999304a5..9e8c4d2ba11d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
930 field->application != HID_DG_PEN && 930 field->application != HID_DG_PEN &&
931 field->application != HID_DG_TOUCHPAD && 931 field->application != HID_DG_TOUCHPAD &&
932 field->application != HID_GD_KEYBOARD && 932 field->application != HID_GD_KEYBOARD &&
933 field->application != HID_GD_SYSTEM_CONTROL &&
933 field->application != HID_CP_CONSUMER_CONTROL && 934 field->application != HID_CP_CONSUMER_CONTROL &&
934 field->application != HID_GD_WIRELESS_RADIO_CTLS && 935 field->application != HID_GD_WIRELESS_RADIO_CTLS &&
935 !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && 936 !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = {
1419 USB_VENDOR_ID_ALPS_JP, 1420 USB_VENDOR_ID_ALPS_JP,
1420 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, 1421 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
1421 1422
1423 /* Lenovo X1 TAB Gen 2 */
1424 { .driver_data = MT_CLS_WIN_8_DUAL,
1425 HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
1426 USB_VENDOR_ID_LENOVO,
1427 USB_DEVICE_ID_LENOVO_X1_TAB) },
1428
1422 /* Anton devices */ 1429 /* Anton devices */
1423 { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, 1430 { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
1424 MT_USB_DEVICE(USB_VENDOR_ID_ANTON, 1431 MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 5b40c2614599..ef241d66562e 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
436 if (!(data->device_flags & RMI_DEVICE)) 436 if (!(data->device_flags & RMI_DEVICE))
437 return 0; 437 return 0;
438 438
439 ret = rmi_reset_attn_mode(hdev); 439 /* Make sure the HID device is ready to receive events */
440 ret = hid_hw_open(hdev);
440 if (ret) 441 if (ret)
441 return ret; 442 return ret;
442 443
444 ret = rmi_reset_attn_mode(hdev);
445 if (ret)
446 goto out;
447
443 ret = rmi_driver_resume(rmi_dev, false); 448 ret = rmi_driver_resume(rmi_dev, false);
444 if (ret) { 449 if (ret) {
445 hid_warn(hdev, "Failed to resume device: %d\n", ret); 450 hid_warn(hdev, "Failed to resume device: %d\n", ret);
446 return ret; 451 goto out;
447 } 452 }
448 453
449 return 0; 454out:
455 hid_hw_close(hdev);
456 return ret;
450} 457}
451#endif /* CONFIG_PM */ 458#endif /* CONFIG_PM */
452 459
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ec530454e6f6..5fbe0f81ab2e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
337 kfree(hidraw); 337 kfree(hidraw);
338 } else { 338 } else {
339 /* close device for last reader */ 339 /* close device for last reader */
340 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
341 hid_hw_close(hidraw->hid); 340 hid_hw_close(hidraw->hid);
341 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
342 } 342 }
343 } 343 }
344} 344}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 77396145d2d0..9145c2129a96 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
543{ 543{
544 /* the worst case is computed from the set_report command with a 544 /* the worst case is computed from the set_report command with a
545 * reportID > 15 and the maximum report length */ 545 * reportID > 15 and the maximum report length */
546 int args_len = sizeof(__u8) + /* optional ReportID byte */ 546 int args_len = sizeof(__u8) + /* ReportID */
547 sizeof(__u8) + /* optional ReportID byte */
547 sizeof(__u16) + /* data register */ 548 sizeof(__u16) + /* data register */
548 sizeof(__u16) + /* size of the report */ 549 sizeof(__u16) + /* size of the report */
549 report_size; /* report */ 550 report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 089bad8a9a21..045b5da9b992 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid)
975 unsigned int rsize = 0; 975 unsigned int rsize = 0;
976 char *rdesc; 976 char *rdesc;
977 int ret, n; 977 int ret, n;
978 int num_descriptors;
979 size_t offset = offsetof(struct hid_descriptor, desc);
978 980
979 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), 981 quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
980 le16_to_cpu(dev->descriptor.idProduct)); 982 le16_to_cpu(dev->descriptor.idProduct));
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid)
997 return -ENODEV; 999 return -ENODEV;
998 } 1000 }
999 1001
1002 if (hdesc->bLength < sizeof(struct hid_descriptor)) {
1003 dbg_hid("hid descriptor is too short\n");
1004 return -EINVAL;
1005 }
1006
1000 hid->version = le16_to_cpu(hdesc->bcdHID); 1007 hid->version = le16_to_cpu(hdesc->bcdHID);
1001 hid->country = hdesc->bCountryCode; 1008 hid->country = hdesc->bCountryCode;
1002 1009
1003 for (n = 0; n < hdesc->bNumDescriptors; n++) 1010 num_descriptors = min_t(int, hdesc->bNumDescriptors,
1011 (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
1012
1013 for (n = 0; n < num_descriptors; n++)
1004 if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) 1014 if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
1005 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); 1015 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
1006 1016
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a83fa76655b9..f489a5cfcb48 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
99 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, 99 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
100 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 100 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
101 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 101 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
102 { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
102 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, 103 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
103 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, 104 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
104 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, 105 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e82a696a1d07..906e654fb0ba 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
668 668
669 /* Try to find an already-probed interface from the same device */ 669 /* Try to find an already-probed interface from the same device */
670 list_for_each_entry(data, &wacom_udev_list, list) { 670 list_for_each_entry(data, &wacom_udev_list, list) {
671 if (compare_device_paths(hdev, data->dev, '/')) 671 if (compare_device_paths(hdev, data->dev, '/')) {
672 kref_get(&data->kref);
672 return data; 673 return data;
674 }
673 } 675 }
674 676
675 /* Fallback to finding devices that appear to be "siblings" */ 677 /* Fallback to finding devices that appear to be "siblings" */
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
766 if (!wacom->led.groups) 768 if (!wacom->led.groups)
767 return -ENOTSUPP; 769 return -ENOTSUPP;
768 770
771 if (wacom->wacom_wac.features.type == REMOTE)
772 return -ENOTSUPP;
773
769 if (wacom->wacom_wac.pid) { /* wireless connected */ 774 if (wacom->wacom_wac.pid) { /* wireless connected */
770 report_id = WAC_CMD_WL_LED_CONTROL; 775 report_id = WAC_CMD_WL_LED_CONTROL;
771 buf_size = 13; 776 buf_size = 13;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bb17d7bbefd3..aa692e28b2cd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
567 keys = data[9] & 0x07; 567 keys = data[9] & 0x07;
568 } 568 }
569 } else { 569 } else {
570 buttons = ((data[6] & 0x10) << 10) | 570 buttons = ((data[6] & 0x10) << 5) |
571 ((data[5] & 0x10) << 9) | 571 ((data[5] & 0x10) << 4) |
572 ((data[6] & 0x0F) << 4) | 572 ((data[6] & 0x0F) << 4) |
573 (data[5] & 0x0F); 573 (data[5] & 0x0F);
574 } 574 }
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1227 continue; 1227 continue;
1228 1228
1229 if (range) { 1229 if (range) {
1230 /* Fix rotation alignment: userspace expects zero at left */
1231 int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
1232 rotation += 1800/4;
1233 if (rotation > 899)
1234 rotation -= 1800;
1235
1230 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); 1236 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
1231 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); 1237 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
1232 input_report_abs(pen_input, ABS_TILT_X, frame[7]); 1238 input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
1233 input_report_abs(pen_input, ABS_TILT_Y, frame[8]); 1239 input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
1234 input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9])); 1240 input_report_abs(pen_input, ABS_Z, rotation);
1235 input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); 1241 input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
1236 } 1242 }
1237 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); 1243 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
1319 unsigned char *data = wacom->data; 1325 unsigned char *data = wacom->data;
1320 1326
1321 int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); 1327 int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
1322 int ring = data[285]; 1328 int ring = data[285] & 0x7F;
1323 int prox = buttons | (ring & 0x80); 1329 bool ringstatus = data[285] & 0x80;
1330 bool prox = buttons || ringstatus;
1331
1332 /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
1333 ring = 71 - ring;
1334 ring += 3*72/16;
1335 if (ring > 71)
1336 ring -= 72;
1324 1337
1325 wacom_report_numbered_buttons(pad_input, 9, buttons); 1338 wacom_report_numbered_buttons(pad_input, 9, buttons);
1326 1339
1327 input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0); 1340 input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
1328 1341
1329 input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); 1342 input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
1330 input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); 1343 input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
1616 return 0; 1629 return 0;
1617} 1630}
1618 1631
1632static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
1633 int value, int num, int denom)
1634{
1635 struct input_absinfo *abs = &input->absinfo[usage->code];
1636 int range = (abs->maximum - abs->minimum + 1);
1637
1638 value += num*range/denom;
1639 if (value > abs->maximum)
1640 value -= range;
1641 else if (value < abs->minimum)
1642 value += range;
1643 return value;
1644}
1645
1619int wacom_equivalent_usage(int usage) 1646int wacom_equivalent_usage(int usage)
1620{ 1647{
1621 if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { 1648 if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1898 unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); 1925 unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
1899 int i; 1926 int i;
1900 bool is_touch_on = value; 1927 bool is_touch_on = value;
1928 bool do_report = false;
1901 1929
1902 /* 1930 /*
1903 * Avoid reporting this event and setting inrange_state if this usage 1931 * Avoid reporting this event and setting inrange_state if this usage
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1912 } 1940 }
1913 1941
1914 switch (equivalent_usage) { 1942 switch (equivalent_usage) {
1943 case WACOM_HID_WD_TOUCHRING:
1944 /*
1945 * Userspace expects touchrings to increase in value with
1946 * clockwise gestures and have their zero point at the
1947 * tablet's left. HID events "should" be clockwise-
1948 * increasing and zero at top, though the MobileStudio
1949 * Pro and 2nd-gen Intuos Pro don't do this...
1950 */
1951 if (hdev->vendor == 0x56a &&
1952 (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
1953 hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
1954 value = (field->logical_maximum - value);
1955
1956 if (hdev->product == 0x357 || hdev->product == 0x358)
1957 value = wacom_offset_rotation(input, usage, value, 3, 16);
1958 else if (hdev->product == 0x34d || hdev->product == 0x34e)
1959 value = wacom_offset_rotation(input, usage, value, 1, 2);
1960 }
1961 else {
1962 value = wacom_offset_rotation(input, usage, value, 1, 4);
1963 }
1964 do_report = true;
1965 break;
1915 case WACOM_HID_WD_TOUCHRINGSTATUS: 1966 case WACOM_HID_WD_TOUCHRINGSTATUS:
1916 if (!value) 1967 if (!value)
1917 input_event(input, usage->type, usage->code, 0); 1968 input_event(input, usage->type, usage->code, 0);
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1945 value, i); 1996 value, i);
1946 /* fall through*/ 1997 /* fall through*/
1947 default: 1998 default:
1999 do_report = true;
2000 break;
2001 }
2002
2003 if (do_report) {
1948 input_event(input, usage->type, usage->code, value); 2004 input_event(input, usage->type, usage->code, value);
1949 if (value) 2005 if (value)
1950 wacom_wac->hid_data.pad_input_event_flag = true; 2006 wacom_wac->hid_data.pad_input_event_flag = true;
1951 break;
1952 } 2007 }
1953} 2008}
1954 2009
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2086 wacom_wac->hid_data.tipswitch |= value; 2141 wacom_wac->hid_data.tipswitch |= value;
2087 return; 2142 return;
2088 case HID_DG_TOOLSERIALNUMBER: 2143 case HID_DG_TOOLSERIALNUMBER:
2089 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2144 if (value) {
2090 wacom_wac->serial[0] |= (__u32)value; 2145 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
2146 wacom_wac->serial[0] |= (__u32)value;
2147 }
2091 return; 2148 return;
2149 case HID_DG_TWIST:
2150 /*
2151 * Userspace expects pen twist to have its zero point when
2152 * the buttons/finger is on the tablet's left. HID values
2153 * are zero when buttons are toward the top.
2154 */
2155 value = wacom_offset_rotation(input, usage, value, 1, 4);
2156 break;
2092 case WACOM_HID_WD_SENSE: 2157 case WACOM_HID_WD_SENSE:
2093 wacom_wac->hid_data.sense_state = value; 2158 wacom_wac->hid_data.sense_state = value;
2094 return; 2159 return;
2095 case WACOM_HID_WD_SERIALHI: 2160 case WACOM_HID_WD_SERIALHI:
2096 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); 2161 if (value) {
2097 wacom_wac->serial[0] |= ((__u64)value) << 32; 2162 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
2098 /* 2163 wacom_wac->serial[0] |= ((__u64)value) << 32;
2099 * Non-USI EMR devices may contain additional tool type 2164 /*
2100 * information here. See WACOM_HID_WD_TOOLTYPE case for 2165 * Non-USI EMR devices may contain additional tool type
2101 * more details. 2166 * information here. See WACOM_HID_WD_TOOLTYPE case for
2102 */ 2167 * more details.
2103 if (value >> 20 == 1) { 2168 */
2104 wacom_wac->id[0] |= value & 0xFFFFF; 2169 if (value >> 20 == 1) {
2170 wacom_wac->id[0] |= value & 0xFFFFF;
2171 }
2105 } 2172 }
2106 return; 2173 return;
2107 case WACOM_HID_WD_TOOLTYPE: 2174 case WACOM_HID_WD_TOOLTYPE:
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
2205 input_report_key(input, wacom_wac->tool[0], prox); 2272 input_report_key(input, wacom_wac->tool[0], prox);
2206 if (wacom_wac->serial[0]) { 2273 if (wacom_wac->serial[0]) {
2207 input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); 2274 input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
2208 input_report_abs(input, ABS_MISC, id); 2275 input_report_abs(input, ABS_MISC, prox ? id : 0);
2209 } 2276 }
2210 2277
2211 wacom_wac->hid_data.tipswitch = false; 2278 wacom_wac->hid_data.tipswitch = false;
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
2216 if (!prox) { 2283 if (!prox) {
2217 wacom_wac->tool[0] = 0; 2284 wacom_wac->tool[0] = 0;
2218 wacom_wac->id[0] = 0; 2285 wacom_wac->id[0] = 0;
2286 wacom_wac->serial[0] = 0;
2219 } 2287 }
2220} 2288}
2221 2289
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index efd5db743319..894b67ac2cae 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel)
640 */ 640 */
641 return; 641 return;
642 } 642 }
643 mutex_lock(&vmbus_connection.channel_mutex);
643 /* 644 /*
644 * Close all the sub-channels first and then close the 645 * Close all the sub-channels first and then close the
645 * primary channel. 646 * primary channel.
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel)
648 cur_channel = list_entry(cur, struct vmbus_channel, sc_list); 649 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
649 vmbus_close_internal(cur_channel); 650 vmbus_close_internal(cur_channel);
650 if (cur_channel->rescind) { 651 if (cur_channel->rescind) {
651 mutex_lock(&vmbus_connection.channel_mutex); 652 hv_process_channel_removal(
652 hv_process_channel_removal(cur_channel,
653 cur_channel->offermsg.child_relid); 653 cur_channel->offermsg.child_relid);
654 mutex_unlock(&vmbus_connection.channel_mutex);
655 } 654 }
656 } 655 }
657 /* 656 /*
658 * Now close the primary. 657 * Now close the primary.
659 */ 658 */
660 vmbus_close_internal(channel); 659 vmbus_close_internal(channel);
660 mutex_unlock(&vmbus_connection.channel_mutex);
661} 661}
662EXPORT_SYMBOL_GPL(vmbus_close); 662EXPORT_SYMBOL_GPL(vmbus_close);
663 663
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 060df71c2e8b..379b0df123be 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
159 159
160 160
161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); 161 spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
162 162 channel->rescind = true;
163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, 163 list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
164 msglistentry) { 164 msglistentry) {
165 165
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid)
381 true); 381 true);
382} 382}
383 383
384void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) 384void hv_process_channel_removal(u32 relid)
385{ 385{
386 unsigned long flags; 386 unsigned long flags;
387 struct vmbus_channel *primary_channel; 387 struct vmbus_channel *primary_channel, *channel;
388 388
389 BUG_ON(!channel->rescind);
390 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); 389 BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
391 390
391 /*
392 * Make sure channel is valid as we may have raced.
393 */
394 channel = relid2channel(relid);
395 if (!channel)
396 return;
397
398 BUG_ON(!channel->rescind);
392 if (channel->target_cpu != get_cpu()) { 399 if (channel->target_cpu != get_cpu()) {
393 put_cpu(); 400 put_cpu();
394 smp_call_function_single(channel->target_cpu, 401 smp_call_function_single(channel->target_cpu,
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
515 if (!fnew) { 522 if (!fnew) {
516 if (channel->sc_creation_callback != NULL) 523 if (channel->sc_creation_callback != NULL)
517 channel->sc_creation_callback(newchannel); 524 channel->sc_creation_callback(newchannel);
525 newchannel->probe_done = true;
518 return; 526 return;
519 } 527 }
520 528
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
834{ 842{
835 struct vmbus_channel_rescind_offer *rescind; 843 struct vmbus_channel_rescind_offer *rescind;
836 struct vmbus_channel *channel; 844 struct vmbus_channel *channel;
837 unsigned long flags;
838 struct device *dev; 845 struct device *dev;
839 846
840 rescind = (struct vmbus_channel_rescind_offer *)hdr; 847 rescind = (struct vmbus_channel_rescind_offer *)hdr;
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
873 return; 880 return;
874 } 881 }
875 882
876 spin_lock_irqsave(&channel->lock, flags);
877 channel->rescind = true;
878 spin_unlock_irqrestore(&channel->lock, flags);
879
880 /*
881 * Now that we have posted the rescind state, perform
882 * rescind related cleanup.
883 */
884 vmbus_rescind_cleanup(channel);
885
886 /* 883 /*
887 * Now wait for offer handling to complete. 884 * Now wait for offer handling to complete.
888 */ 885 */
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
901 if (channel->device_obj) { 898 if (channel->device_obj) {
902 if (channel->chn_rescind_callback) { 899 if (channel->chn_rescind_callback) {
903 channel->chn_rescind_callback(channel); 900 channel->chn_rescind_callback(channel);
901 vmbus_rescind_cleanup(channel);
904 return; 902 return;
905 } 903 }
906 /* 904 /*
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
909 */ 907 */
910 dev = get_device(&channel->device_obj->device); 908 dev = get_device(&channel->device_obj->device);
911 if (dev) { 909 if (dev) {
910 vmbus_rescind_cleanup(channel);
912 vmbus_device_unregister(channel->device_obj); 911 vmbus_device_unregister(channel->device_obj);
913 put_device(dev); 912 put_device(dev);
914 } 913 }
@@ -921,29 +920,28 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
921 * 1. Close all sub-channels first 920 * 1. Close all sub-channels first
922 * 2. Then close the primary channel. 921 * 2. Then close the primary channel.
923 */ 922 */
923 mutex_lock(&vmbus_connection.channel_mutex);
924 vmbus_rescind_cleanup(channel);
924 if (channel->state == CHANNEL_OPEN_STATE) { 925 if (channel->state == CHANNEL_OPEN_STATE) {
925 /* 926 /*
926 * The channel is currently not open; 927 * The channel is currently not open;
927 * it is safe for us to cleanup the channel. 928 * it is safe for us to cleanup the channel.
928 */ 929 */
929 mutex_lock(&vmbus_connection.channel_mutex); 930 hv_process_channel_removal(rescind->child_relid);
930 hv_process_channel_removal(channel,
931 channel->offermsg.child_relid);
932 mutex_unlock(&vmbus_connection.channel_mutex);
933 } 931 }
932 mutex_unlock(&vmbus_connection.channel_mutex);
934 } 933 }
935} 934}
936 935
937void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) 936void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
938{ 937{
939 mutex_lock(&vmbus_connection.channel_mutex);
940
941 BUG_ON(!is_hvsock_channel(channel)); 938 BUG_ON(!is_hvsock_channel(channel));
942 939
943 channel->rescind = true; 940 /* We always get a rescind msg when a connection is closed. */
944 vmbus_device_unregister(channel->device_obj); 941 while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
942 msleep(1);
945 943
946 mutex_unlock(&vmbus_connection.channel_mutex); 944 vmbus_device_unregister(channel->device_obj);
947} 945}
948EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); 946EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
949 947
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index daa75bd41f86..2364281d8593 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
170 out_src = smsg_out; 170 out_src = smsg_out;
171 break; 171 break;
172 172
173 case WRITE_TO_FILE:
174 out_src = fcopy_transaction.fcopy_msg;
175 out_len = sizeof(struct hv_do_fcopy);
176 break;
173 default: 177 default:
174 out_src = fcopy_transaction.fcopy_msg; 178 out_src = fcopy_transaction.fcopy_msg;
175 out_len = fcopy_transaction.recv_len; 179 out_len = fcopy_transaction.recv_len;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a9d49f6f6501..937801ac2fe0 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device)
768 struct vmbus_channel *channel = hv_dev->channel; 768 struct vmbus_channel *channel = hv_dev->channel;
769 769
770 mutex_lock(&vmbus_connection.channel_mutex); 770 mutex_lock(&vmbus_connection.channel_mutex);
771 hv_process_channel_removal(channel, 771 hv_process_channel_removal(channel->offermsg.child_relid);
772 channel->offermsg.child_relid);
773 mutex_unlock(&vmbus_connection.channel_mutex); 772 mutex_unlock(&vmbus_connection.channel_mutex);
774 kfree(hv_dev); 773 kfree(hv_dev);
775 774
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 97a62f5b9ea4..a973eb6a2890 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
477 /* disable touchscreen features */ 477 /* disable touchscreen features */
478 da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00); 478 da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
479 479
480 /* Sample every 1ms */
481 da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
482 DA9052_ADCCONT_ADCMODE,
483 DA9052_ADCCONT_ADCMODE);
484
480 err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, 485 err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
481 "tsiready-irq", da9052_tsi_datardy_irq, 486 "tsiready-irq", da9052_tsi_datardy_irq,
482 hwmon); 487 hwmon);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5eafbaada795..dfc40c740d07 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
268 return err; 268 return err;
269 } 269 }
270 270
271 tmp102->ready_time = jiffies; 271 /*
272 if (tmp102->config_orig & TMP102_CONF_SD) { 272 * Mark that we are not ready with data until the first
273 /* 273 * conversion is complete
274 * Mark that we are not ready with data until the first 274 */
275 * conversion is complete 275 tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
276 */
277 tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
278 }
279 276
280 hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, 277 hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
281 tmp102, 278 tmp102,
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 9c0dbb8191ad..e1be61095532 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
630 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, 630 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
631 GFP_KERNEL); 631 GFP_KERNEL);
632 if (rc) 632 if (rc)
633 goto out_mbox_free; 633 return -ENOMEM;
634 634
635 INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); 635 INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
636 636
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
646 if (IS_ERR(ctx->mbox_chan)) { 646 if (IS_ERR(ctx->mbox_chan)) {
647 dev_err(&pdev->dev, 647 dev_err(&pdev->dev,
648 "SLIMpro mailbox channel request failed\n"); 648 "SLIMpro mailbox channel request failed\n");
649 return -ENODEV; 649 rc = -ENODEV;
650 goto out_mbox_free;
650 } 651 }
651 } else { 652 } else {
652 struct acpi_pcct_hw_reduced *cppc_ss; 653 struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
654 if (device_property_read_u32(&pdev->dev, "pcc-channel", 655 if (device_property_read_u32(&pdev->dev, "pcc-channel",
655 &ctx->mbox_idx)) { 656 &ctx->mbox_idx)) {
656 dev_err(&pdev->dev, "no pcc-channel property\n"); 657 dev_err(&pdev->dev, "no pcc-channel property\n");
657 return -ENODEV; 658 rc = -ENODEV;
659 goto out_mbox_free;
658 } 660 }
659 661
660 cl->rx_callback = xgene_hwmon_pcc_rx_cb; 662 cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
662 if (IS_ERR(ctx->mbox_chan)) { 664 if (IS_ERR(ctx->mbox_chan)) {
663 dev_err(&pdev->dev, 665 dev_err(&pdev->dev,
664 "PPC channel request failed\n"); 666 "PPC channel request failed\n");
665 return -ENODEV; 667 rc = -ENODEV;
668 goto out_mbox_free;
666 } 669 }
667 670
668 /* 671 /*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
675 if (!cppc_ss) { 678 if (!cppc_ss) {
676 dev_err(&pdev->dev, "PPC subspace not found\n"); 679 dev_err(&pdev->dev, "PPC subspace not found\n");
677 rc = -ENODEV; 680 rc = -ENODEV;
678 goto out_mbox_free; 681 goto out;
679 } 682 }
680 683
681 if (!ctx->mbox_chan->mbox->txdone_irq) { 684 if (!ctx->mbox_chan->mbox->txdone_irq) {
682 dev_err(&pdev->dev, "PCC IRQ not supported\n"); 685 dev_err(&pdev->dev, "PCC IRQ not supported\n");
683 rc = -ENODEV; 686 rc = -ENODEV;
684 goto out_mbox_free; 687 goto out;
685 } 688 }
686 689
687 /* 690 /*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
696 } else { 699 } else {
697 dev_err(&pdev->dev, "Failed to get PCC comm region\n"); 700 dev_err(&pdev->dev, "Failed to get PCC comm region\n");
698 rc = -ENODEV; 701 rc = -ENODEV;
699 goto out_mbox_free; 702 goto out;
700 } 703 }
701 704
702 if (!ctx->pcc_comm_addr) { 705 if (!ctx->pcc_comm_addr) {
703 dev_err(&pdev->dev, 706 dev_err(&pdev->dev,
704 "Failed to ioremap PCC comm region\n"); 707 "Failed to ioremap PCC comm region\n");
705 rc = -ENOMEM; 708 rc = -ENOMEM;
706 goto out_mbox_free; 709 goto out;
707 } 710 }
708 711
709 /* 712 /*
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index bc9cebc30526..c2a2ce8ee541 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -144,6 +144,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
144 .driver_data = (kernel_ulong_t)0, 144 .driver_data = (kernel_ulong_t)0,
145 }, 145 },
146 { 146 {
147 /* Lewisburg PCH */
148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
149 .driver_data = (kernel_ulong_t)0,
150 },
151 {
147 /* Gemini Lake */ 152 /* Gemini Lake */
148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), 153 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
149 .driver_data = (kernel_ulong_t)&intel_th_2x, 154 .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
158 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6), 163 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
159 .driver_data = (kernel_ulong_t)&intel_th_2x, 164 .driver_data = (kernel_ulong_t)&intel_th_2x,
160 }, 165 },
166 {
167 /* Cedar Fork PCH */
168 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
169 .driver_data = (kernel_ulong_t)&intel_th_2x,
170 },
161 { 0 }, 171 { 0 },
162}; 172};
163 173
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 9414900575d8..f129869e05a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
1119 1119
1120 stm_source_link_drop(src); 1120 stm_source_link_drop(src);
1121 1121
1122 device_destroy(&stm_source_class, src->dev.devt); 1122 device_unregister(&src->dev);
1123} 1123}
1124EXPORT_SYMBOL_GPL(stm_source_unregister_device); 1124EXPORT_SYMBOL_GPL(stm_source_unregister_device);
1125 1125
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c06dce2c1da7..45a3f3ca29b3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
131 Gemini Lake (SOC) 131 Gemini Lake (SOC)
132 Cannon Lake-H (PCH) 132 Cannon Lake-H (PCH)
133 Cannon Lake-LP (PCH) 133 Cannon Lake-LP (PCH)
134 Cedar Fork (PCH)
134 135
135 This driver can also be built as a module. If so, the module 136 This driver can also be built as a module. If so, the module
136 will be called i2c-i801. 137 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e114e4e00d29..9e12a53ef7b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
68 * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes 68 * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes
69 * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes 69 * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
70 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes 70 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
71 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
71 * 72 *
72 * Features supported by this driver: 73 * Features supported by this driver:
73 * Software PEC no 74 * Software PEC no
@@ -204,6 +205,7 @@
204 205
205/* Older devices have their ID defined in <linux/pci_ids.h> */ 206/* Older devices have their ID defined in <linux/pci_ids.h> */
206#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 207#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
208#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
207#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df 209#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
208#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 210#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
209#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 211#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
1025 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, 1027 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
1026 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, 1028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
1027 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, 1029 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
1030 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
1028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, 1031 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
1029 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, 1032 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
1030 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, 1033 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1513 case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: 1516 case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
1514 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: 1517 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1515 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: 1518 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1519 case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
1516 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1520 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1517 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: 1521 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
1518 priv->features |= FEATURE_I2C_BLOCK_READ; 1522 priv->features |= FEATURE_I2C_BLOCK_READ;
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 84fb35f6837f..eb1d91b986fd 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = {
1459}; 1459};
1460module_platform_driver(img_scb_i2c_driver); 1460module_platform_driver(img_scb_i2c_driver);
1461 1461
1462MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); 1462MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
1463MODULE_DESCRIPTION("IMG host I2C driver"); 1463MODULE_DESCRIPTION("IMG host I2C driver");
1464MODULE_LICENSE("GPL v2"); 1464MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 54a47b40546f..f96830ffd9f1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
1021 } 1021 }
1022 1022
1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", 1023 dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
1024 rinfo->sda_gpio, rinfo->scl_gpio); 1024 rinfo->scl_gpio, rinfo->sda_gpio);
1025 1025
1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery; 1026 rinfo->prepare_recovery = i2c_imx_prepare_recovery;
1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; 1027 rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
1100 } 1100 }
1101 1101
1102 /* Request IRQ */ 1102 /* Request IRQ */
1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, 1103 ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
1104 pdev->name, i2c_imx); 1104 pdev->name, i2c_imx);
1105 if (ret) { 1105 if (ret) {
1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq); 1106 dev_err(&pdev->dev, "can't claim irq %d\n", irq);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 22ffcb73c185..b51adffa4841 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
340 data->word = dma_buffer[0] | (dma_buffer[1] << 8); 340 data->word = dma_buffer[0] | (dma_buffer[1] << 8);
341 break; 341 break;
342 case I2C_SMBUS_BLOCK_DATA: 342 case I2C_SMBUS_BLOCK_DATA:
343 case I2C_SMBUS_I2C_BLOCK_DATA:
344 if (desc->rxbytes != dma_buffer[0] + 1) 343 if (desc->rxbytes != dma_buffer[0] + 1)
345 return -EMSGSIZE; 344 return -EMSGSIZE;
346 345
347 memcpy(data->block, dma_buffer, desc->rxbytes); 346 memcpy(data->block, dma_buffer, desc->rxbytes);
348 break; 347 break;
348 case I2C_SMBUS_I2C_BLOCK_DATA:
349 memcpy(&data->block[1], dma_buffer, desc->rxbytes);
350 data->block[0] = desc->rxbytes;
351 break;
349 } 352 }
350 return 0; 353 return 0;
351 } 354 }
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1ebb5e947e0b..23c2ea2baedc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
360 unsigned long fclk_rate = 12000000; 360 unsigned long fclk_rate = 12000000;
361 unsigned long internal_clk = 0; 361 unsigned long internal_clk = 0;
362 struct clk *fclk; 362 struct clk *fclk;
363 int error;
363 364
364 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { 365 if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
365 /* 366 /*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
378 * do this bit unconditionally. 379 * do this bit unconditionally.
379 */ 380 */
380 fclk = clk_get(omap->dev, "fck"); 381 fclk = clk_get(omap->dev, "fck");
382 if (IS_ERR(fclk)) {
383 error = PTR_ERR(fclk);
384 dev_err(omap->dev, "could not get fck: %i\n", error);
385
386 return error;
387 }
388
381 fclk_rate = clk_get_rate(fclk); 389 fclk_rate = clk_get_rate(fclk);
382 clk_put(fclk); 390 clk_put(fclk);
383 391
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
410 else 418 else
411 internal_clk = 4000; 419 internal_clk = 4000;
412 fclk = clk_get(omap->dev, "fck"); 420 fclk = clk_get(omap->dev, "fck");
421 if (IS_ERR(fclk)) {
422 error = PTR_ERR(fclk);
423 dev_err(omap->dev, "could not get fck: %i\n", error);
424
425 return error;
426 }
413 fclk_rate = clk_get_rate(fclk) / 1000; 427 fclk_rate = clk_get_rate(fclk) / 1000;
414 clk_put(fclk); 428 clk_put(fclk);
415 429
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0ecdb47a23ab..174579d32e5f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,6 +85,9 @@
85/* SB800 constants */ 85/* SB800 constants */
86#define SB800_PIIX4_SMB_IDX 0xcd6 86#define SB800_PIIX4_SMB_IDX 0xcd6
87 87
88#define KERNCZ_IMC_IDX 0x3e
89#define KERNCZ_IMC_DATA 0x3f
90
88/* 91/*
89 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) 92 * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
90 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. 93 * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
@@ -94,6 +97,12 @@
94#define SB800_PIIX4_PORT_IDX_ALT 0x2e 97#define SB800_PIIX4_PORT_IDX_ALT 0x2e
95#define SB800_PIIX4_PORT_IDX_SEL 0x2f 98#define SB800_PIIX4_PORT_IDX_SEL 0x2f
96#define SB800_PIIX4_PORT_IDX_MASK 0x06 99#define SB800_PIIX4_PORT_IDX_MASK 0x06
100#define SB800_PIIX4_PORT_IDX_SHIFT 1
101
102/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
103#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
104#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
105#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
97 106
98/* insmod parameters */ 107/* insmod parameters */
99 108
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
149 */ 158 */
150static DEFINE_MUTEX(piix4_mutex_sb800); 159static DEFINE_MUTEX(piix4_mutex_sb800);
151static u8 piix4_port_sel_sb800; 160static u8 piix4_port_sel_sb800;
161static u8 piix4_port_mask_sb800;
162static u8 piix4_port_shift_sb800;
152static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { 163static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
153 " port 0", " port 2", " port 3", " port 4" 164 " port 0", " port 2", " port 3", " port 4"
154}; 165};
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
159 170
160 /* SB800 */ 171 /* SB800 */
161 bool sb800_main; 172 bool sb800_main;
173 bool notify_imc;
162 u8 port; /* Port number, shifted */ 174 u8 port; /* Port number, shifted */
163}; 175};
164 176
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
347 359
348 /* Find which register is used for port selection */ 360 /* Find which register is used for port selection */
349 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { 361 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
350 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; 362 switch (PIIX4_dev->device) {
363 case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
364 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
365 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
366 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
367 break;
368 case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
369 default:
370 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
371 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
372 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
373 break;
374 }
351 } else { 375 } else {
352 mutex_lock(&piix4_mutex_sb800); 376 mutex_lock(&piix4_mutex_sb800);
353 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); 377 outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
355 piix4_port_sel_sb800 = (port_sel & 0x01) ? 379 piix4_port_sel_sb800 = (port_sel & 0x01) ?
356 SB800_PIIX4_PORT_IDX_ALT : 380 SB800_PIIX4_PORT_IDX_ALT :
357 SB800_PIIX4_PORT_IDX; 381 SB800_PIIX4_PORT_IDX;
382 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
383 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
358 mutex_unlock(&piix4_mutex_sb800); 384 mutex_unlock(&piix4_mutex_sb800);
359 } 385 }
360 386
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
572 return 0; 598 return 0;
573} 599}
574 600
601static uint8_t piix4_imc_read(uint8_t idx)
602{
603 outb_p(idx, KERNCZ_IMC_IDX);
604 return inb_p(KERNCZ_IMC_DATA);
605}
606
607static void piix4_imc_write(uint8_t idx, uint8_t value)
608{
609 outb_p(idx, KERNCZ_IMC_IDX);
610 outb_p(value, KERNCZ_IMC_DATA);
611}
612
613static int piix4_imc_sleep(void)
614{
615 int timeout = MAX_TIMEOUT;
616
617 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
618 return -EBUSY;
619
620 /* clear response register */
621 piix4_imc_write(0x82, 0x00);
622 /* request ownership flag */
623 piix4_imc_write(0x83, 0xB4);
624 /* kick off IMC Mailbox command 96 */
625 piix4_imc_write(0x80, 0x96);
626
627 while (timeout--) {
628 if (piix4_imc_read(0x82) == 0xfa) {
629 release_region(KERNCZ_IMC_IDX, 2);
630 return 0;
631 }
632 usleep_range(1000, 2000);
633 }
634
635 release_region(KERNCZ_IMC_IDX, 2);
636 return -ETIMEDOUT;
637}
638
639static void piix4_imc_wakeup(void)
640{
641 int timeout = MAX_TIMEOUT;
642
643 if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
644 return;
645
646 /* clear response register */
647 piix4_imc_write(0x82, 0x00);
648 /* release ownership flag */
649 piix4_imc_write(0x83, 0xB5);
650 /* kick off IMC Mailbox command 96 */
651 piix4_imc_write(0x80, 0x96);
652
653 while (timeout--) {
654 if (piix4_imc_read(0x82) == 0xfa)
655 break;
656 usleep_range(1000, 2000);
657 }
658
659 release_region(KERNCZ_IMC_IDX, 2);
660}
661
575/* 662/*
576 * Handles access to multiple SMBus ports on the SB800. 663 * Handles access to multiple SMBus ports on the SB800.
577 * The port is selected by bits 2:1 of the smb_en register (0x2c). 664 * The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
612 return -EBUSY; 699 return -EBUSY;
613 } 700 }
614 701
702 /*
703 * Notify the IMC (Integrated Micro Controller) if required.
704 * Among other responsibilities, the IMC is in charge of monitoring
705 * the System fans and temperature sensors, and act accordingly.
706 * All this is done through SMBus and can/will collide
707 * with our transactions if they are long (BLOCK_DATA).
708 * Therefore we need to request the ownership flag during those
709 * transactions.
710 */
711 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
712 int ret;
713
714 ret = piix4_imc_sleep();
715 switch (ret) {
716 case -EBUSY:
717 dev_warn(&adap->dev,
718 "IMC base address index region 0x%x already in use.\n",
719 KERNCZ_IMC_IDX);
720 break;
721 case -ETIMEDOUT:
722 dev_warn(&adap->dev,
723 "Failed to communicate with the IMC.\n");
724 break;
725 default:
726 break;
727 }
728
729 /* If IMC communication fails do not retry */
730 if (ret) {
731 dev_warn(&adap->dev,
732 "Continuing without IMC notification.\n");
733 adapdata->notify_imc = false;
734 }
735 }
736
615 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); 737 outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
616 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); 738 smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
617 739
618 port = adapdata->port; 740 port = adapdata->port;
619 if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) 741 if ((smba_en_lo & piix4_port_mask_sb800) != port)
620 outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, 742 outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
621 SB800_PIIX4_SMB_IDX + 1); 743 SB800_PIIX4_SMB_IDX + 1);
622 744
623 retval = piix4_access(adap, addr, flags, read_write, 745 retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
628 /* Release the semaphore */ 750 /* Release the semaphore */
629 outb_p(smbslvcnt | 0x20, SMBSLVCNT); 751 outb_p(smbslvcnt | 0x20, SMBSLVCNT);
630 752
753 if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
754 piix4_imc_wakeup();
755
631 mutex_unlock(&piix4_mutex_sb800); 756 mutex_unlock(&piix4_mutex_sb800);
632 757
633 return retval; 758 return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
679static struct i2c_adapter *piix4_aux_adapter; 804static struct i2c_adapter *piix4_aux_adapter;
680 805
681static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, 806static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
682 bool sb800_main, u8 port, 807 bool sb800_main, u8 port, bool notify_imc,
683 const char *name, struct i2c_adapter **padap) 808 const char *name, struct i2c_adapter **padap)
684{ 809{
685 struct i2c_adapter *adap; 810 struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
706 831
707 adapdata->smba = smba; 832 adapdata->smba = smba;
708 adapdata->sb800_main = sb800_main; 833 adapdata->sb800_main = sb800_main;
709 adapdata->port = port << 1; 834 adapdata->port = port << piix4_port_shift_sb800;
835 adapdata->notify_imc = notify_imc;
710 836
711 /* set up the sysfs linkage to our parent device */ 837 /* set up the sysfs linkage to our parent device */
712 adap->dev.parent = &dev->dev; 838 adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
728 return 0; 854 return 0;
729} 855}
730 856
731static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) 857static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
858 bool notify_imc)
732{ 859{
733 struct i2c_piix4_adapdata *adapdata; 860 struct i2c_piix4_adapdata *adapdata;
734 int port; 861 int port;
735 int retval; 862 int retval;
736 863
737 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { 864 for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
738 retval = piix4_add_adapter(dev, smba, true, port, 865 retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
739 piix4_main_port_names_sb800[port], 866 piix4_main_port_names_sb800[port],
740 &piix4_main_adapters[port]); 867 &piix4_main_adapters[port]);
741 if (retval < 0) 868 if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
769 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && 896 dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
770 dev->revision >= 0x40) || 897 dev->revision >= 0x40) ||
771 dev->vendor == PCI_VENDOR_ID_AMD) { 898 dev->vendor == PCI_VENDOR_ID_AMD) {
899 bool notify_imc = false;
772 is_sb800 = true; 900 is_sb800 = true;
773 901
774 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { 902 if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
778 return -EBUSY; 906 return -EBUSY;
779 } 907 }
780 908
909 if (dev->vendor == PCI_VENDOR_ID_AMD &&
910 dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
911 u8 imc;
912
913 /*
914 * Detect if IMC is active or not, this method is
915 * described on coreboot's AMD IMC notes
916 */
917 pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
918 0x40, &imc);
919 if (imc & 0x80)
920 notify_imc = true;
921 }
922
781 /* base address location etc changed in SB800 */ 923 /* base address location etc changed in SB800 */
782 retval = piix4_setup_sb800(dev, id, 0); 924 retval = piix4_setup_sb800(dev, id, 0);
783 if (retval < 0) { 925 if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
789 * Try to register multiplexed main SMBus adapter, 931 * Try to register multiplexed main SMBus adapter,
790 * give up if we can't 932 * give up if we can't
791 */ 933 */
792 retval = piix4_add_adapters_sb800(dev, retval); 934 retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
793 if (retval < 0) { 935 if (retval < 0) {
794 release_region(SB800_PIIX4_SMB_IDX, 2); 936 release_region(SB800_PIIX4_SMB_IDX, 2);
795 return retval; 937 return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
800 return retval; 942 return retval;
801 943
802 /* Try to register main SMBus adapter, give up if we can't */ 944 /* Try to register main SMBus adapter, give up if we can't */
803 retval = piix4_add_adapter(dev, retval, false, 0, "", 945 retval = piix4_add_adapter(dev, retval, false, 0, false, "",
804 &piix4_main_adapters[0]); 946 &piix4_main_adapters[0]);
805 if (retval < 0) 947 if (retval < 0)
806 return retval; 948 return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
827 if (retval > 0) { 969 if (retval > 0) {
828 /* Try to add the aux adapter if it exists, 970 /* Try to add the aux adapter if it exists,
829 * piix4_add_adapter will clean up if this fails */ 971 * piix4_add_adapter will clean up if this fails */
830 piix4_add_adapter(dev, retval, false, 0, 972 piix4_add_adapter(dev, retval, false, 0, false,
831 is_sb800 ? piix4_aux_port_name_sb800 : "", 973 is_sb800 ? piix4_aux_port_name_sb800 : "",
832 &piix4_aux_adapter); 974 &piix4_aux_adapter);
833 } 975 }
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 22e08ae1704f..25fcc3c1e32b 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
627 627
628static const struct of_device_id sprd_i2c_of_match[] = { 628static const struct of_device_id sprd_i2c_of_match[] = {
629 { .compatible = "sprd,sc9860-i2c", }, 629 { .compatible = "sprd,sc9860-i2c", },
630 {},
630}; 631};
631 632
632static struct platform_driver sprd_i2c_driver = { 633static struct platform_driver sprd_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 47c67b0ca896..d4a6e9c2e9aa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
215 unsigned int msg_num; 215 unsigned int msg_num;
216 unsigned int msg_id; 216 unsigned int msg_id;
217 struct stm32f7_i2c_msg f7_msg; 217 struct stm32f7_i2c_msg f7_msg;
218 struct stm32f7_i2c_setup *setup; 218 struct stm32f7_i2c_setup setup;
219 struct stm32f7_i2c_timings timing; 219 struct stm32f7_i2c_timings timing;
220}; 220};
221 221
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
265 }, 265 },
266}; 266};
267 267
268struct stm32f7_i2c_setup stm32f7_setup = { 268static const struct stm32f7_i2c_setup stm32f7_setup = {
269 .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, 269 .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
270 .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, 270 .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
271 .dnf = STM32F7_I2C_DNF_DEFAULT, 271 .dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
537 writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); 537 writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
538 538
539 /* Enable I2C */ 539 /* Enable I2C */
540 if (i2c_dev->setup->analog_filter) 540 if (i2c_dev->setup.analog_filter)
541 stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, 541 stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
542 STM32F7_I2C_CR1_ANFOFF); 542 STM32F7_I2C_CR1_ANFOFF);
543 else 543 else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
887 } 887 }
888 888
889 setup = of_device_get_match_data(&pdev->dev); 889 setup = of_device_get_match_data(&pdev->dev);
890 i2c_dev->setup->rise_time = setup->rise_time; 890 i2c_dev->setup = *setup;
891 i2c_dev->setup->fall_time = setup->fall_time;
892 i2c_dev->setup->dnf = setup->dnf;
893 i2c_dev->setup->analog_filter = setup->analog_filter;
894 891
895 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", 892 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
896 &rise_time); 893 &rise_time);
897 if (!ret) 894 if (!ret)
898 i2c_dev->setup->rise_time = rise_time; 895 i2c_dev->setup.rise_time = rise_time;
899 896
900 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", 897 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
901 &fall_time); 898 &fall_time);
902 if (!ret) 899 if (!ret)
903 i2c_dev->setup->fall_time = fall_time; 900 i2c_dev->setup.fall_time = fall_time;
904 901
905 ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); 902 ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
906 if (ret) 903 if (ret)
907 goto clk_free; 904 goto clk_free;
908 905
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 81e18f9628d0..a7355ab3bb22 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
1328 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); 1328 unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
1329 struct scsi_request *req = scsi_req(rq); 1329 struct scsi_request *req = scsi_req(rq);
1330 1330
1331 scsi_req_init(req);
1331 memset(req->cmd, 0, BLK_MAX_CDB); 1332 memset(req->cmd, 0, BLK_MAX_CDB);
1332 1333
1333 if (rq_data_dir(rq) == READ) 1334 if (rq_data_dir(rq) == READ)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 01b2adfd8226..eaf39e5db08b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1451 if (hwif_init(hwif) == 0) { 1451 if (hwif_init(hwif) == 0) {
1452 printk(KERN_INFO "%s: failed to initialize IDE " 1452 printk(KERN_INFO "%s: failed to initialize IDE "
1453 "interface\n", hwif->name); 1453 "interface\n", hwif->name);
1454 device_unregister(hwif->portdev);
1454 device_unregister(&hwif->gendev); 1455 device_unregister(&hwif->gendev);
1455 ide_disable_port(hwif); 1456 ide_disable_port(hwif);
1456 continue; 1457 continue;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 86aa88aeb3a6..acf874800ca4 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
56{ 56{
57 struct list_head *l; 57 struct list_head *l;
58 struct pci_driver *d; 58 struct pci_driver *d;
59 int ret;
59 60
60 list_for_each(l, &ide_pci_drivers) { 61 list_for_each(l, &ide_pci_drivers) {
61 d = list_entry(l, struct pci_driver, node); 62 d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
63 const struct pci_device_id *id = 64 const struct pci_device_id *id =
64 pci_match_id(d->id_table, dev); 65 pci_match_id(d->id_table, dev);
65 66
66 if (id != NULL && d->probe(dev, id) >= 0) { 67 if (id != NULL) {
67 dev->driver = d; 68 pci_assign_irq(dev);
68 pci_dev_get(dev); 69 ret = d->probe(dev, id);
69 return 1; 70 if (ret >= 0) {
71 dev->driver = d;
72 pci_dev_get(dev);
73 return 1;
74 }
70 } 75 }
71 } 76 }
72 } 77 }
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 112d2fe1bcdb..fdc8e813170c 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
179/** 179/**
180 * ide_pci_enable - do PCI enables 180 * ide_pci_enable - do PCI enables
181 * @dev: PCI device 181 * @dev: PCI device
182 * @bars: PCI BARs mask
182 * @d: IDE port info 183 * @d: IDE port info
183 * 184 *
184 * Enable the IDE PCI device. We attempt to enable the device in full 185 * Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
189 * Returns zero on success or an error code 190 * Returns zero on success or an error code
190 */ 191 */
191 192
192static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) 193static int ide_pci_enable(struct pci_dev *dev, int bars,
194 const struct ide_port_info *d)
193{ 195{
194 int ret, bars; 196 int ret;
195 197
196 if (pci_enable_device(dev)) { 198 if (pci_enable_device(dev)) {
197 ret = pci_enable_device_io(dev); 199 ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
216 goto out; 218 goto out;
217 } 219 }
218 220
219 if (d->host_flags & IDE_HFLAG_SINGLE)
220 bars = (1 << 2) - 1;
221 else
222 bars = (1 << 4) - 1;
223
224 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
225 if (d->host_flags & IDE_HFLAG_CS5520)
226 bars |= (1 << 2);
227 else
228 bars |= (1 << 4);
229 }
230
231 ret = pci_request_selected_regions(dev, bars, d->name); 221 ret = pci_request_selected_regions(dev, bars, d->name);
232 if (ret < 0) 222 if (ret < 0)
233 printk(KERN_ERR "%s %s: can't reserve resources\n", 223 printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
403/** 393/**
404 * ide_setup_pci_controller - set up IDE PCI 394 * ide_setup_pci_controller - set up IDE PCI
405 * @dev: PCI device 395 * @dev: PCI device
396 * @bars: PCI BARs mask
406 * @d: IDE port info 397 * @d: IDE port info
407 * @noisy: verbose flag 398 * @noisy: verbose flag
408 * 399 *
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
411 * and enables it if need be 402 * and enables it if need be
412 */ 403 */
413 404
414static int ide_setup_pci_controller(struct pci_dev *dev, 405static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
415 const struct ide_port_info *d, int noisy) 406 const struct ide_port_info *d, int noisy)
416{ 407{
417 int ret; 408 int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
420 if (noisy) 411 if (noisy)
421 ide_setup_pci_noise(dev, d); 412 ide_setup_pci_noise(dev, d);
422 413
423 ret = ide_pci_enable(dev, d); 414 ret = ide_pci_enable(dev, bars, d);
424 if (ret < 0) 415 if (ret < 0)
425 goto out; 416 goto out;
426 417
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
428 if (ret < 0) { 419 if (ret < 0) {
429 printk(KERN_ERR "%s %s: error accessing PCI regs\n", 420 printk(KERN_ERR "%s %s: error accessing PCI regs\n",
430 d->name, pci_name(dev)); 421 d->name, pci_name(dev));
431 goto out; 422 goto out_free_bars;
432 } 423 }
433 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ 424 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
434 ret = ide_pci_configure(dev, d); 425 ret = ide_pci_configure(dev, d);
435 if (ret < 0) 426 if (ret < 0)
436 goto out; 427 goto out_free_bars;
437 printk(KERN_INFO "%s %s: device enabled (Linux)\n", 428 printk(KERN_INFO "%s %s: device enabled (Linux)\n",
438 d->name, pci_name(dev)); 429 d->name, pci_name(dev));
439 } 430 }
440 431
432 goto out;
433
434out_free_bars:
435 pci_release_selected_regions(dev, bars);
441out: 436out:
442 return ret; 437 return ret;
443} 438}
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
540{ 535{
541 struct pci_dev *pdev[] = { dev1, dev2 }; 536 struct pci_dev *pdev[] = { dev1, dev2 };
542 struct ide_host *host; 537 struct ide_host *host;
543 int ret, i, n_ports = dev2 ? 4 : 2; 538 int ret, i, n_ports = dev2 ? 4 : 2, bars;
544 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; 539 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
545 540
541 if (d->host_flags & IDE_HFLAG_SINGLE)
542 bars = (1 << 2) - 1;
543 else
544 bars = (1 << 4) - 1;
545
546 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
547 if (d->host_flags & IDE_HFLAG_CS5520)
548 bars |= (1 << 2);
549 else
550 bars |= (1 << 4);
551 }
552
546 for (i = 0; i < n_ports / 2; i++) { 553 for (i = 0; i < n_ports / 2; i++) {
547 ret = ide_setup_pci_controller(pdev[i], d, !i); 554 ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
548 if (ret < 0) 555 if (ret < 0) {
556 if (i == 1)
557 pci_release_selected_regions(pdev[0], bars);
549 goto out; 558 goto out;
559 }
550 560
551 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); 561 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
552 } 562 }
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
554 host = ide_host_alloc(d, hws, n_ports); 564 host = ide_host_alloc(d, hws, n_ports);
555 if (host == NULL) { 565 if (host == NULL) {
556 ret = -ENOMEM; 566 ret = -ENOMEM;
557 goto out; 567 goto out_free_bars;
558 } 568 }
559 569
560 host->dev[0] = &dev1->dev; 570 host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
576 * do_ide_setup_pci_device() on the first device! 586 * do_ide_setup_pci_device() on the first device!
577 */ 587 */
578 if (ret < 0) 588 if (ret < 0)
579 goto out; 589 goto out_free_bars;
580 590
581 /* fixup IRQ */ 591 /* fixup IRQ */
582 if (ide_pci_is_in_compatibility_mode(pdev[i])) { 592 if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
589 ret = ide_host_register(host, d, hws); 599 ret = ide_host_register(host, d, hws);
590 if (ret) 600 if (ret)
591 ide_host_free(host); 601 ide_host_free(host);
602 else
603 goto out;
604
605out_free_bars:
606 i = n_ports / 2;
607 while (i--)
608 pci_release_selected_regions(pdev[i], bars);
592out: 609out:
593 return ret; 610 return ret;
594} 611}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 57625653fcb6..1d13bf03c758 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -243,6 +243,8 @@ config DA9150_GPADC
243config DLN2_ADC 243config DLN2_ADC
244 tristate "Diolan DLN-2 ADC driver support" 244 tristate "Diolan DLN-2 ADC driver support"
245 depends on MFD_DLN2 245 depends on MFD_DLN2
246 select IIO_BUFFER
247 select IIO_TRIGGERED_BUFFER
246 help 248 help
247 Say yes here to build support for Diolan DLN-2 ADC. 249 Say yes here to build support for Diolan DLN-2 ADC.
248 250
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index e6706a09e100..47c3d7f32900 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
257 unsigned int vref_mv) 257 unsigned int vref_mv)
258{ 258{
259 struct ad7793_state *st = iio_priv(indio_dev); 259 struct ad7793_state *st = iio_priv(indio_dev);
260 int i, ret = -1; 260 int i, ret;
261 unsigned long long scale_uv; 261 unsigned long long scale_uv;
262 u32 id; 262 u32 id;
263 263
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
266 return ret; 266 return ret;
267 267
268 /* reset the serial interface */ 268 /* reset the serial interface */
269 ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret)); 269 ret = ad_sd_reset(&st->sd, 32);
270 if (ret < 0) 270 if (ret < 0)
271 goto out; 271 goto out;
272 usleep_range(500, 2000); /* Wait for at least 500us */ 272 usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d10bd0c97233..22c4c17cd996 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -177,6 +177,34 @@ out:
177} 177}
178EXPORT_SYMBOL_GPL(ad_sd_read_reg); 178EXPORT_SYMBOL_GPL(ad_sd_read_reg);
179 179
180/**
181 * ad_sd_reset() - Reset the serial interface
182 *
183 * @sigma_delta: The sigma delta device
184 * @reset_length: Number of SCLKs with DIN = 1
185 *
186 * Returns 0 on success, an error code otherwise.
187 **/
188int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
189 unsigned int reset_length)
190{
191 uint8_t *buf;
192 unsigned int size;
193 int ret;
194
195 size = DIV_ROUND_UP(reset_length, 8);
196 buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
197 if (!buf)
198 return -ENOMEM;
199
200 memset(buf, 0xff, size);
201 ret = spi_write(sigma_delta->spi, buf, size);
202 kfree(buf);
203
204 return ret;
205}
206EXPORT_SYMBOL_GPL(ad_sd_reset);
207
180static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, 208static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
181 unsigned int mode, unsigned int channel) 209 unsigned int mode, unsigned int channel)
182{ 210{
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index bc5b38e3a147..a70ef7fec95f 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -225,6 +225,7 @@ struct at91_adc_trigger {
225 char *name; 225 char *name;
226 unsigned int trgmod_value; 226 unsigned int trgmod_value;
227 unsigned int edge_type; 227 unsigned int edge_type;
228 bool hw_trig;
228}; 229};
229 230
230struct at91_adc_state { 231struct at91_adc_state {
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
254 .name = "external_rising", 255 .name = "external_rising",
255 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE, 256 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
256 .edge_type = IRQ_TYPE_EDGE_RISING, 257 .edge_type = IRQ_TYPE_EDGE_RISING,
258 .hw_trig = true,
257 }, 259 },
258 { 260 {
259 .name = "external_falling", 261 .name = "external_falling",
260 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL, 262 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
261 .edge_type = IRQ_TYPE_EDGE_FALLING, 263 .edge_type = IRQ_TYPE_EDGE_FALLING,
264 .hw_trig = true,
262 }, 265 },
263 { 266 {
264 .name = "external_any", 267 .name = "external_any",
265 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY, 268 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
266 .edge_type = IRQ_TYPE_EDGE_BOTH, 269 .edge_type = IRQ_TYPE_EDGE_BOTH,
270 .hw_trig = true,
271 },
272 {
273 .name = "software",
274 .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
275 .edge_type = IRQ_TYPE_NONE,
276 .hw_trig = false,
267 }, 277 },
268}; 278};
269 279
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev)
597 struct at91_adc_state *st; 607 struct at91_adc_state *st;
598 struct resource *res; 608 struct resource *res;
599 int ret, i; 609 int ret, i;
600 u32 edge_type; 610 u32 edge_type = IRQ_TYPE_NONE;
601 611
602 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); 612 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
603 if (!indio_dev) 613 if (!indio_dev)
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev)
641 ret = of_property_read_u32(pdev->dev.of_node, 651 ret = of_property_read_u32(pdev->dev.of_node,
642 "atmel,trigger-edge-type", &edge_type); 652 "atmel,trigger-edge-type", &edge_type);
643 if (ret) { 653 if (ret) {
644 dev_err(&pdev->dev, 654 dev_dbg(&pdev->dev,
645 "invalid or missing value for atmel,trigger-edge-type\n"); 655 "atmel,trigger-edge-type not specified, only software trigger available\n");
646 return ret;
647 } 656 }
648 657
649 st->selected_trig = NULL; 658 st->selected_trig = NULL;
650 659
651 for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++) 660 /* find the right trigger, or no trigger at all */
661 for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
652 if (at91_adc_trigger_list[i].edge_type == edge_type) { 662 if (at91_adc_trigger_list[i].edge_type == edge_type) {
653 st->selected_trig = &at91_adc_trigger_list[i]; 663 st->selected_trig = &at91_adc_trigger_list[i];
654 break; 664 break;
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev)
717 727
718 platform_set_drvdata(pdev, indio_dev); 728 platform_set_drvdata(pdev, indio_dev);
719 729
720 ret = at91_adc_buffer_init(indio_dev); 730 if (st->selected_trig->hw_trig) {
721 if (ret < 0) { 731 ret = at91_adc_buffer_init(indio_dev);
722 dev_err(&pdev->dev, "couldn't initialize the buffer.\n"); 732 if (ret < 0) {
723 goto per_clk_disable_unprepare; 733 dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
724 } 734 goto per_clk_disable_unprepare;
735 }
725 736
726 ret = at91_adc_trigger_init(indio_dev); 737 ret = at91_adc_trigger_init(indio_dev);
727 if (ret < 0) { 738 if (ret < 0) {
728 dev_err(&pdev->dev, "couldn't setup the triggers.\n"); 739 dev_err(&pdev->dev, "couldn't setup the triggers.\n");
729 goto per_clk_disable_unprepare; 740 goto per_clk_disable_unprepare;
741 }
730 } 742 }
731 743
732 ret = iio_device_register(indio_dev); 744 ret = iio_device_register(indio_dev);
733 if (ret < 0) 745 if (ret < 0)
734 goto per_clk_disable_unprepare; 746 goto per_clk_disable_unprepare;
735 747
736 dev_info(&pdev->dev, "setting up trigger as %s\n", 748 if (st->selected_trig->hw_trig)
737 st->selected_trig->name); 749 dev_info(&pdev->dev, "setting up trigger as %s\n",
750 st->selected_trig->name);
738 751
739 dev_info(&pdev->dev, "version: %x\n", 752 dev_info(&pdev->dev, "version: %x\n",
740 readl_relaxed(st->base + AT91_SAMA5D2_VERSION)); 753 readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 634717ae12f3..071dd23a33d9 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -17,6 +17,8 @@
17 * MCP3204 17 * MCP3204
18 * MCP3208 18 * MCP3208
19 * ------------ 19 * ------------
20 * 13 bit converter
21 * MCP3301
20 * 22 *
21 * Datasheet can be found here: 23 * Datasheet can be found here:
22 * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001 24 * http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
96} 98}
97 99
98static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel, 100static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
99 bool differential, int device_index) 101 bool differential, int device_index, int *val)
100{ 102{
101 int ret; 103 int ret;
102 104
@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
117 119
118 switch (device_index) { 120 switch (device_index) {
119 case mcp3001: 121 case mcp3001:
120 return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3); 122 *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
123 return 0;
121 case mcp3002: 124 case mcp3002:
122 case mcp3004: 125 case mcp3004:
123 case mcp3008: 126 case mcp3008:
124 return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6); 127 *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
128 return 0;
125 case mcp3201: 129 case mcp3201:
126 return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1); 130 *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
131 return 0;
127 case mcp3202: 132 case mcp3202:
128 case mcp3204: 133 case mcp3204:
129 case mcp3208: 134 case mcp3208:
130 return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4); 135 *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
136 return 0;
131 case mcp3301: 137 case mcp3301:
132 return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12); 138 *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
139 | adc->rx_buf[1], 12);
140 return 0;
133 default: 141 default:
134 return -EINVAL; 142 return -EINVAL;
135 } 143 }
@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
150 switch (mask) { 158 switch (mask) {
151 case IIO_CHAN_INFO_RAW: 159 case IIO_CHAN_INFO_RAW:
152 ret = mcp320x_adc_conversion(adc, channel->address, 160 ret = mcp320x_adc_conversion(adc, channel->address,
153 channel->differential, device_index); 161 channel->differential, device_index, val);
154
155 if (ret < 0) 162 if (ret < 0)
156 goto out; 163 goto out;
157 164
158 *val = ret;
159 ret = IIO_VAL_INT; 165 ret = IIO_VAL_INT;
160 break; 166 break;
161 167
@@ -312,6 +318,7 @@ static int mcp320x_probe(struct spi_device *spi)
312 indio_dev->name = spi_get_device_id(spi)->name; 318 indio_dev->name = spi_get_device_id(spi)->name;
313 indio_dev->modes = INDIO_DIRECT_MODE; 319 indio_dev->modes = INDIO_DIRECT_MODE;
314 indio_dev->info = &mcp320x_info; 320 indio_dev->info = &mcp320x_info;
321 spi_set_drvdata(spi, indio_dev);
315 322
316 chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data]; 323 chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
317 indio_dev->channels = chip_info->channels; 324 indio_dev->channels = chip_info->channels;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index e3c15f88075f..4df32cf1650e 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1666,7 +1666,7 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
1666 1666
1667 num_channels = of_property_count_u32_elems(node, "st,adc-channels"); 1667 num_channels = of_property_count_u32_elems(node, "st,adc-channels");
1668 if (num_channels < 0 || 1668 if (num_channels < 0 ||
1669 num_channels >= adc_info->max_channels) { 1669 num_channels > adc_info->max_channels) {
1670 dev_err(&indio_dev->dev, "Bad st,adc-channels?\n"); 1670 dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
1671 return num_channels < 0 ? num_channels : -EINVAL; 1671 return num_channels < 0 ? num_channels : -EINVAL;
1672 } 1672 }
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index d1210024f6bc..e0dc20488335 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -52,7 +52,7 @@
52 52
53#define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0) 53#define ADS1015_CFG_COMP_QUE_MASK GENMASK(1, 0)
54#define ADS1015_CFG_COMP_LAT_MASK BIT(2) 54#define ADS1015_CFG_COMP_LAT_MASK BIT(2)
55#define ADS1015_CFG_COMP_POL_MASK BIT(2) 55#define ADS1015_CFG_COMP_POL_MASK BIT(3)
56#define ADS1015_CFG_COMP_MODE_MASK BIT(4) 56#define ADS1015_CFG_COMP_MODE_MASK BIT(4)
57#define ADS1015_CFG_DR_MASK GENMASK(7, 5) 57#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
58#define ADS1015_CFG_MOD_MASK BIT(8) 58#define ADS1015_CFG_MOD_MASK BIT(8)
@@ -1017,10 +1017,12 @@ static int ads1015_probe(struct i2c_client *client,
1017 1017
1018 switch (irq_trig) { 1018 switch (irq_trig) {
1019 case IRQF_TRIGGER_LOW: 1019 case IRQF_TRIGGER_LOW:
1020 cfg_comp |= ADS1015_CFG_COMP_POL_LOW; 1020 cfg_comp |= ADS1015_CFG_COMP_POL_LOW <<
1021 ADS1015_CFG_COMP_POL_SHIFT;
1021 break; 1022 break;
1022 case IRQF_TRIGGER_HIGH: 1023 case IRQF_TRIGGER_HIGH:
1023 cfg_comp |= ADS1015_CFG_COMP_POL_HIGH; 1024 cfg_comp |= ADS1015_CFG_COMP_POL_HIGH <<
1025 ADS1015_CFG_COMP_POL_SHIFT;
1024 break; 1026 break;
1025 default: 1027 default:
1026 return -EINVAL; 1028 return -EINVAL;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 1edd99f0c5e5..e3cfb91bffc6 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -887,21 +887,27 @@ static int twl4030_madc_probe(struct platform_device *pdev)
887 887
888 /* Enable 3v1 bias regulator for MADC[3:6] */ 888 /* Enable 3v1 bias regulator for MADC[3:6] */
889 madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); 889 madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
890 if (IS_ERR(madc->usb3v1)) 890 if (IS_ERR(madc->usb3v1)) {
891 return -ENODEV; 891 ret = -ENODEV;
892 goto err_i2c;
893 }
892 894
893 ret = regulator_enable(madc->usb3v1); 895 ret = regulator_enable(madc->usb3v1);
894 if (ret) 896 if (ret) {
895 dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); 897 dev_err(madc->dev, "could not enable 3v1 bias regulator\n");
898 goto err_i2c;
899 }
896 900
897 ret = iio_device_register(iio_dev); 901 ret = iio_device_register(iio_dev);
898 if (ret) { 902 if (ret) {
899 dev_err(&pdev->dev, "could not register iio device\n"); 903 dev_err(&pdev->dev, "could not register iio device\n");
900 goto err_i2c; 904 goto err_usb3v1;
901 } 905 }
902 906
903 return 0; 907 return 0;
904 908
909err_usb3v1:
910 regulator_disable(madc->usb3v1);
905err_i2c: 911err_i2c:
906 twl4030_madc_set_current_generator(madc, 0, 0); 912 twl4030_madc_set_current_generator(madc, 0, 0);
907err_current_generator: 913err_current_generator:
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index d99bb1460fe2..02e833b14db0 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -463,8 +463,17 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
463 u8 drdy_mask; 463 u8 drdy_mask;
464 struct st_sensor_data *sdata = iio_priv(indio_dev); 464 struct st_sensor_data *sdata = iio_priv(indio_dev);
465 465
466 if (!sdata->sensor_settings->drdy_irq.addr) 466 if (!sdata->sensor_settings->drdy_irq.addr) {
467 /*
468 * there are some devices (e.g. LIS3MDL) where drdy line is
469 * routed to a given pin and it is not possible to select a
470 * different one. Take into account irq status register
471 * to understand if irq trigger can be properly supported
472 */
473 if (sdata->sensor_settings->drdy_irq.addr_stat_drdy)
474 sdata->hw_irq_trigger = enable;
467 return 0; 475 return 0;
476 }
468 477
469 /* Enable/Disable the interrupt generator 1. */ 478 /* Enable/Disable the interrupt generator 1. */
470 if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) { 479 if (sdata->sensor_settings->drdy_irq.ig1.en_addr > 0) {
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd849f8..7ec2a0bb0807 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
72 st->event_en = state; 72 st->event_en = state;
73 else 73 else
74 return -EINVAL; 74 return -EINVAL;
75 break;
75 default: 76 default:
76 return -EINVAL; 77 return -EINVAL;
77 } 78 }
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 17ec4cee51dc..a47428b4d31b 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -310,8 +310,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
310 ret = indio_dev->info->debugfs_reg_access(indio_dev, 310 ret = indio_dev->info->debugfs_reg_access(indio_dev,
311 indio_dev->cached_reg_addr, 311 indio_dev->cached_reg_addr,
312 0, &val); 312 0, &val);
313 if (ret) 313 if (ret) {
314 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); 314 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
315 return ret;
316 }
315 317
316 len = snprintf(buf, sizeof(buf), "0x%X\n", val); 318 len = snprintf(buf, sizeof(buf), "0x%X\n", val);
317 319
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e68368b5b2a3..08aafba4481c 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -315,6 +315,10 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
315 }, 315 },
316 }, 316 },
317 }, 317 },
318 .drdy_irq = {
319 /* drdy line is routed drdy pin */
320 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
321 },
318 .multi_read_bit = true, 322 .multi_read_bit = true,
319 .bootime = 2, 323 .bootime = 2,
320 }, 324 },
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 0d2ea3ee371b..8f26428804a2 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -573,7 +573,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
573 u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) | 573 u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
574 BMP280_OSRS_PRESS_X(data->oversampling_press + 1); 574 BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
575 575
576 ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS, 576 ret = regmap_write_bits(data->regmap, BMP280_REG_CTRL_MEAS,
577 BMP280_OSRS_TEMP_MASK | 577 BMP280_OSRS_TEMP_MASK |
578 BMP280_OSRS_PRESS_MASK | 578 BMP280_OSRS_PRESS_MASK |
579 BMP280_MODE_MASK, 579 BMP280_MODE_MASK,
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index ebfb1de7377f..91431454eb85 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -865,7 +865,6 @@ complete:
865static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, 865static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
866 struct zpa2326_private *private) 866 struct zpa2326_private *private)
867{ 867{
868 int ret;
869 unsigned int val; 868 unsigned int val;
870 long timeout; 869 long timeout;
871 870
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
887 /* Timed out. */ 886 /* Timed out. */
888 zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)", 887 zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
889 timeout); 888 timeout);
890 ret = -ETIME; 889 return -ETIME;
891 } else if (timeout < 0) {
892 zpa2326_warn(indio_dev,
893 "wait for one shot interrupt cancelled");
894 ret = -ERESTARTSYS;
895 } 890 }
896 891
897 return ret; 892 zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
893 return -ERESTARTSYS;
898} 894}
899 895
900static int zpa2326_init_managed_irq(struct device *parent, 896static int zpa2326_init_managed_irq(struct device *parent,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 0eeff29b61be..4a48b7ba3a1c 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -39,8 +39,12 @@
39#define AS3935_AFE_GAIN_MAX 0x1F 39#define AS3935_AFE_GAIN_MAX 0x1F
40#define AS3935_AFE_PWR_BIT BIT(0) 40#define AS3935_AFE_PWR_BIT BIT(0)
41 41
42#define AS3935_NFLWDTH 0x01
43#define AS3935_NFLWDTH_MASK 0x7f
44
42#define AS3935_INT 0x03 45#define AS3935_INT 0x03
43#define AS3935_INT_MASK 0x0f 46#define AS3935_INT_MASK 0x0f
47#define AS3935_DISTURB_INT BIT(2)
44#define AS3935_EVENT_INT BIT(3) 48#define AS3935_EVENT_INT BIT(3)
45#define AS3935_NOISE_INT BIT(0) 49#define AS3935_NOISE_INT BIT(0)
46 50
@@ -48,6 +52,7 @@
48#define AS3935_DATA_MASK 0x3F 52#define AS3935_DATA_MASK 0x3F
49 53
50#define AS3935_TUNE_CAP 0x08 54#define AS3935_TUNE_CAP 0x08
55#define AS3935_DEFAULTS 0x3C
51#define AS3935_CALIBRATE 0x3D 56#define AS3935_CALIBRATE 0x3D
52 57
53#define AS3935_READ_DATA BIT(14) 58#define AS3935_READ_DATA BIT(14)
@@ -62,7 +67,9 @@ struct as3935_state {
62 struct mutex lock; 67 struct mutex lock;
63 struct delayed_work work; 68 struct delayed_work work;
64 69
70 unsigned long noise_tripped;
65 u32 tune_cap; 71 u32 tune_cap;
72 u32 nflwdth_reg;
66 u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ 73 u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
67 u8 buf[2] ____cacheline_aligned; 74 u8 buf[2] ____cacheline_aligned;
68}; 75};
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
145 return len; 152 return len;
146} 153}
147 154
155static ssize_t as3935_noise_level_tripped_show(struct device *dev,
156 struct device_attribute *attr,
157 char *buf)
158{
159 struct as3935_state *st = iio_priv(dev_to_iio_dev(dev));
160 int ret;
161
162 mutex_lock(&st->lock);
163 ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ));
164 mutex_unlock(&st->lock);
165
166 return ret;
167}
168
148static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR, 169static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
149 as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0); 170 as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
150 171
172static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO,
173 as3935_noise_level_tripped_show, NULL, 0);
151 174
152static struct attribute *as3935_attributes[] = { 175static struct attribute *as3935_attributes[] = {
153 &iio_dev_attr_sensor_sensitivity.dev_attr.attr, 176 &iio_dev_attr_sensor_sensitivity.dev_attr.attr,
177 &iio_dev_attr_noise_level_tripped.dev_attr.attr,
154 NULL, 178 NULL,
155}; 179};
156 180
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work)
246 case AS3935_EVENT_INT: 270 case AS3935_EVENT_INT:
247 iio_trigger_poll_chained(st->trig); 271 iio_trigger_poll_chained(st->trig);
248 break; 272 break;
273 case AS3935_DISTURB_INT:
249 case AS3935_NOISE_INT: 274 case AS3935_NOISE_INT:
275 mutex_lock(&st->lock);
276 st->noise_tripped = jiffies;
277 mutex_unlock(&st->lock);
250 dev_warn(&st->spi->dev, "noise level is too high\n"); 278 dev_warn(&st->spi->dev, "noise level is too high\n");
251 break; 279 break;
252 } 280 }
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
269 297
270static void calibrate_as3935(struct as3935_state *st) 298static void calibrate_as3935(struct as3935_state *st)
271{ 299{
272 /* mask disturber interrupt bit */ 300 as3935_write(st, AS3935_DEFAULTS, 0x96);
273 as3935_write(st, AS3935_INT, BIT(5));
274
275 as3935_write(st, AS3935_CALIBRATE, 0x96); 301 as3935_write(st, AS3935_CALIBRATE, 0x96);
276 as3935_write(st, AS3935_TUNE_CAP, 302 as3935_write(st, AS3935_TUNE_CAP,
277 BIT(5) | (st->tune_cap / TUNE_CAP_DIV)); 303 BIT(5) | (st->tune_cap / TUNE_CAP_DIV));
278 304
279 mdelay(2); 305 mdelay(2);
280 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); 306 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
307 as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg);
281} 308}
282 309
283#ifdef CONFIG_PM_SLEEP 310#ifdef CONFIG_PM_SLEEP
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi)
370 return -EINVAL; 397 return -EINVAL;
371 } 398 }
372 399
400 ret = of_property_read_u32(np,
401 "ams,nflwdth", &st->nflwdth_reg);
402 if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
403 dev_err(&spi->dev,
404 "invalid nflwdth setting of %d\n",
405 st->nflwdth_reg);
406 return -EINVAL;
407 }
408
373 indio_dev->dev.parent = &spi->dev; 409 indio_dev->dev.parent = &spi->dev;
374 indio_dev->name = spi_get_device_id(spi)->name; 410 indio_dev->name = spi_get_device_id(spi)->name;
375 indio_dev->channels = as3935_channels; 411 indio_dev->channels = as3935_channels;
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi)
384 return -ENOMEM; 420 return -ENOMEM;
385 421
386 st->trig = trig; 422 st->trig = trig;
423 st->noise_tripped = jiffies - HZ;
387 trig->dev.parent = indio_dev->dev.parent; 424 trig->dev.parent = indio_dev->dev.parent;
388 iio_trigger_set_drvdata(trig, indio_dev); 425 iio_trigger_set_drvdata(trig, indio_dev);
389 trig->ops = &iio_interrupt_trigger_ops; 426 trig->ops = &iio_interrupt_trigger_ops;
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 9b9053494daf..eb212f8c8879 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -174,6 +174,7 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
174 clk_disable(priv->clk); 174 clk_disable(priv->clk);
175 175
176 /* Stop timer */ 176 /* Stop timer */
177 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
177 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); 178 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
178 regmap_write(priv->regmap, TIM_PSC, 0); 179 regmap_write(priv->regmap, TIM_PSC, 0);
179 regmap_write(priv->regmap, TIM_ARR, 0); 180 regmap_write(priv->regmap, TIM_ARR, 0);
@@ -715,8 +716,9 @@ static ssize_t stm32_count_set_preset(struct iio_dev *indio_dev,
715 if (ret) 716 if (ret)
716 return ret; 717 return ret;
717 718
719 /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
720 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
718 regmap_write(priv->regmap, TIM_ARR, preset); 721 regmap_write(priv->regmap, TIM_ARR, preset);
719 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE);
720 722
721 return len; 723 return len;
722} 724}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
100 if (ret) 100 if (ret)
101 goto pid_query_error; 101 goto pid_query_error;
102 102
103 nlmsg_end(skb, nlh);
104
103 pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", 105 pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
104 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); 106 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
105 107
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
170 &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); 172 &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
171 if (ret) 173 if (ret)
172 goto add_mapping_error; 174 goto add_mapping_error;
175
176 nlmsg_end(skb, nlh);
173 nlmsg_request->req_buffer = pm_msg; 177 nlmsg_request->req_buffer = pm_msg;
174 178
175 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 179 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
246 &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); 250 &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
247 if (ret) 251 if (ret)
248 goto query_mapping_error; 252 goto query_mapping_error;
253
254 nlmsg_end(skb, nlh);
249 nlmsg_request->req_buffer = pm_msg; 255 nlmsg_request->req_buffer = pm_msg;
250 256
251 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 257 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
308 if (ret) 314 if (ret)
309 goto remove_mapping_error; 315 goto remove_mapping_error;
310 316
317 nlmsg_end(skb, nlh);
318
311 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 319 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
312 if (ret) { 320 if (ret) {
313 skb = NULL; /* skb is freed in the netlink send-op handling */ 321 skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
597 &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); 597 &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
598 if (ret) 598 if (ret)
599 goto mapinfo_num_error; 599 goto mapinfo_num_error;
600
601 nlmsg_end(skb, nlh);
602
600 ret = rdma_nl_unicast(skb, iwpm_pid); 603 ret = rdma_nl_unicast(skb, iwpm_pid);
601 if (ret) { 604 if (ret) {
602 skb = NULL; 605 skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
678 if (ret) 681 if (ret)
679 goto send_mapping_info_unlock; 682 goto send_mapping_info_unlock;
680 683
684 nlmsg_end(skb, nlh);
685
681 iwpm_print_sockaddr(&map_info->local_sockaddr, 686 iwpm_print_sockaddr(&map_info->local_sockaddr,
682 "send_mapping_info: Local sockaddr:"); 687 "send_mapping_info: Local sockaddr:");
683 iwpm_print_sockaddr(&map_info->mapped_sockaddr, 688 iwpm_print_sockaddr(&map_info->mapped_sockaddr,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b12e58787c3d..1fb72c356e36 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
175 !netlink_capable(skb, CAP_NET_ADMIN)) 175 !netlink_capable(skb, CAP_NET_ADMIN))
176 return -EPERM; 176 return -EPERM;
177 177
178 /*
179 * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
180 * mistakenly call the .dump() function.
181 */
182 if (index == RDMA_NL_LS) {
183 if (cb_table[op].doit)
184 return cb_table[op].doit(skb, nlh, extack);
185 return -EINVAL;
186 }
178 /* FIXME: Convert IWCM to properly handle doit callbacks */ 187 /* FIXME: Convert IWCM to properly handle doit callbacks */
179 if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM || 188 if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
180 index == RDMA_NL_IWCM) { 189 index == RDMA_NL_IWCM) {
181 struct netlink_dump_control c = { 190 struct netlink_dump_control c = {
182 .dump = cb_table[op].dump, 191 .dump = cb_table[op].dump,
183 }; 192 };
184 return netlink_dump_start(nls, skb, nlh, &c); 193 if (c.dump)
194 return netlink_dump_start(nls, skb, nlh, &c);
195 return -EINVAL;
185 } 196 }
186 197
187 if (cb_table[op].doit) 198 if (cb_table[op].doit)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 3ba24c428c3b..2fae850a3eff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -214,7 +214,9 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
214 214
215 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, 215 err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
216 nldev_policy, extack); 216 nldev_policy, extack);
217 if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) 217 if (err ||
218 !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
219 !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
218 return -EINVAL; 220 return -EINVAL;
219 221
220 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); 222 index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 70ad19c4c73e..88bdafb297f5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
432 atomic_set(&qp->qp_sec->error_list_count, 0); 432 atomic_set(&qp->qp_sec->error_list_count, 0);
433 init_completion(&qp->qp_sec->error_complete); 433 init_completion(&qp->qp_sec->error_complete);
434 ret = security_ib_alloc_security(&qp->qp_sec->security); 434 ret = security_ib_alloc_security(&qp->qp_sec->security);
435 if (ret) 435 if (ret) {
436 kfree(qp->qp_sec); 436 kfree(qp->qp_sec);
437 qp->qp_sec = NULL;
438 }
437 439
438 return ret; 440 return ret;
439} 441}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4ab30d832ac5..52a2cf2d83aa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3869 resp.raw_packet_caps = attr.raw_packet_caps; 3869 resp.raw_packet_caps = attr.raw_packet_caps;
3870 resp.response_length += sizeof(resp.raw_packet_caps); 3870 resp.response_length += sizeof(resp.raw_packet_caps);
3871 3871
3872 if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) 3872 if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
3873 goto end; 3873 goto end;
3874 3874
3875 resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; 3875 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
3876 resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; 3876 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
3877 resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; 3877 resp.tm_caps.max_ops = attr.tm_caps.max_ops;
3878 resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; 3878 resp.tm_caps.max_sge = attr.tm_caps.max_sge;
3879 resp.xrq_caps.flags = attr.xrq_caps.flags; 3879 resp.tm_caps.flags = attr.tm_caps.flags;
3880 resp.response_length += sizeof(resp.xrq_caps); 3880 resp.response_length += sizeof(resp.tm_caps);
3881end: 3881end:
3882 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 3882 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3883 return err; 3883 return err;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b2ed4b9cda6e..0be42787759f 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066static int thermal_init(struct hfi1_devdata *dd); 1066static int thermal_init(struct hfi1_devdata *dd);
1067 1067
1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state); 1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070 int msecs);
1069static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, 1071static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070 int msecs); 1072 int msecs);
1071static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); 1073static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
8238 u64 regs[CCE_NUM_INT_CSRS]; 8240 u64 regs[CCE_NUM_INT_CSRS];
8239 u32 bit; 8241 u32 bit;
8240 int i; 8242 int i;
8243 irqreturn_t handled = IRQ_NONE;
8241 8244
8242 this_cpu_inc(*dd->int_counter); 8245 this_cpu_inc(*dd->int_counter);
8243 8246
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
8258 for_each_set_bit(bit, (unsigned long *)&regs[0], 8261 for_each_set_bit(bit, (unsigned long *)&regs[0],
8259 CCE_NUM_INT_CSRS * 64) { 8262 CCE_NUM_INT_CSRS * 64) {
8260 is_interrupt(dd, bit); 8263 is_interrupt(dd, bit);
8264 handled = IRQ_HANDLED;
8261 } 8265 }
8262 8266
8263 return IRQ_HANDLED; 8267 return handled;
8264} 8268}
8265 8269
8266static irqreturn_t sdma_interrupt(int irq, void *data) 8270static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9413 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); 9417 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9414} 9418}
9415 9419
9416void reset_qsfp(struct hfi1_pportdata *ppd) 9420int reset_qsfp(struct hfi1_pportdata *ppd)
9417{ 9421{
9418 struct hfi1_devdata *dd = ppd->dd; 9422 struct hfi1_devdata *dd = ppd->dd;
9419 u64 mask, qsfp_mask; 9423 u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
9443 * for alarms and warnings 9447 * for alarms and warnings
9444 */ 9448 */
9445 set_qsfp_int_n(ppd, 1); 9449 set_qsfp_int_n(ppd, 1);
9450
9451 /*
9452 * After the reset, AOC transmitters are enabled by default. They need
9453 * to be turned off to complete the QSFP setup before they can be
9454 * enabled again.
9455 */
9456 return set_qsfp_tx(ppd, 0);
9446} 9457}
9447 9458
9448static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, 9459static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10305{ 10316{
10306 struct hfi1_devdata *dd = ppd->dd; 10317 struct hfi1_devdata *dd = ppd->dd;
10307 u32 previous_state; 10318 u32 previous_state;
10319 int offline_state_ret;
10308 int ret; 10320 int ret;
10309 10321
10310 update_lcb_cache(dd); 10322 update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10326 ppd->offline_disabled_reason = 10338 ppd->offline_disabled_reason =
10327 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 10339 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10328 10340
10329 /* 10341 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10330 * Wait for offline transition. It can take a while for 10342 if (offline_state_ret < 0)
10331 * the link to go down. 10343 return offline_state_ret;
10332 */
10333 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10334 if (ret < 0)
10335 return ret;
10336
10337 /*
10338 * Now in charge of LCB - must be after the physical state is
10339 * offline.quiet and before host_link_state is changed.
10340 */
10341 set_host_lcb_access(dd);
10342 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10343
10344 /* make sure the logical state is also down */
10345 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10346 if (ret)
10347 force_logical_link_state_down(ppd);
10348
10349 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10350 10344
10345 /* Disabling AOC transmitters */
10351 if (ppd->port_type == PORT_TYPE_QSFP && 10346 if (ppd->port_type == PORT_TYPE_QSFP &&
10352 ppd->qsfp_info.limiting_active && 10347 ppd->qsfp_info.limiting_active &&
10353 qsfp_mod_present(ppd)) { 10348 qsfp_mod_present(ppd)) {
@@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10365 } 10360 }
10366 10361
10367 /* 10362 /*
10363 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10364 * can take a while for the link to go down.
10365 */
10366 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10367 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10368 if (ret < 0)
10369 return ret;
10370 }
10371
10372 /*
10373 * Now in charge of LCB - must be after the physical state is
10374 * offline.quiet and before host_link_state is changed.
10375 */
10376 set_host_lcb_access(dd);
10377 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10378
10379 /* make sure the logical state is also down */
10380 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10381 if (ret)
10382 force_logical_link_state_down(ppd);
10383
10384 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10385
10386 /*
10368 * The LNI has a mandatory wait time after the physical state 10387 * The LNI has a mandatory wait time after the physical state
10369 * moves to Offline.Quiet. The wait time may be different 10388 * moves to Offline.Quiet. The wait time may be different
10370 * depending on how the link went down. The 8051 firmware 10389 * depending on how the link went down. The 8051 firmware
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10396 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 10415 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10397 /* went down while attempting link up */ 10416 /* went down while attempting link up */
10398 check_lni_states(ppd); 10417 check_lni_states(ppd);
10418
10419 /* The QSFP doesn't need to be reset on LNI failure */
10420 ppd->qsfp_info.reset_needed = 0;
10399 } 10421 }
10400 10422
10401 /* the active link width (downgrade) is 0 on link down */ 10423 /* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12804 return 0; 12826 return 0;
12805} 12827}
12806 12828
12829/*
12830 * wait_phys_link_offline_quiet_substates - wait for any offline substate
12831 * @ppd: port device
12832 * @msecs: the number of milliseconds to wait
12833 *
12834 * Wait up to msecs milliseconds for any offline physical link
12835 * state change to occur.
12836 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12837 */
12838static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12839 int msecs)
12840{
12841 u32 read_state;
12842 unsigned long timeout;
12843
12844 timeout = jiffies + msecs_to_jiffies(msecs);
12845 while (1) {
12846 read_state = read_physical_state(ppd->dd);
12847 if ((read_state & 0xF0) == PLS_OFFLINE)
12848 break;
12849 if (time_after(jiffies, timeout)) {
12850 dd_dev_err(ppd->dd,
12851 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12852 read_state, msecs);
12853 return -ETIMEDOUT;
12854 }
12855 usleep_range(1950, 2050); /* sleep 2ms-ish */
12856 }
12857
12858 log_state_transition(ppd, read_state);
12859 return read_state;
12860}
12861
12807#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 12862#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12808(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 12863(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12809 12864
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index b8345a60a0fb..50b8645d0b87 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -204,6 +204,7 @@
204#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 204#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
205#define PLS_OFFLINE_REPORT_FAILURE 0x93 205#define PLS_OFFLINE_REPORT_FAILURE 0x93
206#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 206#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
207#define PLS_OFFLINE_QUIET_DURATION 0x95
207#define PLS_POLLING 0x20 208#define PLS_POLLING 0x20
208#define PLS_POLLING_QUIET 0x20 209#define PLS_POLLING_QUIET 0x20
209#define PLS_POLLING_ACTIVE 0x21 210#define PLS_POLLING_ACTIVE 0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
722void handle_link_bounce(struct work_struct *work); 723void handle_link_bounce(struct work_struct *work);
723void handle_start_link(struct work_struct *work); 724void handle_start_link(struct work_struct *work);
724void handle_sma_message(struct work_struct *work); 725void handle_sma_message(struct work_struct *work);
725void reset_qsfp(struct hfi1_pportdata *ppd); 726int reset_qsfp(struct hfi1_pportdata *ppd);
726void qsfp_event(struct work_struct *work); 727void qsfp_event(struct work_struct *work);
727void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); 728void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
728int send_idle_sma(struct hfi1_devdata *dd, u64 message); 729int send_idle_sma(struct hfi1_devdata *dd, u64 message);
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index d46b17107901..1613af1c58d9 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -204,7 +204,10 @@ done_asic:
204 return ret; 204 return ret;
205} 205}
206 206
207/* magic character sequence that trails an image */ 207/* magic character sequence that begins an image */
208#define IMAGE_START_MAGIC "APO="
209
210/* magic character sequence that might trail an image */
208#define IMAGE_TRAIL_MAGIC "egamiAPO" 211#define IMAGE_TRAIL_MAGIC "egamiAPO"
209 212
210/* EPROM file types */ 213/* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
250{ 253{
251 void *buffer; 254 void *buffer;
252 void *p; 255 void *p;
256 u32 length;
253 int ret; 257 int ret;
254 258
255 buffer = kmalloc(P1_SIZE, GFP_KERNEL); 259 buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
262 return ret; 266 return ret;
263 } 267 }
264 268
265 /* scan for image magic that may trail the actual data */ 269 /* config partition is valid only if it starts with IMAGE_START_MAGIC */
266 p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); 270 if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
267 if (!p) {
268 kfree(buffer); 271 kfree(buffer);
269 return -ENOENT; 272 return -ENOENT;
270 } 273 }
271 274
275 /* scan for image magic that may trail the actual data */
276 p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
277 if (p)
278 length = p - buffer;
279 else
280 length = P1_SIZE;
281
272 *data = buffer; 282 *data = buffer;
273 *size = p - buffer; 283 *size = length;
274 return 0; 284 return 0;
275} 285}
276 286
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2bc89260235a..d9a1e9893136 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
930 switch (ret) { 930 switch (ret) {
931 case 0: 931 case 0:
932 ret = setup_base_ctxt(fd, uctxt); 932 ret = setup_base_ctxt(fd, uctxt);
933 if (uctxt->subctxt_cnt) { 933 if (ret)
934 /* 934 deallocate_ctxt(uctxt);
935 * Base context is done (successfully or not), notify
936 * anybody using a sub-context that is waiting for
937 * this completion.
938 */
939 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
940 wake_up(&uctxt->wait);
941 }
942 break; 935 break;
943 case 1: 936 case 1:
944 ret = complete_subctxt(fd); 937 ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
1305 /* Now allocate the RcvHdr queue and eager buffers. */ 1298 /* Now allocate the RcvHdr queue and eager buffers. */
1306 ret = hfi1_create_rcvhdrq(dd, uctxt); 1299 ret = hfi1_create_rcvhdrq(dd, uctxt);
1307 if (ret) 1300 if (ret)
1308 return ret; 1301 goto done;
1309 1302
1310 ret = hfi1_setup_eagerbufs(uctxt); 1303 ret = hfi1_setup_eagerbufs(uctxt);
1311 if (ret) 1304 if (ret)
1312 goto setup_failed; 1305 goto done;
1313 1306
1314 /* If sub-contexts are enabled, do the appropriate setup */ 1307 /* If sub-contexts are enabled, do the appropriate setup */
1315 if (uctxt->subctxt_cnt) 1308 if (uctxt->subctxt_cnt)
1316 ret = setup_subctxt(uctxt); 1309 ret = setup_subctxt(uctxt);
1317 if (ret) 1310 if (ret)
1318 goto setup_failed; 1311 goto done;
1319 1312
1320 ret = hfi1_alloc_ctxt_rcv_groups(uctxt); 1313 ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
1321 if (ret) 1314 if (ret)
1322 goto setup_failed; 1315 goto done;
1323 1316
1324 ret = init_user_ctxt(fd, uctxt); 1317 ret = init_user_ctxt(fd, uctxt);
1325 if (ret) 1318 if (ret)
1326 goto setup_failed; 1319 goto done;
1327 1320
1328 user_init(uctxt); 1321 user_init(uctxt);
1329 1322
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
1331 fd->uctxt = uctxt; 1324 fd->uctxt = uctxt;
1332 hfi1_rcd_get(uctxt); 1325 hfi1_rcd_get(uctxt);
1333 1326
1334 return 0; 1327done:
1328 if (uctxt->subctxt_cnt) {
1329 /*
1330 * On error, set the failed bit so sub-contexts will clean up
1331 * correctly.
1332 */
1333 if (ret)
1334 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
1335 1335
1336setup_failed: 1336 /*
1337 /* Set the failed bit so sub-context init can do the right thing */ 1337 * Base context is done (successfully or not), notify anybody
1338 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); 1338 * using a sub-context that is waiting for this completion.
1339 deallocate_ctxt(uctxt); 1339 */
1340 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1341 wake_up(&uctxt->wait);
1342 }
1340 1343
1341 return ret; 1344 return ret;
1342} 1345}
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 82447b7cdda1..09e50fd2a08f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -68,7 +68,7 @@
68/* 68/*
69 * Code to adjust PCIe capabilities. 69 * Code to adjust PCIe capabilities.
70 */ 70 */
71static int tune_pcie_caps(struct hfi1_devdata *); 71static void tune_pcie_caps(struct hfi1_devdata *);
72 72
73/* 73/*
74 * Do all the common PCIe setup and initialization. 74 * Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
351 */ 351 */
352int request_msix(struct hfi1_devdata *dd, u32 msireq) 352int request_msix(struct hfi1_devdata *dd, u32 msireq)
353{ 353{
354 int nvec, ret; 354 int nvec;
355 355
356 nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, 356 nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
357 PCI_IRQ_MSIX | PCI_IRQ_LEGACY); 357 PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
360 return nvec; 360 return nvec;
361 } 361 }
362 362
363 ret = tune_pcie_caps(dd); 363 tune_pcie_caps(dd);
364 if (ret) {
365 dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
366 pci_free_irq_vectors(dd->pcidev);
367 return ret;
368 }
369 364
370 /* check for legacy IRQ */ 365 /* check for legacy IRQ */
371 if (nvec == 1 && !dd->pcidev->msix_enabled) 366 if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
502module_param_named(aspm, aspm_mode, uint, S_IRUGO); 497module_param_named(aspm, aspm_mode, uint, S_IRUGO);
503MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); 498MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
504 499
505static int tune_pcie_caps(struct hfi1_devdata *dd) 500static void tune_pcie_caps(struct hfi1_devdata *dd)
506{ 501{
507 struct pci_dev *parent; 502 struct pci_dev *parent;
508 u16 rc_mpss, rc_mps, ep_mpss, ep_mps; 503 u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
513 * Turn on extended tags in DevCtl in case the BIOS has turned it off 508 * Turn on extended tags in DevCtl in case the BIOS has turned it off
514 * to improve WFR SDMA bandwidth 509 * to improve WFR SDMA bandwidth
515 */ 510 */
516 ret = pcie_capability_read_word(dd->pcidev, 511 ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
517 PCI_EXP_DEVCTL, &ectl); 512 if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
518 if (ret) {
519 dd_dev_err(dd, "Unable to read from PCI config\n");
520 return ret;
521 }
522
523 if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
524 dd_dev_info(dd, "Enabling PCIe extended tags\n"); 513 dd_dev_info(dd, "Enabling PCIe extended tags\n");
525 ectl |= PCI_EXP_DEVCTL_EXT_TAG; 514 ectl |= PCI_EXP_DEVCTL_EXT_TAG;
526 ret = pcie_capability_write_word(dd->pcidev, 515 ret = pcie_capability_write_word(dd->pcidev,
527 PCI_EXP_DEVCTL, ectl); 516 PCI_EXP_DEVCTL, ectl);
528 if (ret) { 517 if (ret)
529 dd_dev_err(dd, "Unable to write to PCI config\n"); 518 dd_dev_info(dd, "Unable to write to PCI config\n");
530 return ret;
531 }
532 } 519 }
533 /* Find out supported and configured values for parent (root) */ 520 /* Find out supported and configured values for parent (root) */
534 parent = dd->pcidev->bus->self; 521 parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
536 * The driver cannot perform the tuning if it does not have 523 * The driver cannot perform the tuning if it does not have
537 * access to the upstream component. 524 * access to the upstream component.
538 */ 525 */
539 if (!parent) 526 if (!parent) {
540 return -EINVAL; 527 dd_dev_info(dd, "Parent not found\n");
528 return;
529 }
541 if (!pci_is_root_bus(parent->bus)) { 530 if (!pci_is_root_bus(parent->bus)) {
542 dd_dev_info(dd, "Parent not root\n"); 531 dd_dev_info(dd, "Parent not root\n");
543 return -EINVAL; 532 return;
533 }
534 if (!pci_is_pcie(parent)) {
535 dd_dev_info(dd, "Parent is not PCI Express capable\n");
536 return;
537 }
538 if (!pci_is_pcie(dd->pcidev)) {
539 dd_dev_info(dd, "PCI device is not PCI Express capable\n");
540 return;
544 } 541 }
545
546 if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
547 return -EINVAL;
548 rc_mpss = parent->pcie_mpss; 542 rc_mpss = parent->pcie_mpss;
549 rc_mps = ffs(pcie_get_mps(parent)) - 8; 543 rc_mps = ffs(pcie_get_mps(parent)) - 8;
550 /* Find out supported and configured values for endpoint (us) */ 544 /* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
590 ep_mrrs = max_mrrs; 584 ep_mrrs = max_mrrs;
591 pcie_set_readrq(dd->pcidev, ep_mrrs); 585 pcie_set_readrq(dd->pcidev, ep_mrrs);
592 } 586 }
593
594 return 0;
595} 587}
596 588
597/* End of PCIe capability tuning */ 589/* End of PCIe capability tuning */
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index a8af96d2b1b0..d486355880cb 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
790 * reuse of stale settings established in our previous pass through. 790 * reuse of stale settings established in our previous pass through.
791 */ 791 */
792 if (ppd->qsfp_info.reset_needed) { 792 if (ppd->qsfp_info.reset_needed) {
793 reset_qsfp(ppd); 793 ret = reset_qsfp(ppd);
794 if (ret)
795 return ret;
794 refresh_qsfp_cache(ppd, &ppd->qsfp_info); 796 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
795 } else { 797 } else {
796 ppd->qsfp_info.reset_needed = 1; 798 ppd->qsfp_info.reset_needed = 1;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
48 * @wqe: cqp wqe for header 48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe 49 * @header: header for the cqp wqe
50 */ 50 */
51static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) 51void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
52{ 52{
53 wmb(); /* make sure WQE is populated before polarity is set */ 53 wmb(); /* make sure WQE is populated before polarity is set */
54 set_64bit_val(wqe, 24, header); 54 set_64bit_val(wqe, 24, header);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
59 struct i40iw_fast_reg_stag_info *info, 59 struct i40iw_fast_reg_stag_info *info,
60 bool post_sq); 60 bool post_sq);
61 61
62void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
63
62/* HMC/FPM functions */ 64/* HMC/FPM functions */
63enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, 65enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
64 u8 hmc_fn_id); 66 u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
123 get_64bit_val(wqe, 24, &offset24); 123 get_64bit_val(wqe, 24, &offset24);
124 124
125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); 125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
126 set_64bit_val(wqe, 24, offset24);
127 126
128 set_64bit_val(wqe, 0, buf->mem.pa); 127 set_64bit_val(wqe, 0, buf->mem.pa);
129 set_64bit_val(wqe, 8, 128 set_64bit_val(wqe, 8,
130 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); 129 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
131 set_64bit_val(wqe, 24, offset24); 130 i40iw_insert_wqe_hdr(wqe, offset24);
132} 131}
133 132
134/** 133/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
409 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); 408 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
410 set_64bit_val(wqe, 16, header[0]); 409 set_64bit_val(wqe, 16, header[0]);
411 410
412 /* Ensure all data is written before writing valid bit */ 411 i40iw_insert_wqe_hdr(wqe, header[1]);
413 wmb();
414 set_64bit_val(wqe, 24, header[1]);
415 412
416 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); 413 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
417 i40iw_qp_post_wr(&qp->qp_uk); 414 i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
539 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | 536 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
540 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 537 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
541 538
542 set_64bit_val(wqe, 24, header); 539 i40iw_insert_wqe_hdr(wqe, header);
543 540
544 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); 541 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
545 i40iw_sc_cqp_post_sq(cqp); 542 i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
655 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | 652 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
656 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | 653 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
657 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 654 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
658 set_64bit_val(wqe, 24, header); 655 i40iw_insert_wqe_hdr(wqe, header);
659 656
660 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", 657 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
661 wqe, I40IW_CQP_WQE_SIZE * 8); 658 wqe, I40IW_CQP_WQE_SIZE * 8);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 28b3d02d511b..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; 826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
829 attr->port_num = 1;
829 init_attr->event_handler = iwqp->ibqp.event_handler; 830 init_attr->event_handler = iwqp->ibqp.event_handler;
830 init_attr->qp_context = iwqp->ibqp.qp_context; 831 init_attr->qp_context = iwqp->ibqp.qp_context;
831 init_attr->send_cq = iwqp->ibqp.send_cq; 832 init_attr->send_cq = iwqp->ibqp.send_cq;
832 init_attr->recv_cq = iwqp->ibqp.recv_cq; 833 init_attr->recv_cq = iwqp->ibqp.recv_cq;
833 init_attr->srq = iwqp->ibqp.srq; 834 init_attr->srq = iwqp->ibqp.srq;
834 init_attr->cap = attr->cap; 835 init_attr->cap = attr->cap;
836 init_attr->port_num = 1;
835 return 0; 837 return 0;
836} 838}
837 839
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 05fb4bdff6a0..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
778 } 778 }
779 779
780 if (MLX5_CAP_GEN(mdev, tag_matching)) { 780 if (MLX5_CAP_GEN(mdev, tag_matching)) {
781 props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; 781 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
782 props->xrq_caps.max_num_tags = 782 props->tm_caps.max_num_tags =
783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; 783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
784 props->xrq_caps.flags = IB_TM_CAP_RC; 784 props->tm_caps.flags = IB_TM_CAP_RC;
785 props->xrq_caps.max_ops = 785 props->tm_caps.max_ops =
786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
787 props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; 787 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
788 } 788 }
789 789
790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { 790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
@@ -4174,9 +4174,9 @@ err_bfreg:
4174err_uar_page: 4174err_uar_page:
4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); 4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4176 4176
4177err_cnt:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cong: 4177err_cong:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cnt:
4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) 4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4181 mlx5_ib_dealloc_counters(dev); 4181 mlx5_ib_dealloc_counters(dev);
4182 4182
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 914f212e7ef6..f3dbd75a0a96 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
50{ 50{
51 unsigned long tmp; 51 unsigned long tmp;
52 unsigned long m; 52 unsigned long m;
53 int i, k; 53 u64 base = ~0, p = 0;
54 u64 base = 0; 54 u64 len, pfn;
55 int p = 0; 55 int i = 0;
56 int skip;
57 int mask;
58 u64 len;
59 u64 pfn;
60 struct scatterlist *sg; 56 struct scatterlist *sg;
61 int entry; 57 int entry;
62 unsigned long page_shift = umem->page_shift; 58 unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
76 m = find_first_bit(&tmp, BITS_PER_LONG); 72 m = find_first_bit(&tmp, BITS_PER_LONG);
77 if (max_page_shift) 73 if (max_page_shift)
78 m = min_t(unsigned long, max_page_shift - page_shift, m); 74 m = min_t(unsigned long, max_page_shift - page_shift, m);
79 skip = 1 << m; 75
80 mask = skip - 1;
81 i = 0;
82 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 76 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
83 len = sg_dma_len(sg) >> page_shift; 77 len = sg_dma_len(sg) >> page_shift;
84 pfn = sg_dma_address(sg) >> page_shift; 78 pfn = sg_dma_address(sg) >> page_shift;
85 for (k = 0; k < len; k++) { 79 if (base + p != pfn) {
86 if (!(i & mask)) { 80 /* If either the offset or the new
87 tmp = (unsigned long)pfn; 81 * base are unaligned update m
88 m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); 82 */
89 skip = 1 << m; 83 tmp = (unsigned long)(pfn | p);
90 mask = skip - 1; 84 if (!IS_ALIGNED(tmp, 1 << m))
91 base = pfn; 85 m = find_first_bit(&tmp, BITS_PER_LONG);
92 p = 0; 86
93 } else { 87 base = pfn;
94 if (base + p != pfn) { 88 p = 0;
95 tmp = (unsigned long)p;
96 m = find_first_bit(&tmp, BITS_PER_LONG);
97 skip = 1 << m;
98 mask = skip - 1;
99 base = pfn;
100 p = 0;
101 }
102 }
103 p++;
104 i++;
105 } 89 }
90
91 p += len;
92 i += len;
106 } 93 }
107 94
108 if (i) { 95 if (i) {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 0e2789d9bb4d..37bbc543847a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,7 +47,8 @@ enum {
47 47
48#define MLX5_UMR_ALIGN 2048 48#define MLX5_UMR_ALIGN 2048
49 49
50static int clean_mr(struct mlx5_ib_mr *mr); 50static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int mr_cache_max_order(struct mlx5_ib_dev *dev); 52static int mr_cache_max_order(struct mlx5_ib_dev *dev);
52static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
53 54
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1270 1271
1271 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, 1272 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1272 update_xlt_flags); 1273 update_xlt_flags);
1274
1273 if (err) { 1275 if (err) {
1274 mlx5_ib_dereg_mr(&mr->ibmr); 1276 dereg_mr(dev, mr);
1275 return ERR_PTR(err); 1277 return ERR_PTR(err);
1276 } 1278 }
1277 } 1279 }
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1356 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, 1358 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1357 &npages, &page_shift, &ncont, &order); 1359 &npages, &page_shift, &ncont, &order);
1358 if (err < 0) { 1360 if (err < 0) {
1359 clean_mr(mr); 1361 clean_mr(dev, mr);
1360 return err; 1362 return err;
1361 } 1363 }
1362 } 1364 }
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1410 if (err) { 1412 if (err) {
1411 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1413 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1412 ib_umem_release(mr->umem); 1414 ib_umem_release(mr->umem);
1413 clean_mr(mr); 1415 clean_mr(dev, mr);
1414 return err; 1416 return err;
1415 } 1417 }
1416 } 1418 }
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1469 } 1471 }
1470} 1472}
1471 1473
1472static int clean_mr(struct mlx5_ib_mr *mr) 1474static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1473{ 1475{
1474 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1475 int allocated_from_cache = mr->allocated_from_cache; 1476 int allocated_from_cache = mr->allocated_from_cache;
1476 int err; 1477 int err;
1477 1478
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
1507 return 0; 1508 return 0;
1508} 1509}
1509 1510
1510int mlx5_ib_dereg_mr(struct ib_mr *ibmr) 1511static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1511{ 1512{
1512 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1513 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1514 int npages = mr->npages; 1513 int npages = mr->npages;
1515 struct ib_umem *umem = mr->umem; 1514 struct ib_umem *umem = mr->umem;
1516 1515
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1539 } 1538 }
1540#endif 1539#endif
1541 1540
1542 clean_mr(mr); 1541 clean_mr(dev, mr);
1543 1542
1544 if (umem) { 1543 if (umem) {
1545 ib_umem_release(umem); 1544 ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1549 return 0; 1548 return 0;
1550} 1549}
1551 1550
1551int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1552{
1553 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1554 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1555
1556 return dereg_mr(dev, mr);
1557}
1558
1552struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, 1559struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1553 enum ib_mr_type mr_type, 1560 enum ib_mr_type mr_type,
1554 u32 max_num_sg) 1561 u32 max_num_sg)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f0dc5f4aa177..442b9bdc0f03 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3232 mr->ibmr.iova); 3232 mr->ibmr.iova);
3233 set_wqe_32bit_value(wqe->wqe_words, 3233 set_wqe_32bit_value(wqe->wqe_words,
3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3235 mr->ibmr.length); 3235 lower_32_bits(mr->ibmr.length));
3236 set_wqe_32bit_value(wqe->wqe_words, 3236 set_wqe_32bit_value(wqe->wqe_words,
3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); 3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
3238 set_wqe_32bit_value(wqe->wqe_words, 3238 set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3274 mr->npages * 8); 3274 mr->npages * 8);
3275 3275
3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " 3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
3277 "length: %d, rkey: %0x, pgl_paddr: %llx, " 3277 "length: %lld, rkey: %0x, pgl_paddr: %llx, "
3278 "page_list_len: %u, wqe_misc: %x\n", 3278 "page_list_len: %u, wqe_misc: %x\n",
3279 (unsigned long long) mr->ibmr.iova, 3279 (unsigned long long) mr->ibmr.iova,
3280 mr->ibmr.length, 3280 mr->ibmr.length,
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
387 u8 wqe_size; 387 u8 wqe_size;
388 388
389 u8 smac[ETH_ALEN]; 389 u8 smac[ETH_ALEN];
390 u16 vlan_id; 390 u16 vlan;
391 int rc; 391 int rc;
392 } *rqe_wr_id; 392 } *rqe_wr_id;
393 393
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
105 105
106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? 106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
107 -EINVAL : 0; 107 -EINVAL : 0;
108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; 108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
109 /* note: length stands for data length i.e. GRH is excluded */ 109 /* note: length stands for data length i.e. GRH is excluded */
110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = 110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
111 data->length.data_length; 111 data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
694 struct qedr_cq *cq = get_qedr_cq(ibcq); 694 struct qedr_cq *cq = get_qedr_cq(ibcq);
695 struct qedr_qp *qp = dev->gsi_qp; 695 struct qedr_qp *qp = dev->gsi_qp;
696 unsigned long flags; 696 unsigned long flags;
697 u16 vlan_id;
697 int i = 0; 698 int i = 0;
698 699
699 spin_lock_irqsave(&cq->cq_lock, flags); 700 spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
712 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; 713 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
713 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); 714 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
714 wc[i].wc_flags |= IB_WC_WITH_SMAC; 715 wc[i].wc_flags |= IB_WC_WITH_SMAC;
715 if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { 716
717 vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
718 VLAN_VID_MASK;
719 if (vlan_id) {
716 wc[i].wc_flags |= IB_WC_WITH_VLAN; 720 wc[i].wc_flags |= IB_WC_WITH_VLAN;
717 wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; 721 wc[i].vlan_id = vlan_id;
722 wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
723 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 } 724 }
719 725
720 qedr_inc_sw_cons(&qp->rq); 726 qedr_inc_sw_cons(&qp->rq);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2e075377242e..6cd61638b441 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1000 */ 1000 */
1001 priv->dev->broadcast[8] = priv->pkey >> 8; 1001 priv->dev->broadcast[8] = priv->pkey >> 8;
1002 priv->dev->broadcast[9] = priv->pkey & 0xff; 1002 priv->dev->broadcast[9] = priv->pkey & 0xff;
1003
1004 /*
1005 * Update the broadcast address in the priv->broadcast object,
1006 * in case it already exists, otherwise no one will do that.
1007 */
1008 if (priv->broadcast) {
1009 spin_lock_irq(&priv->lock);
1010 memcpy(priv->broadcast->mcmember.mgid.raw,
1011 priv->dev->broadcast + 4,
1012 sizeof(union ib_gid));
1013 spin_unlock_irq(&priv->lock);
1014 }
1015
1016 return 0; 1003 return 0;
1017 } 1004 }
1018 1005
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bac95b509a9b..dcc77014018d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
2180{ 2180{
2181 struct ipoib_dev_priv *priv; 2181 struct ipoib_dev_priv *priv;
2182 struct ib_port_attr attr; 2182 struct ib_port_attr attr;
2183 struct rdma_netdev *rn;
2183 int result = -ENOMEM; 2184 int result = -ENOMEM;
2184 2185
2185 priv = ipoib_intf_alloc(hca, port, format); 2186 priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
2279 ipoib_dev_cleanup(priv->dev); 2280 ipoib_dev_cleanup(priv->dev);
2280 2281
2281device_init_failed: 2282device_init_failed:
2282 free_netdev(priv->dev); 2283 rn = netdev_priv(priv->dev);
2284 rn->free_rdma_netdev(priv->dev);
2283 kfree(priv); 2285 kfree(priv);
2284 2286
2285alloc_mem_failed: 2287alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2328 return; 2330 return;
2329 2331
2330 list_for_each_entry_safe(priv, tmp, dev_list, list) { 2332 list_for_each_entry_safe(priv, tmp, dev_list, list) {
2331 struct rdma_netdev *rn = netdev_priv(priv->dev); 2333 struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
2332 2334
2333 ib_unregister_event_handler(&priv->event_handler); 2335 ib_unregister_event_handler(&priv->event_handler);
2334 flush_workqueue(ipoib_workqueue); 2336 flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2350 unregister_netdev(priv->dev); 2352 unregister_netdev(priv->dev);
2351 mutex_unlock(&priv->sysfs_mutex); 2353 mutex_unlock(&priv->sysfs_mutex);
2352 2354
2353 rn->free_rdma_netdev(priv->dev); 2355 parent_rn->free_rdma_netdev(priv->dev);
2356
2357 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
2358 struct rdma_netdev *child_rn;
2354 2359
2355 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) 2360 child_rn = netdev_priv(cpriv->dev);
2361 child_rn->free_rdma_netdev(cpriv->dev);
2356 kfree(cpriv); 2362 kfree(cpriv);
2363 }
2357 2364
2358 kfree(priv); 2365 kfree(priv);
2359 } 2366 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9927cd6b7082..55a9b71ed05a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
141 return restart_syscall(); 141 return restart_syscall();
142 } 142 }
143 143
144 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 144 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
145 if (!priv) {
146 rtnl_unlock(); 145 rtnl_unlock();
147 mutex_unlock(&ppriv->sysfs_mutex); 146 mutex_unlock(&ppriv->sysfs_mutex);
148 return -ENOMEM; 147 return restart_syscall();
149 } 148 }
150 149
151 down_write(&ppriv->vlan_rwsem); 150 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
151 if (!priv) {
152 result = -ENOMEM;
153 goto out;
154 }
152 155
153 /* 156 /*
154 * First ensure this isn't a duplicate. We check the parent device and 157 * First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
175 rtnl_unlock(); 178 rtnl_unlock();
176 mutex_unlock(&ppriv->sysfs_mutex); 179 mutex_unlock(&ppriv->sysfs_mutex);
177 180
178 if (result) { 181 if (result && priv) {
179 free_netdev(priv->dev); 182 struct rdma_netdev *rn;
183
184 rn = netdev_priv(priv->dev);
185 rn->free_rdma_netdev(priv->dev);
180 kfree(priv); 186 kfree(priv);
181 } 187 }
182 188
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
204 return restart_syscall(); 210 return restart_syscall();
205 } 211 }
206 212
207 down_write(&ppriv->vlan_rwsem); 213 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
214 rtnl_unlock();
215 mutex_unlock(&ppriv->sysfs_mutex);
216 return restart_syscall();
217 }
218
208 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 219 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
209 if (priv->pkey == pkey && 220 if (priv->pkey == pkey &&
210 priv->child_type == IPOIB_LEGACY_CHILD) { 221 priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
224 mutex_unlock(&ppriv->sysfs_mutex); 235 mutex_unlock(&ppriv->sysfs_mutex);
225 236
226 if (dev) { 237 if (dev) {
227 free_netdev(dev); 238 struct rdma_netdev *rn;
239
240 rn = netdev_priv(dev);
241 rn->free_rdma_netdev(priv->dev);
228 kfree(priv); 242 kfree(priv);
229 return 0; 243 return 0;
230 } 244 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9c3e9ab53a41..322209d5ff58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
154{ 154{
155 int i; 155 int i;
156 156
157 iser_err("page vec npages %d data length %d\n", 157 iser_err("page vec npages %d data length %lld\n",
158 page_vec->npages, page_vec->fake_mr.length); 158 page_vec->npages, page_vec->fake_mr.length);
159 for (i = 0; i < page_vec->npages; i++) 159 for (i = 0; i < page_vec->npages; i++)
160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); 160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d268fdc23c64..762bfb9487dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
933} 933}
934EXPORT_SYMBOL(input_set_keycode); 934EXPORT_SYMBOL(input_set_keycode);
935 935
936bool input_match_device_id(const struct input_dev *dev,
937 const struct input_device_id *id)
938{
939 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
940 if (id->bustype != dev->id.bustype)
941 return false;
942
943 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
944 if (id->vendor != dev->id.vendor)
945 return false;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
948 if (id->product != dev->id.product)
949 return false;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
952 if (id->version != dev->id.version)
953 return false;
954
955 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
956 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
957 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
958 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
959 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
960 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
961 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
962 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
963 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
964 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
965 return false;
966 }
967
968 return true;
969}
970EXPORT_SYMBOL(input_match_device_id);
971
936static const struct input_device_id *input_match_device(struct input_handler *handler, 972static const struct input_device_id *input_match_device(struct input_handler *handler,
937 struct input_dev *dev) 973 struct input_dev *dev)
938{ 974{
939 const struct input_device_id *id; 975 const struct input_device_id *id;
940 976
941 for (id = handler->id_table; id->flags || id->driver_info; id++) { 977 for (id = handler->id_table; id->flags || id->driver_info; id++) {
942 978 if (input_match_device_id(dev, id) &&
943 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 979 (!handler->match || handler->match(handler, dev))) {
944 if (id->bustype != dev->id.bustype)
945 continue;
946
947 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
948 if (id->vendor != dev->id.vendor)
949 continue;
950
951 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
952 if (id->product != dev->id.product)
953 continue;
954
955 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
956 if (id->version != dev->id.version)
957 continue;
958
959 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
960 continue;
961
962 if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
963 continue;
964
965 if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
966 continue;
967
968 if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
969 continue;
970
971 if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
972 continue;
973
974 if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
975 continue;
976
977 if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
978 continue;
979
980 if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
981 continue;
982
983 if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
984 continue;
985
986 if (!handler->match || handler->match(handler, dev))
987 return id; 980 return id;
981 }
988 } 982 }
989 983
990 return NULL; 984 return NULL;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 29d677c714d2..7b29a8944039 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
747 input_close_device(handle); 747 input_close_device(handle);
748} 748}
749 749
750/*
751 * These codes are copied from from hid-ids.h, unfortunately there is no common
752 * usb_ids/bt_ids.h header.
753 */
754#define USB_VENDOR_ID_SONY 0x054c
755#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
756#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
757#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
758#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
759
760#define USB_VENDOR_ID_THQ 0x20d6
761#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
762
763#define ACCEL_DEV(vnd, prd) \
764 { \
765 .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \
766 INPUT_DEVICE_ID_MATCH_PRODUCT | \
767 INPUT_DEVICE_ID_MATCH_PROPBIT, \
768 .vendor = (vnd), \
769 .product = (prd), \
770 .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \
771 }
772
773static const struct input_device_id joydev_blacklist[] = {
774 /* Avoid touchpads and touchscreens */
775 {
776 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
777 INPUT_DEVICE_ID_MATCH_KEYBIT,
778 .evbit = { BIT_MASK(EV_KEY) },
779 .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
780 },
781 /* Avoid tablets, digitisers and similar devices */
782 {
783 .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
784 INPUT_DEVICE_ID_MATCH_KEYBIT,
785 .evbit = { BIT_MASK(EV_KEY) },
786 .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
787 },
788 /* Disable accelerometers on composite devices */
789 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
790 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
791 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
792 ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
793 ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
794 { /* sentinel */ }
795};
796
797static bool joydev_dev_is_blacklisted(struct input_dev *dev)
798{
799 const struct input_device_id *id;
800
801 for (id = joydev_blacklist; id->flags; id++) {
802 if (input_match_device_id(dev, id)) {
803 dev_dbg(&dev->dev,
804 "joydev: blacklisting '%s'\n", dev->name);
805 return true;
806 }
807 }
808
809 return false;
810}
811
750static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) 812static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
751{ 813{
752 DECLARE_BITMAP(jd_scratch, KEY_CNT); 814 DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
807 869
808static bool joydev_match(struct input_handler *handler, struct input_dev *dev) 870static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
809{ 871{
810 /* Avoid touchpads and touchscreens */ 872 /* Disable blacklisted devices */
811 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) 873 if (joydev_dev_is_blacklisted(dev))
812 return false;
813
814 /* Avoid tablets, digitisers and similar devices */
815 if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
816 return false; 874 return false;
817 875
818 /* Avoid absolute mice */ 876 /* Avoid absolute mice */
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index e37e335e406f..6da607d3b811 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
234static int tca8418_configure(struct tca8418_keypad *keypad_data, 234static int tca8418_configure(struct tca8418_keypad *keypad_data,
235 u32 rows, u32 cols) 235 u32 rows, u32 cols)
236{ 236{
237 int reg, error; 237 int reg, error = 0;
238
239 /* Write config register, if this fails assume device not present */
240 error = tca8418_write_byte(keypad_data, REG_CFG,
241 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
242 if (error < 0)
243 return -ENODEV;
244
245 238
246 /* Assemble a mask for row and column registers */ 239 /* Assemble a mask for row and column registers */
247 reg = ~(~0 << rows); 240 reg = ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
257 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); 250 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
258 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); 251 error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
259 252
253 if (error)
254 return error;
255
256 error = tca8418_write_byte(keypad_data, REG_CFG,
257 CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
258
260 return error; 259 return error;
261} 260}
262 261
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
268 struct input_dev *input; 267 struct input_dev *input;
269 u32 rows = 0, cols = 0; 268 u32 rows = 0, cols = 0;
270 int error, row_shift, max_keys; 269 int error, row_shift, max_keys;
270 u8 reg;
271 271
272 /* Check i2c driver capabilities */ 272 /* Check i2c driver capabilities */
273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { 273 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
301 keypad_data->client = client; 301 keypad_data->client = client;
302 keypad_data->row_shift = row_shift; 302 keypad_data->row_shift = row_shift;
303 303
304 /* Initialize the chip or fail if chip isn't present */ 304 /* Read key lock register, if this fails assume device not present */
305 error = tca8418_configure(keypad_data, rows, cols); 305 error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
306 if (error < 0) 306 if (error)
307 return error; 307 return -ENODEV;
308 308
309 /* Configure input device */ 309 /* Configure input device */
310 input = devm_input_allocate_device(dev); 310 input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
340 return error; 340 return error;
341 } 341 }
342 342
343 /* Initialize the chip */
344 error = tca8418_configure(keypad_data, rows, cols);
345 if (error < 0)
346 return error;
347
343 error = input_register_device(input); 348 error = input_register_device(input);
344 if (error) { 349 if (error) {
345 dev_err(dev, "Unable to register input device, error: %d\n", 350 dev_err(dev, "Unable to register input device, error: %d\n",
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 6cee5adc3b5c..debeeaeb8812 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
403 }, 403 },
404 { /* sentinel */ } 404 { /* sentinel */ }
405}; 405};
406MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
406 407
407static struct platform_driver axp20x_pek_driver = { 408static struct platform_driver axp20x_pek_driver = {
408 .probe = axp20x_pek_probe, 409 .probe = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
417MODULE_DESCRIPTION("axp20x Power Button"); 418MODULE_DESCRIPTION("axp20x Power Button");
418MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 419MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
419MODULE_LICENSE("GPL"); 420MODULE_LICENSE("GPL");
420MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6bf82ea8c918..ae473123583b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
1635 return NULL; 1635 return NULL;
1636 } 1636 }
1637 1637
1638 while (buflen > 0) { 1638 while (buflen >= sizeof(*union_desc)) {
1639 union_desc = (struct usb_cdc_union_desc *)buf; 1639 union_desc = (struct usb_cdc_union_desc *)buf;
1640 1640
1641 if (union_desc->bLength > buflen) {
1642 dev_err(&intf->dev, "Too large descriptor\n");
1643 return NULL;
1644 }
1645
1641 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && 1646 if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
1642 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { 1647 union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
1643 dev_dbg(&intf->dev, "Found union header\n"); 1648 dev_dbg(&intf->dev, "Found union header\n");
1644 return union_desc; 1649
1650 if (union_desc->bLength >= sizeof(*union_desc))
1651 return union_desc;
1652
1653 dev_err(&intf->dev,
1654 "Union descriptor to short (%d vs %zd\n)",
1655 union_desc->bLength, sizeof(*union_desc));
1656 return NULL;
1645 } 1657 }
1646 1658
1647 buflen -= union_desc->bLength; 1659 buflen -= union_desc->bLength;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0e761d079dc4..6d6b092e2da9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1258 { "ELAN0605", 0 }, 1258 { "ELAN0605", 0 },
1259 { "ELAN0609", 0 }, 1259 { "ELAN0609", 0 },
1260 { "ELAN060B", 0 }, 1260 { "ELAN060B", 0 },
1261 { "ELAN0611", 0 },
1261 { "ELAN1000", 0 }, 1262 { "ELAN1000", 0 },
1262 { } 1263 { }
1263}; 1264};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5af0b7d200bc..ee5466a374bf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
1709 .sensor_pdata = { 1709 .sensor_pdata = {
1710 .sensor_type = rmi_sensor_touchpad, 1710 .sensor_type = rmi_sensor_touchpad,
1711 .axis_align.flip_y = true, 1711 .axis_align.flip_y = true,
1712 /* to prevent cursors jumps: */ 1712 .kernel_tracking = false,
1713 .kernel_tracking = true,
1714 .topbuttonpad = topbuttonpad, 1713 .topbuttonpad = topbuttonpad,
1715 }, 1714 },
1716 .f30_data = { 1715 .f30_data = {
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 34dfee555b20..82e0f0d43d55 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
232 unsigned int trackstick_button = BTN_LEFT; 232 unsigned int trackstick_button = BTN_LEFT;
233 bool button_mapped = false; 233 bool button_mapped = false;
234 int i; 234 int i;
235 int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
235 236
236 f30->gpioled_key_map = devm_kcalloc(&fn->dev, 237 f30->gpioled_key_map = devm_kcalloc(&fn->dev,
237 f30->gpioled_count, 238 button_count,
238 sizeof(f30->gpioled_key_map[0]), 239 sizeof(f30->gpioled_key_map[0]),
239 GFP_KERNEL); 240 GFP_KERNEL);
240 if (!f30->gpioled_key_map) { 241 if (!f30->gpioled_key_map) {
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
242 return -ENOMEM; 243 return -ENOMEM;
243 } 244 }
244 245
245 for (i = 0; i < f30->gpioled_count; i++) { 246 for (i = 0; i < button_count; i++) {
246 if (!rmi_f30_is_valid_button(i, f30->ctrl)) 247 if (!rmi_f30_is_valid_button(i, f30->ctrl))
247 continue; 248 continue;
248 249
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index b796e891e2ee..4b8b9d7aa75e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
230 230
231 /* Walk this report and pull out the info we need */ 231 /* Walk this report and pull out the info we need */
232 while (i < length) { 232 while (i < length) {
233 prefix = report[i]; 233 prefix = report[i++];
234
235 /* Skip over prefix */
236 i++;
237 234
238 /* Determine data size and save the data in the proper variable */ 235 /* Determine data size and save the data in the proper variable */
239 size = PREF_SIZE(prefix); 236 size = (1U << PREF_SIZE(prefix)) >> 1;
237 if (i + size > length) {
238 dev_err(ddev,
239 "Not enough data (need %d, have %d)\n",
240 i + size, length);
241 break;
242 }
243
240 switch (size) { 244 switch (size) {
241 case 1: 245 case 1:
242 data = report[i]; 246 data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
244 case 2: 248 case 2:
245 data16 = get_unaligned_le16(&report[i]); 249 data16 = get_unaligned_le16(&report[i]);
246 break; 250 break;
247 case 3: 251 case 4:
248 size = 4;
249 data32 = get_unaligned_le32(&report[i]); 252 data32 = get_unaligned_le32(&report[i]);
250 break; 253 break;
251 } 254 }
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 32d2762448aa..b3bbad7d2282 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -72,6 +72,9 @@ struct goodix_ts_data {
72#define GOODIX_REG_CONFIG_DATA 0x8047 72#define GOODIX_REG_CONFIG_DATA 0x8047
73#define GOODIX_REG_ID 0x8140 73#define GOODIX_REG_ID 0x8140
74 74
75#define GOODIX_BUFFER_STATUS_READY BIT(7)
76#define GOODIX_BUFFER_STATUS_TIMEOUT 20
77
75#define RESOLUTION_LOC 1 78#define RESOLUTION_LOC 1
76#define MAX_CONTACTS_LOC 5 79#define MAX_CONTACTS_LOC 5
77#define TRIGGER_LOC 6 80#define TRIGGER_LOC 6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
195 198
196static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) 199static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
197{ 200{
201 unsigned long max_timeout;
198 int touch_num; 202 int touch_num;
199 int error; 203 int error;
200 204
201 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, 205 /*
202 GOODIX_CONTACT_SIZE + 1); 206 * The 'buffer status' bit, which indicates that the data is valid, is
203 if (error) { 207 * not set as soon as the interrupt is raised, but slightly after.
204 dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); 208 * This takes around 10 ms to happen, so we poll for 20 ms.
205 return error; 209 */
206 } 210 max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
211 do {
212 error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
213 data, GOODIX_CONTACT_SIZE + 1);
214 if (error) {
215 dev_err(&ts->client->dev, "I2C transfer error: %d\n",
216 error);
217 return error;
218 }
207 219
208 if (!(data[0] & 0x80)) 220 if (data[0] & GOODIX_BUFFER_STATUS_READY) {
209 return -EAGAIN; 221 touch_num = data[0] & 0x0f;
222 if (touch_num > ts->max_touch_num)
223 return -EPROTO;
224
225 if (touch_num > 1) {
226 data += 1 + GOODIX_CONTACT_SIZE;
227 error = goodix_i2c_read(ts->client,
228 GOODIX_READ_COOR_ADDR +
229 1 + GOODIX_CONTACT_SIZE,
230 data,
231 GOODIX_CONTACT_SIZE *
232 (touch_num - 1));
233 if (error)
234 return error;
235 }
236
237 return touch_num;
238 }
210 239
211 touch_num = data[0] & 0x0f; 240 usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
212 if (touch_num > ts->max_touch_num) 241 } while (time_before(jiffies, max_timeout));
213 return -EPROTO;
214
215 if (touch_num > 1) {
216 data += 1 + GOODIX_CONTACT_SIZE;
217 error = goodix_i2c_read(ts->client,
218 GOODIX_READ_COOR_ADDR +
219 1 + GOODIX_CONTACT_SIZE,
220 data,
221 GOODIX_CONTACT_SIZE * (touch_num - 1));
222 if (error)
223 return error;
224 }
225 242
226 return touch_num; 243 /*
244 * The Goodix panel will send spurious interrupts after a
245 * 'finger up' event, which will always cause a timeout.
246 */
247 return 0;
227} 248}
228 249
229static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) 250static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 157fdb4bb2e8..8c6c6178ec12 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
663 sdata->input->open = stmfts_input_open; 663 sdata->input->open = stmfts_input_open;
664 sdata->input->close = stmfts_input_close; 664 sdata->input->close = stmfts_input_close;
665 665
666 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
667 input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
666 touchscreen_parse_properties(sdata->input, true, &sdata->prop); 668 touchscreen_parse_properties(sdata->input, true, &sdata->prop);
667 669
668 input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
669 sdata->prop.max_x, 0, 0);
670 input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
671 sdata->prop.max_y, 0, 0);
672 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); 670 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
673 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); 671 input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
674 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); 672 input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 7953381d939a..f1043ae71dcc 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
161 break; 161 break;
162 case 5: 162 case 5:
163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | 163 config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
164 ts_dev->bit_xn | ts_dev->bit_yp; 164 STEPCONFIG_XNP | STEPCONFIG_YPN;
165 break; 165 break;
166 case 8: 166 case 8:
167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); 167 config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215877f5..8e8874d23717 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void)
2773 2773
2774int __init amd_iommu_init_dma_ops(void) 2774int __init amd_iommu_init_dma_ops(void)
2775{ 2775{
2776 swiotlb = iommu_pass_through ? 1 : 0; 2776 swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
2777 iommu_detected = 1; 2777 iommu_detected = 1;
2778 2778
2779 /* 2779 /*
2780 * In case we don't initialize SWIOTLB (actually the common case 2780 * In case we don't initialize SWIOTLB (actually the common case
2781 * when AMD IOMMU is enabled), make sure there are global 2781 * when AMD IOMMU is enabled and SME is not active), make sure there
2782 * dma_ops set as a fall-back for devices not handled by this 2782 * are global dma_ops set as a fall-back for devices not handled by
2783 * driver (for example non-PCI devices). 2783 * this driver (for example non-PCI devices). When SME is active,
2784 * make sure that swiotlb variable remains set so the global dma_ops
2785 * continue to be SWIOTLB.
2784 */ 2786 */
2785 if (!swiotlb) 2787 if (!swiotlb)
2786 dma_ops = &nommu_dma_ops; 2788 dma_ops = &nommu_dma_ops;
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3046 mutex_unlock(&domain->api_lock); 3048 mutex_unlock(&domain->api_lock);
3047 3049
3048 domain_flush_tlb_pde(domain); 3050 domain_flush_tlb_pde(domain);
3051 domain_flush_complete(domain);
3049 3052
3050 return unmap_size; 3053 return unmap_size;
3051} 3054}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 382de42b8359..6fe2d0346073 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -874,7 +874,7 @@ static bool copy_device_table(void)
874 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 874 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
875 entry = (((u64) hi) << 32) + lo; 875 entry = (((u64) hi) << 32) + lo;
876 if (last_entry && last_entry != entry) { 876 if (last_entry && last_entry != entry) {
877 pr_err("IOMMU:%d should use the same dev table as others!/n", 877 pr_err("IOMMU:%d should use the same dev table as others!\n",
878 iommu->index); 878 iommu->index);
879 return false; 879 return false;
880 } 880 }
@@ -882,7 +882,7 @@ static bool copy_device_table(void)
882 882
883 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 883 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
884 if (old_devtb_size != dev_table_size) { 884 if (old_devtb_size != dev_table_size) {
885 pr_err("The device table size of IOMMU:%d is not expected!/n", 885 pr_err("The device table size of IOMMU:%d is not expected!\n",
886 iommu->index); 886 iommu->index);
887 return false; 887 return false;
888 } 888 }
@@ -890,7 +890,7 @@ static bool copy_device_table(void)
890 890
891 old_devtb_phys = entry & PAGE_MASK; 891 old_devtb_phys = entry & PAGE_MASK;
892 if (old_devtb_phys >= 0x100000000ULL) { 892 if (old_devtb_phys >= 0x100000000ULL) {
893 pr_err("The address of old device table is above 4G, not trustworthy!/n"); 893 pr_err("The address of old device table is above 4G, not trustworthy!\n");
894 return false; 894 return false;
895 } 895 }
896 old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); 896 old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -901,7 +901,7 @@ static bool copy_device_table(void)
901 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, 901 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
902 get_order(dev_table_size)); 902 get_order(dev_table_size));
903 if (old_dev_tbl_cpy == NULL) { 903 if (old_dev_tbl_cpy == NULL) {
904 pr_err("Failed to allocate memory for copying old device table!/n"); 904 pr_err("Failed to allocate memory for copying old device table!\n");
905 return false; 905 return false;
906 } 906 }
907 907
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index f596fcc32898..25c2c75f5332 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = {
709 pm_runtime_force_resume) 709 pm_runtime_force_resume)
710}; 710};
711 711
712static const struct of_device_id sysmmu_of_match[] __initconst = { 712static const struct of_device_id sysmmu_of_match[] = {
713 { .compatible = "samsung,exynos-sysmmu", }, 713 { .compatible = "samsung,exynos-sysmmu", },
714 { }, 714 { },
715}; 715};
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d665d0dc16e8..6961fc393f0b 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
245static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, 245static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
246 struct io_pgtable_cfg *cfg) 246 struct io_pgtable_cfg *cfg)
247{ 247{
248 if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) 248 if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
249 return; 249 return;
250 250
251 dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), 251 dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bd515be5b380..16d33ac19db0 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
371 int ret; 371 int ret;
372 372
373 spin_lock_irqsave(&dom->pgtlock, flags); 373 spin_lock_irqsave(&dom->pgtlock, flags);
374 ret = dom->iop->map(dom->iop, iova, paddr, size, prot); 374 ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
375 size, prot);
375 spin_unlock_irqrestore(&dom->pgtlock, flags); 376 spin_unlock_irqrestore(&dom->pgtlock, flags);
376 377
377 return ret; 378 return ret;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e8d89343d613..e88395605e32 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -107,6 +107,10 @@ struct its_node {
107 107
108#define ITS_ITT_ALIGN SZ_256 108#define ITS_ITT_ALIGN SZ_256
109 109
110/* The maximum number of VPEID bits supported by VLPI commands */
111#define ITS_MAX_VPEID_BITS (16)
112#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
113
110/* Convert page order to size in bytes */ 114/* Convert page order to size in bytes */
111#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) 115#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
112 116
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
308 312
309static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) 313static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
310{ 314{
311 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8); 315 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
312} 316}
313 317
314static void its_encode_valid(struct its_cmd_block *cmd, int valid) 318static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
318 322
319static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) 323static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
320{ 324{
321 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16); 325 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
322} 326}
323 327
324static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 328static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
358 362
359static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) 363static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
360{ 364{
361 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16); 365 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
362} 366}
363 367
364static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) 368static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1478 u64 val = its_read_baser(its, baser); 1482 u64 val = its_read_baser(its, baser);
1479 u64 esz = GITS_BASER_ENTRY_SIZE(val); 1483 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1480 u64 type = GITS_BASER_TYPE(val); 1484 u64 type = GITS_BASER_TYPE(val);
1485 u64 baser_phys, tmp;
1481 u32 alloc_pages; 1486 u32 alloc_pages;
1482 void *base; 1487 void *base;
1483 u64 tmp;
1484 1488
1485retry_alloc_baser: 1489retry_alloc_baser:
1486 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 1490 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
1496 if (!base) 1500 if (!base)
1497 return -ENOMEM; 1501 return -ENOMEM;
1498 1502
1503 baser_phys = virt_to_phys(base);
1504
1505 /* Check if the physical address of the memory is above 48bits */
1506 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1507
1508 /* 52bit PA is supported only when PageSize=64K */
1509 if (psz != SZ_64K) {
1510 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1511 free_pages((unsigned long)base, order);
1512 return -ENXIO;
1513 }
1514
1515 /* Convert 52bit PA to 48bit field */
1516 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1517 }
1518
1499retry_baser: 1519retry_baser:
1500 val = (virt_to_phys(base) | 1520 val = (baser_phys |
1501 (type << GITS_BASER_TYPE_SHIFT) | 1521 (type << GITS_BASER_TYPE_SHIFT) |
1502 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 1522 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1503 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | 1523 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
@@ -1582,13 +1602,12 @@ retry_baser:
1582 1602
1583static bool its_parse_indirect_baser(struct its_node *its, 1603static bool its_parse_indirect_baser(struct its_node *its,
1584 struct its_baser *baser, 1604 struct its_baser *baser,
1585 u32 psz, u32 *order) 1605 u32 psz, u32 *order, u32 ids)
1586{ 1606{
1587 u64 tmp = its_read_baser(its, baser); 1607 u64 tmp = its_read_baser(its, baser);
1588 u64 type = GITS_BASER_TYPE(tmp); 1608 u64 type = GITS_BASER_TYPE(tmp);
1589 u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 1609 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1590 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 1610 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1591 u32 ids = its->device_ids;
1592 u32 new_order = *order; 1611 u32 new_order = *order;
1593 bool indirect = false; 1612 bool indirect = false;
1594 1613
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
1680 continue; 1699 continue;
1681 1700
1682 case GITS_BASER_TYPE_DEVICE: 1701 case GITS_BASER_TYPE_DEVICE:
1702 indirect = its_parse_indirect_baser(its, baser,
1703 psz, &order,
1704 its->device_ids);
1683 case GITS_BASER_TYPE_VCPU: 1705 case GITS_BASER_TYPE_VCPU:
1684 indirect = its_parse_indirect_baser(its, baser, 1706 indirect = its_parse_indirect_baser(its, baser,
1685 psz, &order); 1707 psz, &order,
1708 ITS_MAX_VPEID_BITS);
1686 break; 1709 break;
1687 } 1710 }
1688 1711
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
2551 2574
2552static int its_vpe_id_alloc(void) 2575static int its_vpe_id_alloc(void)
2553{ 2576{
2554 return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 2577 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2555} 2578}
2556 2579
2557static void its_vpe_id_free(u16 id) 2580static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
2851 return -ENOMEM; 2874 return -ENOMEM;
2852 } 2875 }
2853 2876
2854 BUG_ON(entries != vpe_proxy.dev->nr_ites); 2877 BUG_ON(entries > vpe_proxy.dev->nr_ites);
2855 2878
2856 raw_spin_lock_init(&vpe_proxy.lock); 2879 raw_spin_lock_init(&vpe_proxy.lock);
2857 vpe_proxy.next_victim = 0; 2880 vpe_proxy.next_victim = 0;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 40159ac12ac8..c90976d7e53c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -175,14 +175,13 @@ static void gic_mask_irq(struct irq_data *d)
175 175
176static void gic_unmask_irq(struct irq_data *d) 176static void gic_unmask_irq(struct irq_data *d)
177{ 177{
178 struct cpumask *affinity = irq_data_get_affinity_mask(d);
179 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); 178 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
180 unsigned int cpu; 179 unsigned int cpu;
181 180
182 write_gic_smask(intr); 181 write_gic_smask(intr);
183 182
184 gic_clear_pcpu_masks(intr); 183 gic_clear_pcpu_masks(intr);
185 cpu = cpumask_first_and(affinity, cpu_online_mask); 184 cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
186 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); 185 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
187} 186}
188 187
@@ -420,13 +419,17 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
420 irq_hw_number_t hw, unsigned int cpu) 419 irq_hw_number_t hw, unsigned int cpu)
421{ 420{
422 int intr = GIC_HWIRQ_TO_SHARED(hw); 421 int intr = GIC_HWIRQ_TO_SHARED(hw);
422 struct irq_data *data;
423 unsigned long flags; 423 unsigned long flags;
424 424
425 data = irq_get_irq_data(virq);
426
425 spin_lock_irqsave(&gic_lock, flags); 427 spin_lock_irqsave(&gic_lock, flags);
426 write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 428 write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
427 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 429 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
428 gic_clear_pcpu_masks(intr); 430 gic_clear_pcpu_masks(intr);
429 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); 431 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
432 irq_data_update_effective_affinity(data, cpumask_of(cpu));
430 spin_unlock_irqrestore(&gic_lock, flags); 433 spin_unlock_irqrestore(&gic_lock, flags);
431 434
432 return 0; 435 return 0;
@@ -645,7 +648,7 @@ static int __init gic_of_init(struct device_node *node,
645 648
646 /* Find the first available CPU vector. */ 649 /* Find the first available CPU vector. */
647 i = 0; 650 i = 0;
648 reserved = (C_SW0 | C_SW1) >> __fls(C_SW0); 651 reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
649 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", 652 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
650 i++, &cpu_vec)) 653 i++, &cpu_vec))
651 reserved |= BIT(cpu_vec); 654 reserved |= BIT(cpu_vec);
@@ -684,11 +687,11 @@ static int __init gic_of_init(struct device_node *node,
684 687
685 gicconfig = read_gic_config(); 688 gicconfig = read_gic_config();
686 gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; 689 gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
687 gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS); 690 gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
688 gic_shared_intrs = (gic_shared_intrs + 1) * 8; 691 gic_shared_intrs = (gic_shared_intrs + 1) * 8;
689 692
690 gic_vpes = gicconfig & GIC_CONFIG_PVPS; 693 gic_vpes = gicconfig & GIC_CONFIG_PVPS;
691 gic_vpes >>= __fls(GIC_CONFIG_PVPS); 694 gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
692 gic_vpes = gic_vpes + 1; 695 gic_vpes = gic_vpes + 1;
693 696
694 if (cpu_has_veic) { 697 if (cpu_has_veic) {
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index bdbb5c0ff7fe..0c085303a583 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
141 for (i = 0; i < 2; i++) { 141 for (i = 0; i < 2; i++) {
142 ct[i].chip.irq_ack = irq_gc_ack_set_bit; 142 ct[i].chip.irq_ack = irq_gc_ack_set_bit;
143 ct[i].chip.irq_mask = irq_gc_mask_disable_reg; 143 ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
144 ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack; 144 ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
145 ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; 145 ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
146 ct[i].chip.irq_set_type = tangox_irq_set_type; 146 ct[i].chip.irq_set_type = tangox_irq_set_type;
147 ct[i].chip.name = gc->domain->name; 147 ct[i].chip.name = gc->domain->name;
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index bbbbe0898233..9a257f969300 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -112,6 +112,10 @@
112#define AS_PEAK_mA_TO_REG(a) \ 112#define AS_PEAK_mA_TO_REG(a) \
113 ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250) 113 ((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
114 114
115/* LED numbers for Devicetree */
116#define AS_LED_FLASH 0
117#define AS_LED_INDICATOR 1
118
115enum as_mode { 119enum as_mode {
116 AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT, 120 AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
117 AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT, 121 AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
@@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash,
491 struct device_node *node) 495 struct device_node *node)
492{ 496{
493 struct as3645a_config *cfg = &flash->cfg; 497 struct as3645a_config *cfg = &flash->cfg;
498 struct device_node *child;
494 const char *name; 499 const char *name;
495 int rval; 500 int rval;
496 501
497 flash->flash_node = of_get_child_by_name(node, "flash"); 502 for_each_child_of_node(node, child) {
503 u32 id = 0;
504
505 of_property_read_u32(child, "reg", &id);
506
507 switch (id) {
508 case AS_LED_FLASH:
509 flash->flash_node = of_node_get(child);
510 break;
511 case AS_LED_INDICATOR:
512 flash->indicator_node = of_node_get(child);
513 break;
514 default:
515 dev_warn(&flash->client->dev,
516 "unknown LED %u encountered, ignoring\n", id);
517 break;
518 }
519 }
520
498 if (!flash->flash_node) { 521 if (!flash->flash_node) {
499 dev_err(&flash->client->dev, "can't find flash node\n"); 522 dev_err(&flash->client->dev, "can't find flash node\n");
500 return -ENODEV; 523 return -ENODEV;
@@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash,
534 of_property_read_u32(flash->flash_node, "voltage-reference", 557 of_property_read_u32(flash->flash_node, "voltage-reference",
535 &cfg->voltage_reference); 558 &cfg->voltage_reference);
536 559
537 of_property_read_u32(flash->flash_node, "peak-current-limit", 560 of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
538 &cfg->peak); 561 &cfg->peak);
539 cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak); 562 cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
540 563
541 flash->indicator_node = of_get_child_by_name(node, "indicator");
542 if (!flash->indicator_node) { 564 if (!flash->indicator_node) {
543 dev_warn(&flash->client->dev, 565 dev_warn(&flash->client->dev,
544 "can't find indicator node\n"); 566 "can't find indicator node\n");
@@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client)
721 as3645a_set_control(flash, AS_MODE_EXT_TORCH, false); 743 as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
722 744
723 v4l2_flash_release(flash->vf); 745 v4l2_flash_release(flash->vf);
746 v4l2_flash_release(flash->vfind);
724 747
725 led_classdev_flash_unregister(&flash->fled); 748 led_classdev_flash_unregister(&flash->fled);
726 led_classdev_unregister(&flash->iled_cdev); 749 led_classdev_unregister(&flash->iled_cdev);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
64void __closure_wake_up(struct closure_waitlist *wait_list) 64void __closure_wake_up(struct closure_waitlist *wait_list)
65{ 65{
66 struct llist_node *list; 66 struct llist_node *list;
67 struct closure *cl; 67 struct closure *cl, *t;
68 struct llist_node *reverse = NULL; 68 struct llist_node *reverse = NULL;
69 69
70 list = llist_del_all(&wait_list->list); 70 list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
73 reverse = llist_reverse_order(list); 73 reverse = llist_reverse_order(list);
74 74
75 /* Then do the wakeups */ 75 /* Then do the wakeups */
76 llist_for_each_entry(cl, reverse, list) { 76 llist_for_each_entry_safe(cl, t, reverse, list) {
77 closure_set_waiting(cl, 0); 77 closure_set_waiting(cl, 0);
78 closure_sub(cl, CLOSURE_WAITING + 1); 78 closure_sub(cl, CLOSURE_WAITING + 1);
79 } 79 }
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..203144762f36 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
149 149
150extern atomic_t dm_global_event_nr; 150extern atomic_t dm_global_event_nr;
151extern wait_queue_head_t dm_global_eventq; 151extern wait_queue_head_t dm_global_eventq;
152void dm_issue_global_event(void);
152 153
153#endif 154#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..96ab46512e1f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
2466 kfree(cipher_api); 2466 kfree(cipher_api);
2467 return ret; 2467 return ret;
2468 } 2468 }
2469 kfree(cipher_api);
2469 2470
2470 return 0; 2471 return 0;
2471bad_mem: 2472bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
2584 ti->error = "Invalid feature value for sector_size"; 2585 ti->error = "Invalid feature value for sector_size";
2585 return -EINVAL; 2586 return -EINVAL;
2586 } 2587 }
2588 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
2589 ti->error = "Device size is not multiple of sector_size feature";
2590 return -EINVAL;
2591 }
2587 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; 2592 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
2588 } else if (!strcasecmp(opt_string, "iv_large_sectors")) 2593 } else if (!strcasecmp(opt_string, "iv_large_sectors"))
2589 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); 2594 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
477 * Round up the ptr to an 8-byte boundary. 477 * Round up the ptr to an 8-byte boundary.
478 */ 478 */
479#define ALIGN_MASK 7 479#define ALIGN_MASK 7
480static inline size_t align_val(size_t val)
481{
482 return (val + ALIGN_MASK) & ~ALIGN_MASK;
483}
480static inline void *align_ptr(void *ptr) 484static inline void *align_ptr(void *ptr)
481{ 485{
482 return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); 486 return (void *)align_val((size_t)ptr);
483} 487}
484 488
485/* 489/*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
505 struct hash_cell *hc; 509 struct hash_cell *hc;
506 size_t len, needed = 0; 510 size_t len, needed = 0;
507 struct gendisk *disk; 511 struct gendisk *disk;
508 struct dm_name_list *nl, *old_nl = NULL; 512 struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
509 uint32_t *event_nr; 513 uint32_t *event_nr;
510 514
511 down_write(&_hash_lock); 515 down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
516 */ 520 */
517 for (i = 0; i < NUM_BUCKETS; i++) { 521 for (i = 0; i < NUM_BUCKETS; i++) {
518 list_for_each_entry (hc, _name_buckets + i, name_list) { 522 list_for_each_entry (hc, _name_buckets + i, name_list) {
519 needed += sizeof(struct dm_name_list); 523 needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
520 needed += strlen(hc->name) + 1; 524 needed += align_val(sizeof(uint32_t));
521 needed += ALIGN_MASK;
522 needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
523 } 525 }
524 } 526 }
525 527
526 /* 528 /*
527 * Grab our output buffer. 529 * Grab our output buffer.
528 */ 530 */
529 nl = get_result_buffer(param, param_size, &len); 531 nl = orig_nl = get_result_buffer(param, param_size, &len);
530 if (len < needed) { 532 if (len < needed) {
531 param->flags |= DM_BUFFER_FULL_FLAG; 533 param->flags |= DM_BUFFER_FULL_FLAG;
532 goto out; 534 goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
549 strcpy(nl->name, hc->name); 551 strcpy(nl->name, hc->name);
550 552
551 old_nl = nl; 553 old_nl = nl;
552 event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1); 554 event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
553 *event_nr = dm_get_event_nr(hc->md); 555 *event_nr = dm_get_event_nr(hc->md);
554 nl = align_ptr(event_nr + 1); 556 nl = align_ptr(event_nr + 1);
555 } 557 }
556 } 558 }
559 /*
560 * If mismatch happens, security may be compromised due to buffer
561 * overflow, so it's better to crash.
562 */
563 BUG_ON((char *)nl - (char *)orig_nl != needed);
557 564
558 out: 565 out:
559 up_write(&_hash_lock); 566 up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
1621 * which has a variable size, is not used by the function processing 1628 * which has a variable size, is not used by the function processing
1622 * the ioctl. 1629 * the ioctl.
1623 */ 1630 */
1624#define IOCTL_FLAGS_NO_PARAMS 1 1631#define IOCTL_FLAGS_NO_PARAMS 1
1632#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
1625 1633
1626/*----------------------------------------------------------------- 1634/*-----------------------------------------------------------------
1627 * Implementation of open/close/ioctl on the special char 1635 * Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
1635 ioctl_fn fn; 1643 ioctl_fn fn;
1636 } _ioctls[] = { 1644 } _ioctls[] = {
1637 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ 1645 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
1638 {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all}, 1646 {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
1639 {DM_LIST_DEVICES_CMD, 0, list_devices}, 1647 {DM_LIST_DEVICES_CMD, 0, list_devices},
1640 1648
1641 {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create}, 1649 {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
1642 {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove}, 1650 {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
1643 {DM_DEV_RENAME_CMD, 0, dev_rename}, 1651 {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
1644 {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, 1652 {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
1645 {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, 1653 {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
1646 {DM_DEV_WAIT_CMD, 0, dev_wait}, 1654 {DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
1869 unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) 1877 unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
1870 DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); 1878 DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
1871 1879
1880 if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
1881 dm_issue_global_event();
1882
1872 /* 1883 /*
1873 * Copy the results back to userland. 1884 * Copy the results back to userland.
1874 */ 1885 */
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5bfe285ea9d1..2245d06d2045 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
3238 if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) 3238 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3239 return DM_MAPIO_REQUEUE; 3239 return DM_MAPIO_REQUEUE;
3240 3240
3241 mddev->pers->make_request(mddev, bio); 3241 md_handle_request(mddev, bio);
3242 3242
3243 return DM_MAPIO_SUBMITTED; 3243 return DM_MAPIO_SUBMITTED;
3244} 3244}
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
3297static sector_t rs_get_progress(struct raid_set *rs, 3297static sector_t rs_get_progress(struct raid_set *rs,
3298 sector_t resync_max_sectors, bool *array_in_sync) 3298 sector_t resync_max_sectors, bool *array_in_sync)
3299{ 3299{
3300 sector_t r, recovery_cp, curr_resync_completed; 3300 sector_t r, curr_resync_completed;
3301 struct mddev *mddev = &rs->md; 3301 struct mddev *mddev = &rs->md;
3302 3302
3303 curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; 3303 curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
3304 recovery_cp = mddev->recovery_cp;
3305 *array_in_sync = false; 3304 *array_in_sync = false;
3306 3305
3307 if (rs_is_raid0(rs)) { 3306 if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
3330 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3329 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3331 r = curr_resync_completed; 3330 r = curr_resync_completed;
3332 else 3331 else
3333 r = recovery_cp; 3332 r = mddev->recovery_cp;
3334 3333
3335 if (r == MaxSector) { 3334 if ((r == MaxSector) ||
3335 (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
3336 (mddev->curr_resync_completed == resync_max_sectors))) {
3336 /* 3337 /*
3337 * Sync complete. 3338 * Sync complete.
3338 */ 3339 */
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
3892 3893
3893static struct target_type raid_target = { 3894static struct target_type raid_target = {
3894 .name = "raid", 3895 .name = "raid",
3895 .version = {1, 12, 1}, 3896 .version = {1, 13, 0},
3896 .module = THIS_MODULE, 3897 .module = THIS_MODULE,
3897 .ctr = raid_ctr, 3898 .ctr = raid_ctr,
3898 .dtr = raid_dtr, 3899 .dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..4be85324f44d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
52atomic_t dm_global_event_nr = ATOMIC_INIT(0); 52atomic_t dm_global_event_nr = ATOMIC_INIT(0);
53DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); 53DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
54 54
55void dm_issue_global_event(void)
56{
57 atomic_inc(&dm_global_event_nr);
58 wake_up(&dm_global_eventq);
59}
60
55/* 61/*
56 * One of these is allocated per bio. 62 * One of these is allocated per bio.
57 */ 63 */
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
1865 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); 1871 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
1866 1872
1867 atomic_inc(&md->event_nr); 1873 atomic_inc(&md->event_nr);
1868 atomic_inc(&dm_global_event_nr);
1869 wake_up(&md->eventq); 1874 wake_up(&md->eventq);
1870 wake_up(&dm_global_eventq); 1875 dm_issue_global_event();
1871} 1876}
1872 1877
1873/* 1878/*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2283 } 2288 }
2284 2289
2285 map = __bind(md, table, &limits); 2290 map = __bind(md, table, &limits);
2291 dm_issue_global_event();
2286 2292
2287out: 2293out:
2288 mutex_unlock(&md->suspend_lock); 2294 mutex_unlock(&md->suspend_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 08fcaebc61bd..0ff1bbf6c90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
266 * call has finished, the bio has been linked into some internal structure 266 * call has finished, the bio has been linked into some internal structure
267 * and so is visible to ->quiesce(), so we don't need the refcount any more. 267 * and so is visible to ->quiesce(), so we don't need the refcount any more.
268 */ 268 */
269void md_handle_request(struct mddev *mddev, struct bio *bio)
270{
271check_suspended:
272 rcu_read_lock();
273 if (mddev->suspended) {
274 DEFINE_WAIT(__wait);
275 for (;;) {
276 prepare_to_wait(&mddev->sb_wait, &__wait,
277 TASK_UNINTERRUPTIBLE);
278 if (!mddev->suspended)
279 break;
280 rcu_read_unlock();
281 schedule();
282 rcu_read_lock();
283 }
284 finish_wait(&mddev->sb_wait, &__wait);
285 }
286 atomic_inc(&mddev->active_io);
287 rcu_read_unlock();
288
289 if (!mddev->pers->make_request(mddev, bio)) {
290 atomic_dec(&mddev->active_io);
291 wake_up(&mddev->sb_wait);
292 goto check_suspended;
293 }
294
295 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
296 wake_up(&mddev->sb_wait);
297}
298EXPORT_SYMBOL(md_handle_request);
299
269static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) 300static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
270{ 301{
271 const int rw = bio_data_dir(bio); 302 const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
285 bio_endio(bio); 316 bio_endio(bio);
286 return BLK_QC_T_NONE; 317 return BLK_QC_T_NONE;
287 } 318 }
288check_suspended:
289 rcu_read_lock();
290 if (mddev->suspended) {
291 DEFINE_WAIT(__wait);
292 for (;;) {
293 prepare_to_wait(&mddev->sb_wait, &__wait,
294 TASK_UNINTERRUPTIBLE);
295 if (!mddev->suspended)
296 break;
297 rcu_read_unlock();
298 schedule();
299 rcu_read_lock();
300 }
301 finish_wait(&mddev->sb_wait, &__wait);
302 }
303 atomic_inc(&mddev->active_io);
304 rcu_read_unlock();
305 319
306 /* 320 /*
307 * save the sectors now since our bio can 321 * save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
310 sectors = bio_sectors(bio); 324 sectors = bio_sectors(bio);
311 /* bio could be mergeable after passing to underlayer */ 325 /* bio could be mergeable after passing to underlayer */
312 bio->bi_opf &= ~REQ_NOMERGE; 326 bio->bi_opf &= ~REQ_NOMERGE;
313 if (!mddev->pers->make_request(mddev, bio)) { 327
314 atomic_dec(&mddev->active_io); 328 md_handle_request(mddev, bio);
315 wake_up(&mddev->sb_wait);
316 goto check_suspended;
317 }
318 329
319 cpu = part_stat_lock(); 330 cpu = part_stat_lock();
320 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 331 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
321 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); 332 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
322 part_stat_unlock(); 333 part_stat_unlock();
323 334
324 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
325 wake_up(&mddev->sb_wait);
326
327 return BLK_QC_T_NONE; 335 return BLK_QC_T_NONE;
328} 336}
329 337
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
439 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 447 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
440 struct bio *bio = mddev->flush_bio; 448 struct bio *bio = mddev->flush_bio;
441 449
450 /*
451 * must reset flush_bio before calling into md_handle_request to avoid a
452 * deadlock, because other bios passed md_handle_request suspend check
453 * could wait for this and below md_handle_request could wait for those
454 * bios because of suspend check
455 */
456 mddev->flush_bio = NULL;
457 wake_up(&mddev->sb_wait);
458
442 if (bio->bi_iter.bi_size == 0) 459 if (bio->bi_iter.bi_size == 0)
443 /* an empty barrier - all done */ 460 /* an empty barrier - all done */
444 bio_endio(bio); 461 bio_endio(bio);
445 else { 462 else {
446 bio->bi_opf &= ~REQ_PREFLUSH; 463 bio->bi_opf &= ~REQ_PREFLUSH;
447 mddev->pers->make_request(mddev, bio); 464 md_handle_request(mddev, bio);
448 } 465 }
449
450 mddev->flush_bio = NULL;
451 wake_up(&mddev->sb_wait);
452} 466}
453 467
454void md_flush_request(struct mddev *mddev, struct bio *bio) 468void md_flush_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 561d22b9a9a8..d8287d3cd1bf 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
692extern int md_rdev_init(struct md_rdev *rdev); 692extern int md_rdev_init(struct md_rdev *rdev);
693extern void md_rdev_clear(struct md_rdev *rdev); 693extern void md_rdev_clear(struct md_rdev *rdev);
694 694
695extern void md_handle_request(struct mddev *mddev, struct bio *bio);
695extern void mddev_suspend(struct mddev *mddev); 696extern void mddev_suspend(struct mddev *mddev);
696extern void mddev_resume(struct mddev *mddev); 697extern void mddev_resume(struct mddev *mddev);
697extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 698extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 076409455b60..928e24a07133 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6575,14 +6575,17 @@ static ssize_t
6575raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6575raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6576{ 6576{
6577 struct r5conf *conf; 6577 struct r5conf *conf;
6578 unsigned long new; 6578 unsigned int new;
6579 int err; 6579 int err;
6580 struct r5worker_group *new_groups, *old_groups; 6580 struct r5worker_group *new_groups, *old_groups;
6581 int group_cnt, worker_cnt_per_group; 6581 int group_cnt, worker_cnt_per_group;
6582 6582
6583 if (len >= PAGE_SIZE) 6583 if (len >= PAGE_SIZE)
6584 return -EINVAL; 6584 return -EINVAL;
6585 if (kstrtoul(page, 10, &new)) 6585 if (kstrtouint(page, 10, &new))
6586 return -EINVAL;
6587 /* 8192 should be big enough */
6588 if (new > 8192)
6586 return -EINVAL; 6589 return -EINVAL;
6587 6590
6588 err = mddev_lock(mddev); 6591 err = mddev_lock(mddev);
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index eed6c397d840..f8a808d45034 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1797 */ 1797 */
1798 switch (msg->msg[1]) { 1798 switch (msg->msg[1]) {
1799 case CEC_MSG_GET_CEC_VERSION: 1799 case CEC_MSG_GET_CEC_VERSION:
1800 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1801 case CEC_MSG_ABORT: 1800 case CEC_MSG_ABORT:
1802 case CEC_MSG_GIVE_DEVICE_POWER_STATUS: 1801 case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
1803 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1804 case CEC_MSG_GIVE_OSD_NAME: 1802 case CEC_MSG_GIVE_OSD_NAME:
1803 /*
1804 * These messages reply with a directed message, so ignore if
1805 * the initiator is Unregistered.
1806 */
1807 if (!adap->passthrough && from_unregistered)
1808 return 0;
1809 /* Fall through */
1810 case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
1805 case CEC_MSG_GIVE_FEATURES: 1811 case CEC_MSG_GIVE_FEATURES:
1812 case CEC_MSG_GIVE_PHYSICAL_ADDR:
1806 /* 1813 /*
1807 * Skip processing these messages if the passthrough mode 1814 * Skip processing these messages if the passthrough mode
1808 * is on. 1815 * is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1810 if (adap->passthrough) 1817 if (adap->passthrough)
1811 goto skip_processing; 1818 goto skip_processing;
1812 /* Ignore if addressing is wrong */ 1819 /* Ignore if addressing is wrong */
1813 if (is_broadcast || from_unregistered) 1820 if (is_broadcast)
1814 return 0; 1821 return 0;
1815 break; 1822 break;
1816 1823
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2fcba1616168..9139d01ba7ed 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
141static void dvb_frontend_invoke_release(struct dvb_frontend *fe, 141static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
142 void (*release)(struct dvb_frontend *fe)); 142 void (*release)(struct dvb_frontend *fe));
143 143
144static void dvb_frontend_free(struct kref *ref) 144static void __dvb_frontend_free(struct dvb_frontend *fe)
145{ 145{
146 struct dvb_frontend *fe =
147 container_of(ref, struct dvb_frontend, refcount);
148 struct dvb_frontend_private *fepriv = fe->frontend_priv; 146 struct dvb_frontend_private *fepriv = fe->frontend_priv;
149 147
148 if (!fepriv)
149 return;
150
150 dvb_free_device(fepriv->dvbdev); 151 dvb_free_device(fepriv->dvbdev);
151 152
152 dvb_frontend_invoke_release(fe, fe->ops.release); 153 dvb_frontend_invoke_release(fe, fe->ops.release);
153 154
154 kfree(fepriv); 155 kfree(fepriv);
156 fe->frontend_priv = NULL;
157}
158
159static void dvb_frontend_free(struct kref *ref)
160{
161 struct dvb_frontend *fe =
162 container_of(ref, struct dvb_frontend, refcount);
163
164 __dvb_frontend_free(fe);
155} 165}
156 166
157static void dvb_frontend_put(struct dvb_frontend *fe) 167static void dvb_frontend_put(struct dvb_frontend *fe)
158{ 168{
159 kref_put(&fe->refcount, dvb_frontend_free); 169 /*
170 * Check if the frontend was registered, as otherwise
171 * kref was not initialized yet.
172 */
173 if (fe->frontend_priv)
174 kref_put(&fe->refcount, dvb_frontend_free);
175 else
176 __dvb_frontend_free(fe);
160} 177}
161 178
162static void dvb_frontend_get(struct dvb_frontend *fe) 179static void dvb_frontend_get(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 224283fe100a..4d086a7248e9 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -55,29 +55,57 @@ struct dib3000mc_state {
55 55
56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) 56static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
57{ 57{
58 u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
59 u8 rb[2];
60 struct i2c_msg msg[2] = { 58 struct i2c_msg msg[2] = {
61 { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, 59 { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 },
62 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, 60 { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
63 }; 61 };
62 u16 word;
63 u8 *b;
64
65 b = kmalloc(4, GFP_KERNEL);
66 if (!b)
67 return 0;
68
69 b[0] = (reg >> 8) | 0x80;
70 b[1] = reg;
71 b[2] = 0;
72 b[3] = 0;
73
74 msg[0].buf = b;
75 msg[1].buf = b + 2;
64 76
65 if (i2c_transfer(state->i2c_adap, msg, 2) != 2) 77 if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
66 dprintk("i2c read error on %d\n",reg); 78 dprintk("i2c read error on %d\n",reg);
67 79
68 return (rb[0] << 8) | rb[1]; 80 word = (b[2] << 8) | b[3];
81 kfree(b);
82
83 return word;
69} 84}
70 85
71static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) 86static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
72{ 87{
73 u8 b[4] = {
74 (reg >> 8) & 0xff, reg & 0xff,
75 (val >> 8) & 0xff, val & 0xff,
76 };
77 struct i2c_msg msg = { 88 struct i2c_msg msg = {
78 .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 89 .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
79 }; 90 };
80 return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; 91 int rc;
92 u8 *b;
93
94 b = kmalloc(4, GFP_KERNEL);
95 if (!b)
96 return -ENOMEM;
97
98 b[0] = reg >> 8;
99 b[1] = reg;
100 b[2] = val >> 8;
101 b[3] = val;
102
103 msg.buf = b;
104
105 rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
106 kfree(b);
107
108 return rc;
81} 109}
82 110
83static int dib3000mc_identify(struct dib3000mc_state *state) 111static int dib3000mc_identify(struct dib3000mc_state *state)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 7bec3e028bee..5553b89b804e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
753 struct i2c_adapter *i2c, 753 struct i2c_adapter *i2c,
754 unsigned int pll_desc_id) 754 unsigned int pll_desc_id)
755{ 755{
756 u8 b1 [] = { 0 }; 756 u8 *b1;
757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, 757 struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
758 .buf = b1, .len = 1 };
759 struct dvb_pll_priv *priv = NULL; 758 struct dvb_pll_priv *priv = NULL;
760 int ret; 759 int ret;
761 const struct dvb_pll_desc *desc; 760 const struct dvb_pll_desc *desc;
762 761
762 b1 = kmalloc(1, GFP_KERNEL);
763 if (!b1)
764 return NULL;
765
766 b1[0] = 0;
767 msg.buf = b1;
768
763 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && 769 if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
764 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) 770 (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
765 pll_desc_id = id[dvb_pll_devcount]; 771 pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
773 fe->ops.i2c_gate_ctrl(fe, 1); 779 fe->ops.i2c_gate_ctrl(fe, 1);
774 780
775 ret = i2c_transfer (i2c, &msg, 1); 781 ret = i2c_transfer (i2c, &msg, 1);
776 if (ret != 1) 782 if (ret != 1) {
783 kfree(b1);
777 return NULL; 784 return NULL;
785 }
778 if (fe->ops.i2c_gate_ctrl) 786 if (fe->ops.i2c_gate_ctrl)
779 fe->ops.i2c_gate_ctrl(fe, 0); 787 fe->ops.i2c_gate_ctrl(fe, 0);
780 } 788 }
781 789
782 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); 790 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
783 if (priv == NULL) 791 if (!priv) {
792 kfree(b1);
784 return NULL; 793 return NULL;
794 }
785 795
786 priv->pll_i2c_address = pll_addr; 796 priv->pll_i2c_address = pll_addr;
787 priv->i2c = i2c; 797 priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
811 "insmod option" : "autodetected"); 821 "insmod option" : "autodetected");
812 } 822 }
813 823
824 kfree(b1);
825
814 return fe; 826 return fe;
815} 827}
816EXPORT_SYMBOL(dvb_pll_attach); 828EXPORT_SYMBOL(dvb_pll_attach);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e7cc49b8674..3c4f7fa7b9d8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
112 112
113config VIDEO_QCOM_CAMSS 113config VIDEO_QCOM_CAMSS
114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" 114 tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 115 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST 116 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
117 select VIDEOBUF2_DMA_SG 117 select VIDEOBUF2_DMA_SG
118 select V4L2_FWNODE 118 select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b21b3c2dc77f..b22d2dfcd3c2 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
2660 * 2660 *
2661 * Return -EINVAL or zero on success 2661 * Return -EINVAL or zero on success
2662 */ 2662 */
2663int vfe_set_selection(struct v4l2_subdev *sd, 2663static int vfe_set_selection(struct v4l2_subdev *sd,
2664 struct v4l2_subdev_pad_config *cfg, 2664 struct v4l2_subdev_pad_config *cfg,
2665 struct v4l2_subdev_selection *sel) 2665 struct v4l2_subdev_selection *sel)
2666{ 2666{
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 68933d208063..9b2a401a4891 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
682 hfi_session_abort(inst); 682 hfi_session_abort(inst);
683 683
684 load_scale_clocks(core); 684 load_scale_clocks(core);
685 INIT_LIST_HEAD(&inst->registeredbufs);
685 } 686 }
686 687
687 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); 688 venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..146ae6f25cdb 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
172{ 172{
173 u32 status = 0; 173 u32 status = 0;
174 174
175 status = readb(cec->reg + S5P_CEC_STATUS_0); 175 status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
176 status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
176 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; 177 status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
177 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; 178 status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
178 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; 179 status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 58d200e7c838..8837e2678bde 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
92 dev_dbg(cec->dev, "irq received\n"); 92 dev_dbg(cec->dev, "irq received\n");
93 93
94 if (status & CEC_STATUS_TX_DONE) { 94 if (status & CEC_STATUS_TX_DONE) {
95 if (status & CEC_STATUS_TX_ERROR) { 95 if (status & CEC_STATUS_TX_NACK) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
97 cec->tx = STATE_NACK;
98 } else if (status & CEC_STATUS_TX_ERROR) {
96 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); 99 dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
97 cec->tx = STATE_ERROR; 100 cec->tx = STATE_ERROR;
98 } else { 101 } else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
135 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); 138 cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
136 cec->tx = STATE_IDLE; 139 cec->tx = STATE_IDLE;
137 break; 140 break;
141 case STATE_NACK:
142 cec_transmit_done(cec->adap,
143 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
144 0, 1, 0, 0);
145 cec->tx = STATE_IDLE;
146 break;
138 case STATE_ERROR: 147 case STATE_ERROR:
139 cec_transmit_done(cec->adap, 148 cec_transmit_done(cec->adap,
140 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, 149 CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 8bcd8dc1aeb9..86ded522ef27 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -35,6 +35,7 @@
35#define CEC_STATUS_TX_TRANSFERRING (1 << 1) 35#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
36#define CEC_STATUS_TX_DONE (1 << 2) 36#define CEC_STATUS_TX_DONE (1 << 2)
37#define CEC_STATUS_TX_ERROR (1 << 3) 37#define CEC_STATUS_TX_ERROR (1 << 3)
38#define CEC_STATUS_TX_NACK (1 << 4)
38#define CEC_STATUS_TX_BYTES (0xFF << 8) 39#define CEC_STATUS_TX_BYTES (0xFF << 8)
39#define CEC_STATUS_RX_RUNNING (1 << 16) 40#define CEC_STATUS_RX_RUNNING (1 << 16)
40#define CEC_STATUS_RX_RECEIVING (1 << 17) 41#define CEC_STATUS_RX_RECEIVING (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
55 STATE_IDLE, 56 STATE_IDLE,
56 STATE_BUSY, 57 STATE_BUSY,
57 STATE_DONE, 58 STATE_DONE,
59 STATE_NACK,
58 STATE_ERROR 60 STATE_ERROR
59}; 61};
60 62
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index ed43a4212479..129b558acc92 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init);
245module_exit(ir_sharp_decode_exit); 245module_exit(ir_sharp_decode_exit);
246 246
247MODULE_LICENSE("GPL"); 247MODULE_LICENSE("GPL");
248MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); 248MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
249MODULE_DESCRIPTION("Sharp IR protocol decoder"); 249MODULE_DESCRIPTION("Sharp IR protocol decoder");
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 2e487f9a2cc3..4983eeb39f36 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) 38static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
39{ 39{
40 struct i2c_msg msg[2] = { 40 struct i2c_msg msg[2] = {
41 { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 }, 41 { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, 42 { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
43 }; 43 };
44 int rc = 0;
45 u8 *b;
46
47 b = kmalloc(2, GFP_KERNEL);
48 if (!b)
49 return -ENOMEM;
50
51 b[0] = reg;
52 b[1] = 0;
53
54 msg[0].buf = b;
55 msg[1].buf = b + 1;
44 56
45 if (i2c_transfer(priv->i2c, msg, 2) != 2) { 57 if (i2c_transfer(priv->i2c, msg, 2) != 2) {
46 printk(KERN_WARNING "mt2060 I2C read failed\n"); 58 printk(KERN_WARNING "mt2060 I2C read failed\n");
47 return -EREMOTEIO; 59 rc = -EREMOTEIO;
48 } 60 }
49 return 0; 61 *val = b[1];
62 kfree(b);
63
64 return rc;
50} 65}
51 66
52// Writes a single register 67// Writes a single register
53static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) 68static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
54{ 69{
55 u8 buf[2] = { reg, val };
56 struct i2c_msg msg = { 70 struct i2c_msg msg = {
57 .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 71 .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
58 }; 72 };
73 u8 *buf;
74 int rc = 0;
75
76 buf = kmalloc(2, GFP_KERNEL);
77 if (!buf)
78 return -ENOMEM;
79
80 buf[0] = reg;
81 buf[1] = val;
82
83 msg.buf = buf;
59 84
60 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 85 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
61 printk(KERN_WARNING "mt2060 I2C write failed\n"); 86 printk(KERN_WARNING "mt2060 I2C write failed\n");
62 return -EREMOTEIO; 87 rc = -EREMOTEIO;
63 } 88 }
64 return 0; 89 kfree(buf);
90 return rc;
65} 91}
66 92
67// Writes a set of consecutive registers 93// Writes a set of consecutive registers
68static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) 94static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
69{ 95{
70 int rem, val_len; 96 int rem, val_len;
71 u8 xfer_buf[16]; 97 u8 *xfer_buf;
98 int rc = 0;
72 struct i2c_msg msg = { 99 struct i2c_msg msg = {
73 .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf 100 .addr = priv->cfg->i2c_address, .flags = 0
74 }; 101 };
75 102
103 xfer_buf = kmalloc(16, GFP_KERNEL);
104 if (!xfer_buf)
105 return -ENOMEM;
106
107 msg.buf = xfer_buf;
108
76 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { 109 for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
77 val_len = min_t(int, rem, priv->i2c_max_regs); 110 val_len = min_t(int, rem, priv->i2c_max_regs);
78 msg.len = 1 + val_len; 111 msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
81 114
82 if (i2c_transfer(priv->i2c, &msg, 1) != 1) { 115 if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
83 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); 116 printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
84 return -EREMOTEIO; 117 rc = -EREMOTEIO;
118 break;
85 } 119 }
86 } 120 }
87 121
88 return 0; 122 kfree(xfer_buf);
123 return rc;
89} 124}
90 125
91// Initialisation sequences 126// Initialisation sequences
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 5dba23ca2e5f..dc9bc1807fdf 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
219 219
220 down_read(&mm->mmap_sem); 220 down_read(&mm->mmap_sem);
221 221
222 for (dar = addr; dar < addr + size; dar += page_size) { 222 vma = find_vma(mm, addr);
223 if (!vma || dar < vma->vm_start || dar > vma->vm_end) { 223 if (!vma) {
224 pr_err("Can't find vma for addr %016llx\n", addr);
225 rc = -EFAULT;
226 goto out;
227 }
228 /* get the size of the pages allocated */
229 page_size = vma_kernel_pagesize(vma);
230
231 for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
232 if (dar < vma->vm_start || dar >= vma->vm_end) {
224 vma = find_vma(mm, addr); 233 vma = find_vma(mm, addr);
225 if (!vma) { 234 if (!vma) {
226 pr_err("Can't find vma for addr %016llx\n", addr); 235 pr_err("Can't find vma for addr %016llx\n", addr);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c8307e8b4c16..0ccccbaf530d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
129 129
130#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
131
130#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
131#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ 133#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
132 134
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4ff40d319676..78b3172c8e6e 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, 93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, 94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
95 95
96 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
97
96 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
97 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
98 100
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
226 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 228 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
227 229
228 /* 230 /*
229 * For not wake-able HW runtime pm framework 231 * ME maps runtime suspend/resume to D0i states,
230 * can't be used on pci device level. 232 * hence we need to go around native PCI runtime service which
231 * Use domain runtime pm callbacks instead. 233 * eventually brings the device into D3cold/hot state,
232 */ 234 * but the mei device cannot wake up from D3 unlike from D0i3.
233 if (!pci_dev_run_wake(pdev)) 235 * To get around the PCI device native runtime pm,
234 mei_me_set_pm_domain(dev); 236 * ME uses runtime pm domain handlers which take precedence
237 * over the driver's pm handlers.
238 */
239 mei_me_set_pm_domain(dev);
235 240
236 if (mei_pg_is_enabled(dev)) 241 if (mei_pg_is_enabled(dev))
237 pm_runtime_put_noidle(&pdev->dev); 242 pm_runtime_put_noidle(&pdev->dev);
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
271 dev_dbg(&pdev->dev, "shutdown\n"); 276 dev_dbg(&pdev->dev, "shutdown\n");
272 mei_stop(dev); 277 mei_stop(dev);
273 278
274 if (!pci_dev_run_wake(pdev)) 279 mei_me_unset_pm_domain(dev);
275 mei_me_unset_pm_domain(dev);
276 280
277 mei_disable_interrupts(dev); 281 mei_disable_interrupts(dev);
278 free_irq(pdev->irq, dev); 282 free_irq(pdev->irq, dev);
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev)
300 dev_dbg(&pdev->dev, "stop\n"); 304 dev_dbg(&pdev->dev, "stop\n");
301 mei_stop(dev); 305 mei_stop(dev);
302 306
303 if (!pci_dev_run_wake(pdev)) 307 mei_me_unset_pm_domain(dev);
304 mei_me_unset_pm_domain(dev);
305 308
306 mei_disable_interrupts(dev); 309 mei_disable_interrupts(dev);
307 310
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e38a5f144373..0566f9bfa7de 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
145 145
146 /* 146 /*
147 * For not wake-able HW runtime pm framework 147 * TXE maps runtime suspend/resume to own power gating states,
148 * can't be used on pci device level. 148 * hence we need to go around native PCI runtime service which
149 * Use domain runtime pm callbacks instead. 149 * eventually brings the device into D3cold/hot state.
150 */ 150 * But the TXE device cannot wake up from D3 unlike from own
151 if (!pci_dev_run_wake(pdev)) 151 * power gating. To get around PCI device native runtime pm,
152 mei_txe_set_pm_domain(dev); 152 * TXE uses runtime pm domain handlers which take precedence.
153 */
154 mei_txe_set_pm_domain(dev);
153 155
154 pm_runtime_put_noidle(&pdev->dev); 156 pm_runtime_put_noidle(&pdev->dev);
155 157
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
186 dev_dbg(&pdev->dev, "shutdown\n"); 188 dev_dbg(&pdev->dev, "shutdown\n");
187 mei_stop(dev); 189 mei_stop(dev);
188 190
189 if (!pci_dev_run_wake(pdev)) 191 mei_txe_unset_pm_domain(dev);
190 mei_txe_unset_pm_domain(dev);
191 192
192 mei_disable_interrupts(dev); 193 mei_disable_interrupts(dev);
193 free_irq(pdev->irq, dev); 194 free_irq(pdev->irq, dev);
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev)
215 216
216 mei_stop(dev); 217 mei_stop(dev);
217 218
218 if (!pci_dev_run_wake(pdev)) 219 mei_txe_unset_pm_domain(dev);
219 mei_txe_unset_pm_domain(dev);
220 220
221 mei_disable_interrupts(dev); 221 mei_disable_interrupts(dev);
222 free_irq(pdev->irq, dev); 222 free_irq(pdev->irq, dev);
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
318 else 318 else
319 ret = -EAGAIN; 319 ret = -EAGAIN;
320 320
321 /* 321 /* keep irq on we are staying in D0 */
322 * If everything is okay we're about to enter PCI low
323 * power state (D3) therefor we need to disable the
324 * interrupts towards host.
325 * However if device is not wakeable we do not enter
326 * D-low state and we need to keep the interrupt kicking
327 */
328 if (!ret && pci_dev_run_wake(pdev))
329 mei_disable_interrupts(dev);
330 322
331 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); 323 dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret);
332 324
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 29fc1e662891..2ad7b5c69156 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
1634 } 1634 }
1635 1635
1636 mqrq->areq.mrq = &brq->mrq; 1636 mqrq->areq.mrq = &brq->mrq;
1637
1638 mmc_queue_bounce_pre(mqrq);
1639} 1637}
1640 1638
1641static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, 1639static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
1829 brq = &mq_rq->brq; 1827 brq = &mq_rq->brq;
1830 old_req = mmc_queue_req_to_req(mq_rq); 1828 old_req = mmc_queue_req_to_req(mq_rq);
1831 type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; 1829 type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
1832 mmc_queue_bounce_post(mq_rq);
1833 1830
1834 switch (status) { 1831 switch (status) {
1835 case MMC_BLK_SUCCESS: 1832 case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a7eb623f8daa..36217ad5e9b1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1286,6 +1286,23 @@ out_err:
1286 return err; 1286 return err;
1287} 1287}
1288 1288
1289static void mmc_select_driver_type(struct mmc_card *card)
1290{
1291 int card_drv_type, drive_strength, drv_type;
1292
1293 card_drv_type = card->ext_csd.raw_driver_strength |
1294 mmc_driver_type_mask(0);
1295
1296 drive_strength = mmc_select_drive_strength(card,
1297 card->ext_csd.hs200_max_dtr,
1298 card_drv_type, &drv_type);
1299
1300 card->drive_strength = drive_strength;
1301
1302 if (drv_type)
1303 mmc_set_driver_type(card->host, drv_type);
1304}
1305
1289static int mmc_select_hs400es(struct mmc_card *card) 1306static int mmc_select_hs400es(struct mmc_card *card)
1290{ 1307{
1291 struct mmc_host *host = card->host; 1308 struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
1341 goto out_err; 1358 goto out_err;
1342 } 1359 }
1343 1360
1361 mmc_select_driver_type(card);
1362
1344 /* Switch card to HS400 */ 1363 /* Switch card to HS400 */
1345 val = EXT_CSD_TIMING_HS400 | 1364 val = EXT_CSD_TIMING_HS400 |
1346 card->drive_strength << EXT_CSD_DRV_STR_SHIFT; 1365 card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
1374 return err; 1393 return err;
1375} 1394}
1376 1395
1377static void mmc_select_driver_type(struct mmc_card *card)
1378{
1379 int card_drv_type, drive_strength, drv_type;
1380
1381 card_drv_type = card->ext_csd.raw_driver_strength |
1382 mmc_driver_type_mask(0);
1383
1384 drive_strength = mmc_select_drive_strength(card,
1385 card->ext_csd.hs200_max_dtr,
1386 card_drv_type, &drv_type);
1387
1388 card->drive_strength = drive_strength;
1389
1390 if (drv_type)
1391 mmc_set_driver_type(card->host, drv_type);
1392}
1393
1394/* 1396/*
1395 * For device supporting HS200 mode, the following sequence 1397 * For device supporting HS200 mode, the following sequence
1396 * should be done before executing the tuning process. 1398 * should be done before executing the tuning process.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 74c663b1c0a7..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
23#include "core.h" 23#include "core.h"
24#include "card.h" 24#include "card.h"
25 25
26#define MMC_QUEUE_BOUNCESZ 65536
27
28/* 26/*
29 * Prepare a MMC request. This just filters out odd stuff. 27 * Prepare a MMC request. This just filters out odd stuff.
30 */ 28 */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
150 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); 148 queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
151} 149}
152 150
153static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
154{
155 unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
156
157 if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
158 return 0;
159
160 if (bouncesz > host->max_req_size)
161 bouncesz = host->max_req_size;
162 if (bouncesz > host->max_seg_size)
163 bouncesz = host->max_seg_size;
164 if (bouncesz > host->max_blk_count * 512)
165 bouncesz = host->max_blk_count * 512;
166
167 if (bouncesz <= 512)
168 return 0;
169
170 return bouncesz;
171}
172
173/** 151/**
174 * mmc_init_request() - initialize the MMC-specific per-request data 152 * mmc_init_request() - initialize the MMC-specific per-request data
175 * @q: the request queue 153 * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
184 struct mmc_card *card = mq->card; 162 struct mmc_card *card = mq->card;
185 struct mmc_host *host = card->host; 163 struct mmc_host *host = card->host;
186 164
187 if (card->bouncesz) { 165 mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
188 mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); 166 if (!mq_rq->sg)
189 if (!mq_rq->bounce_buf) 167 return -ENOMEM;
190 return -ENOMEM;
191 if (card->bouncesz > 512) {
192 mq_rq->sg = mmc_alloc_sg(1, gfp);
193 if (!mq_rq->sg)
194 return -ENOMEM;
195 mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
196 gfp);
197 if (!mq_rq->bounce_sg)
198 return -ENOMEM;
199 }
200 } else {
201 mq_rq->bounce_buf = NULL;
202 mq_rq->bounce_sg = NULL;
203 mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
204 if (!mq_rq->sg)
205 return -ENOMEM;
206 }
207 168
208 return 0; 169 return 0;
209} 170}
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
212{ 173{
213 struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); 174 struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
214 175
215 /* It is OK to kfree(NULL) so this will be smooth */
216 kfree(mq_rq->bounce_sg);
217 mq_rq->bounce_sg = NULL;
218
219 kfree(mq_rq->bounce_buf);
220 mq_rq->bounce_buf = NULL;
221
222 kfree(mq_rq->sg); 176 kfree(mq_rq->sg);
223 mq_rq->sg = NULL; 177 mq_rq->sg = NULL;
224} 178}
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
242 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) 196 if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
243 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; 197 limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
244 198
245 /*
246 * mmc_init_request() depends on card->bouncesz so it must be calculated
247 * before blk_init_allocated_queue() starts allocating requests.
248 */
249 card->bouncesz = mmc_queue_calc_bouncesz(host);
250
251 mq->card = card; 199 mq->card = card;
252 mq->queue = blk_alloc_queue(GFP_KERNEL); 200 mq->queue = blk_alloc_queue(GFP_KERNEL);
253 if (!mq->queue) 201 if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
271 if (mmc_can_erase(card)) 219 if (mmc_can_erase(card))
272 mmc_queue_setup_discard(mq->queue, card); 220 mmc_queue_setup_discard(mq->queue, card);
273 221
274 if (card->bouncesz) { 222 blk_queue_bounce_limit(mq->queue, limit);
275 blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); 223 blk_queue_max_hw_sectors(mq->queue,
276 blk_queue_max_segments(mq->queue, card->bouncesz / 512); 224 min(host->max_blk_count, host->max_req_size / 512));
277 blk_queue_max_segment_size(mq->queue, card->bouncesz); 225 blk_queue_max_segments(mq->queue, host->max_segs);
278 } else { 226 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
279 blk_queue_bounce_limit(mq->queue, limit);
280 blk_queue_max_hw_sectors(mq->queue,
281 min(host->max_blk_count, host->max_req_size / 512));
282 blk_queue_max_segments(mq->queue, host->max_segs);
283 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
284 }
285 227
286 sema_init(&mq->thread_sem, 1); 228 sema_init(&mq->thread_sem, 1);
287 229
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
370 */ 312 */
371unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) 313unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
372{ 314{
373 unsigned int sg_len;
374 size_t buflen;
375 struct scatterlist *sg;
376 struct request *req = mmc_queue_req_to_req(mqrq); 315 struct request *req = mmc_queue_req_to_req(mqrq);
377 int i;
378
379 if (!mqrq->bounce_buf)
380 return blk_rq_map_sg(mq->queue, req, mqrq->sg);
381
382 sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
383
384 mqrq->bounce_sg_len = sg_len;
385
386 buflen = 0;
387 for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
388 buflen += sg->length;
389
390 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
391
392 return 1;
393}
394
395/*
396 * If writing, bounce the data to the buffer before the request
397 * is sent to the host driver
398 */
399void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
400{
401 if (!mqrq->bounce_buf)
402 return;
403
404 if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
405 return;
406
407 sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
408 mqrq->bounce_buf, mqrq->sg[0].length);
409}
410
411/*
412 * If reading, bounce the data from the buffer after the request
413 * has been handled by the host driver
414 */
415void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
416{
417 if (!mqrq->bounce_buf)
418 return;
419
420 if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
421 return;
422 316
423 sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, 317 return blk_rq_map_sg(mq->queue, req, mqrq->sg);
424 mqrq->bounce_buf, mqrq->sg[0].length);
425} 318}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 04fc89360a7a..f18d3f656baa 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
49struct mmc_queue_req { 49struct mmc_queue_req {
50 struct mmc_blk_request brq; 50 struct mmc_blk_request brq;
51 struct scatterlist *sg; 51 struct scatterlist *sg;
52 char *bounce_buf;
53 struct scatterlist *bounce_sg;
54 unsigned int bounce_sg_len;
55 struct mmc_async_req areq; 52 struct mmc_async_req areq;
56 enum mmc_drv_op drv_op; 53 enum mmc_drv_op drv_op;
57 int drv_op_result; 54 int drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
81extern void mmc_cleanup_queue(struct mmc_queue *); 78extern void mmc_cleanup_queue(struct mmc_queue *);
82extern void mmc_queue_suspend(struct mmc_queue *); 79extern void mmc_queue_suspend(struct mmc_queue *);
83extern void mmc_queue_resume(struct mmc_queue *); 80extern void mmc_queue_resume(struct mmc_queue *);
84
85extern unsigned int mmc_queue_map_sg(struct mmc_queue *, 81extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
86 struct mmc_queue_req *); 82 struct mmc_queue_req *);
87extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
88extern void mmc_queue_bounce_post(struct mmc_queue_req *);
89 83
90extern int mmc_access_rpmb(struct mmc_queue *); 84extern int mmc_access_rpmb(struct mmc_queue *);
91 85
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 27fb625cbcf3..fbd29f00fca0 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
1038 */ 1038 */
1039 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1039 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1040 MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | 1040 MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
1041 MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; 1041 MMC_CAP_3_3V_DDR;
1042 1042
1043 if (host->use_sg) 1043 if (host->use_sg)
1044 mmc->max_segs = 16; 1044 mmc->max_segs = 16;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c885c2d4b904..85745ef179e2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
531 div->shift = __ffs(CLK_DIV_MASK); 531 div->shift = __ffs(CLK_DIV_MASK);
532 div->width = __builtin_popcountl(CLK_DIV_MASK); 532 div->width = __builtin_popcountl(CLK_DIV_MASK);
533 div->hw.init = &init; 533 div->hw.init = &init;
534 div->flags = (CLK_DIVIDER_ONE_BASED | 534 div->flags = CLK_DIVIDER_ONE_BASED;
535 CLK_DIVIDER_ROUND_CLOSEST);
536 535
537 clk = devm_clk_register(host->dev, &div->hw); 536 clk = devm_clk_register(host->dev, &div->hw);
538 if (WARN_ON(IS_ERR(clk))) 537 if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
717static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 716static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
718{ 717{
719 struct meson_host *host = mmc_priv(mmc); 718 struct meson_host *host = mmc_priv(mmc);
719 int ret;
720
721 /*
722 * If this is the initial tuning, try to get a sane Rx starting
723 * phase before doing the actual tuning.
724 */
725 if (!mmc->doing_retune) {
726 ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
727
728 if (ret)
729 return ret;
730 }
731
732 ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
733 if (ret)
734 return ret;
720 735
721 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); 736 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
722} 737}
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
746 case MMC_POWER_UP: 761 case MMC_POWER_UP:
747 if (!IS_ERR(mmc->supply.vmmc)) 762 if (!IS_ERR(mmc->supply.vmmc))
748 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 763 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
764
765 /* Reset phases */
766 clk_set_phase(host->rx_clk, 0);
767 clk_set_phase(host->tx_clk, 270);
768
749 break; 769 break;
750 770
751 case MMC_POWER_ON: 771 case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
759 host->vqmmc_enabled = true; 779 host->vqmmc_enabled = true;
760 } 780 }
761 781
762 /* Reset rx phase */
763 clk_set_phase(host->rx_clk, 0);
764 break; 782 break;
765 } 783 }
766 784
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 59ab194cb009..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
702 702
703 pxamci_init_ocr(host); 703 pxamci_init_ocr(host);
704 704
705 /* 705 mmc->caps = 0;
706 * This architecture used to disable bounce buffers through its
707 * defconfig, now it is done at runtime as a host property.
708 */
709 mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
710 host->cmdat = 0; 706 host->cmdat = 0;
711 if (!cpu_is_pxa25x()) { 707 if (!cpu_is_pxa25x()) {
712 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; 708 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f905f2361d12..8bae88a150fd 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -146,11 +146,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
146 WARN_ON(host->sg_len > 1); 146 WARN_ON(host->sg_len > 1);
147 147
148 /* This DMAC cannot handle if buffer is not 8-bytes alignment */ 148 /* This DMAC cannot handle if buffer is not 8-bytes alignment */
149 if (!IS_ALIGNED(sg->offset, 8)) { 149 if (!IS_ALIGNED(sg->offset, 8))
150 host->force_pio = true; 150 goto force_pio;
151 renesas_sdhi_internal_dmac_enable_dma(host, false);
152 return;
153 }
154 151
155 if (data->flags & MMC_DATA_READ) { 152 if (data->flags & MMC_DATA_READ) {
156 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 153 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +160,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
163 } 160 }
164 161
165 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir); 162 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
166 if (ret < 0) 163 if (ret == 0)
167 return; 164 goto force_pio;
168 165
169 renesas_sdhi_internal_dmac_enable_dma(host, true); 166 renesas_sdhi_internal_dmac_enable_dma(host, true);
170 167
@@ -176,6 +173,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
176 dtran_mode); 173 dtran_mode);
177 renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR, 174 renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
178 sg->dma_address); 175 sg->dma_address);
176
177 return;
178
179force_pio:
180 host->force_pio = true;
181 renesas_sdhi_internal_dmac_enable_dma(host, false);
179} 182}
180 183
181static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) 184static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index bbaddf18a1b3..67d787fa3306 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
392 392
393enum { 393enum {
394 INTEL_DSM_FNS = 0, 394 INTEL_DSM_FNS = 0,
395 INTEL_DSM_V18_SWITCH = 3,
395 INTEL_DSM_DRV_STRENGTH = 9, 396 INTEL_DSM_DRV_STRENGTH = 9,
396 INTEL_DSM_D3_RETUNE = 10, 397 INTEL_DSM_D3_RETUNE = 10,
397}; 398};
@@ -447,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
447 int err; 448 int err;
448 u32 val; 449 u32 val;
449 450
451 intel_host->d3_retune = true;
452
450 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); 453 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
451 if (err) { 454 if (err) {
452 pr_debug("%s: DSM not supported, error %d\n", 455 pr_debug("%s: DSM not supported, error %d\n",
@@ -557,6 +560,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
557 sdhci_writel(host, val, INTEL_HS400_ES_REG); 560 sdhci_writel(host, val, INTEL_HS400_ES_REG);
558} 561}
559 562
563static void sdhci_intel_voltage_switch(struct sdhci_host *host)
564{
565 struct sdhci_pci_slot *slot = sdhci_priv(host);
566 struct intel_host *intel_host = sdhci_pci_priv(slot);
567 struct device *dev = &slot->chip->pdev->dev;
568 u32 result = 0;
569 int err;
570
571 err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
572 pr_debug("%s: %s DSM error %d result %u\n",
573 mmc_hostname(host->mmc), __func__, err, result);
574}
575
560static const struct sdhci_ops sdhci_intel_byt_ops = { 576static const struct sdhci_ops sdhci_intel_byt_ops = {
561 .set_clock = sdhci_set_clock, 577 .set_clock = sdhci_set_clock,
562 .set_power = sdhci_intel_set_power, 578 .set_power = sdhci_intel_set_power,
@@ -565,6 +581,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
565 .reset = sdhci_reset, 581 .reset = sdhci_reset,
566 .set_uhs_signaling = sdhci_set_uhs_signaling, 582 .set_uhs_signaling = sdhci_set_uhs_signaling,
567 .hw_reset = sdhci_pci_hw_reset, 583 .hw_reset = sdhci_pci_hw_reset,
584 .voltage_switch = sdhci_intel_voltage_switch,
568}; 585};
569 586
570static void byt_read_dsm(struct sdhci_pci_slot *slot) 587static void byt_read_dsm(struct sdhci_pci_slot *slot)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 2eec2e652c53..0842bbc2d7ad 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
466{ 466{
467 struct sdhci_pltfm_host *pltfm_host; 467 struct sdhci_pltfm_host *pltfm_host;
468 struct sdhci_host *host; 468 struct sdhci_host *host;
469 struct xenon_priv *priv;
469 int err; 470 int err;
470 471
471 host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, 472 host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
474 return PTR_ERR(host); 475 return PTR_ERR(host);
475 476
476 pltfm_host = sdhci_priv(host); 477 pltfm_host = sdhci_priv(host);
478 priv = sdhci_pltfm_priv(pltfm_host);
477 479
478 /* 480 /*
479 * Link Xenon specific mmc_host_ops function, 481 * Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
491 if (err) 493 if (err)
492 goto free_pltfm; 494 goto free_pltfm;
493 495
496 priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
497 if (IS_ERR(priv->axi_clk)) {
498 err = PTR_ERR(priv->axi_clk);
499 if (err == -EPROBE_DEFER)
500 goto err_clk;
501 } else {
502 err = clk_prepare_enable(priv->axi_clk);
503 if (err)
504 goto err_clk;
505 }
506
494 err = mmc_of_parse(host->mmc); 507 err = mmc_of_parse(host->mmc);
495 if (err) 508 if (err)
496 goto err_clk; 509 goto err_clk_axi;
497 510
498 sdhci_get_of_property(pdev); 511 sdhci_get_of_property(pdev);
499 512
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
502 /* Xenon specific dt parse */ 515 /* Xenon specific dt parse */
503 err = xenon_probe_dt(pdev); 516 err = xenon_probe_dt(pdev);
504 if (err) 517 if (err)
505 goto err_clk; 518 goto err_clk_axi;
506 519
507 err = xenon_sdhc_prepare(host); 520 err = xenon_sdhc_prepare(host);
508 if (err) 521 if (err)
509 goto err_clk; 522 goto err_clk_axi;
510 523
511 pm_runtime_get_noresume(&pdev->dev); 524 pm_runtime_get_noresume(&pdev->dev);
512 pm_runtime_set_active(&pdev->dev); 525 pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
527 pm_runtime_disable(&pdev->dev); 540 pm_runtime_disable(&pdev->dev);
528 pm_runtime_put_noidle(&pdev->dev); 541 pm_runtime_put_noidle(&pdev->dev);
529 xenon_sdhc_unprepare(host); 542 xenon_sdhc_unprepare(host);
543err_clk_axi:
544 clk_disable_unprepare(priv->axi_clk);
530err_clk: 545err_clk:
531 clk_disable_unprepare(pltfm_host->clk); 546 clk_disable_unprepare(pltfm_host->clk);
532free_pltfm: 547free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
538{ 553{
539 struct sdhci_host *host = platform_get_drvdata(pdev); 554 struct sdhci_host *host = platform_get_drvdata(pdev);
540 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 555 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
556 struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
541 557
542 pm_runtime_get_sync(&pdev->dev); 558 pm_runtime_get_sync(&pdev->dev);
543 pm_runtime_disable(&pdev->dev); 559 pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
546 sdhci_remove_host(host, 0); 562 sdhci_remove_host(host, 0);
547 563
548 xenon_sdhc_unprepare(host); 564 xenon_sdhc_unprepare(host);
549 565 clk_disable_unprepare(priv->axi_clk);
550 clk_disable_unprepare(pltfm_host->clk); 566 clk_disable_unprepare(pltfm_host->clk);
551 567
552 sdhci_pltfm_free(pdev); 568 sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 2bc0510c0769..9994995c7c56 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -83,6 +83,7 @@ struct xenon_priv {
83 unsigned char bus_width; 83 unsigned char bus_width;
84 unsigned char timing; 84 unsigned char timing;
85 unsigned int clock; 85 unsigned int clock;
86 struct clk *axi_clk;
86 87
87 int phy_type; 88 int phy_type;
88 /* 89 /*
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 12cf8288d663..9c4e6199b854 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -47,6 +47,7 @@
47#include <linux/mmc/sdio.h> 47#include <linux/mmc/sdio.h>
48#include <linux/scatterlist.h> 48#include <linux/scatterlist.h>
49#include <linux/spinlock.h> 49#include <linux/spinlock.h>
50#include <linux/swiotlb.h>
50#include <linux/workqueue.h> 51#include <linux/workqueue.h>
51 52
52#include "tmio_mmc.h" 53#include "tmio_mmc.h"
@@ -129,50 +130,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
129 130
130#define CMDREQ_TIMEOUT 5000 131#define CMDREQ_TIMEOUT 5000
131 132
132#ifdef CONFIG_MMC_DEBUG
133
134#define STATUS_TO_TEXT(a, status, i) \
135 do { \
136 if ((status) & TMIO_STAT_##a) { \
137 if ((i)++) \
138 printk(KERN_DEBUG " | "); \
139 printk(KERN_DEBUG #a); \
140 } \
141 } while (0)
142
143static void pr_debug_status(u32 status)
144{
145 int i = 0;
146
147 pr_debug("status: %08x = ", status);
148 STATUS_TO_TEXT(CARD_REMOVE, status, i);
149 STATUS_TO_TEXT(CARD_INSERT, status, i);
150 STATUS_TO_TEXT(SIGSTATE, status, i);
151 STATUS_TO_TEXT(WRPROTECT, status, i);
152 STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
153 STATUS_TO_TEXT(CARD_INSERT_A, status, i);
154 STATUS_TO_TEXT(SIGSTATE_A, status, i);
155 STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
156 STATUS_TO_TEXT(STOPBIT_ERR, status, i);
157 STATUS_TO_TEXT(ILL_FUNC, status, i);
158 STATUS_TO_TEXT(CMD_BUSY, status, i);
159 STATUS_TO_TEXT(CMDRESPEND, status, i);
160 STATUS_TO_TEXT(DATAEND, status, i);
161 STATUS_TO_TEXT(CRCFAIL, status, i);
162 STATUS_TO_TEXT(DATATIMEOUT, status, i);
163 STATUS_TO_TEXT(CMDTIMEOUT, status, i);
164 STATUS_TO_TEXT(RXOVERFLOW, status, i);
165 STATUS_TO_TEXT(TXUNDERRUN, status, i);
166 STATUS_TO_TEXT(RXRDY, status, i);
167 STATUS_TO_TEXT(TXRQ, status, i);
168 STATUS_TO_TEXT(ILL_ACCESS, status, i);
169 printk("\n");
170}
171
172#else
173#define pr_debug_status(s) do { } while (0)
174#endif
175
176static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 133static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
177{ 134{
178 struct tmio_mmc_host *host = mmc_priv(mmc); 135 struct tmio_mmc_host *host = mmc_priv(mmc);
@@ -762,9 +719,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
762 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); 719 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
763 ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; 720 ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
764 721
765 pr_debug_status(status);
766 pr_debug_status(ireg);
767
768 /* Clear the status except the interrupt status */ 722 /* Clear the status except the interrupt status */
769 sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); 723 sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
770 724
@@ -1262,6 +1216,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
1262 mmc->max_blk_count = pdata->max_blk_count ? : 1216 mmc->max_blk_count = pdata->max_blk_count ? :
1263 (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; 1217 (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
1264 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1218 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1219 /*
1220 * Since swiotlb has memory size limitation, this will calculate
1221 * the maximum size locally (because we don't have any APIs for it now)
1222 * and check the current max_req_size. And then, this will update
1223 * the max_req_size if needed as a workaround.
1224 */
1225 if (swiotlb_max_segment()) {
1226 unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
1227
1228 if (mmc->max_req_size > max_size)
1229 mmc->max_req_size = max_size;
1230 }
1265 mmc->max_seg_size = mmc->max_req_size; 1231 mmc->max_seg_size = mmc->max_req_size;
1266 1232
1267 _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || 1233 _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 5736b0c90b33..a308e707392d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
581 slave->mtd.erasesize = parent->erasesize; 581 slave->mtd.erasesize = parent->erasesize;
582 } 582 }
583 583
584 /*
585 * Slave erasesize might differ from the master one if the master
586 * exposes several regions with different erasesize. Adjust
587 * wr_alignment accordingly.
588 */
589 if (!(slave->mtd.flags & MTD_NO_ERASE))
590 wr_alignment = slave->mtd.erasesize;
591
584 tmp = slave->offset; 592 tmp = slave->offset;
585 remainder = do_div(tmp, wr_alignment); 593 remainder = do_div(tmp, wr_alignment);
586 if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { 594 if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 146af8218314..8268636675ef 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
363 size += (req->ecc.strength + 1) * sizeof(u16); 363 size += (req->ecc.strength + 1) * sizeof(u16);
364 /* Reserve space for mu, dmu and delta. */ 364 /* Reserve space for mu, dmu and delta. */
365 size = ALIGN(size, sizeof(s32)); 365 size = ALIGN(size, sizeof(s32));
366 size += (req->ecc.strength + 1) * sizeof(s32); 366 size += (req->ecc.strength + 1) * sizeof(s32) * 3;
367 367
368 user = kzalloc(size, GFP_KERNEL); 368 user = kzalloc(size, GFP_KERNEL);
369 if (!user) 369 if (!user)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
182/* FLEXCAN hardware feature flags 182/* FLEXCAN hardware feature flags
183 * 183 *
184 * Below is some version info we got: 184 * Below is some version info we got:
185 * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- 185 * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
186 * Filter? connected? detection ception in MB 186 * Filter? connected? Passive detection ception in MB
187 * MX25 FlexCAN2 03.00.00.00 no no no no 187 * MX25 FlexCAN2 03.00.00.00 no no ? no no
188 * MX28 FlexCAN2 03.00.04.00 yes yes no no 188 * MX28 FlexCAN2 03.00.04.00 yes yes no no no
189 * MX35 FlexCAN2 03.00.00.00 no no no no 189 * MX35 FlexCAN2 03.00.00.00 no no ? no no
190 * MX53 FlexCAN2 03.00.00.00 yes no no no 190 * MX53 FlexCAN2 03.00.00.00 yes no no no no
191 * MX6s FlexCAN3 10.00.12.00 yes yes no yes 191 * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
192 * VF610 FlexCAN3 ? no yes yes yes? 192 * VF610 FlexCAN3 ? no yes ? yes yes?
193 * 193 *
194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 194 * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
195 */ 195 */
196#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ 196#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ 197#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ 198#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ 199#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ 200#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
201#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
201 202
202/* Structure of the message buffer */ 203/* Structure of the message buffer */
203struct flexcan_mb { 204struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
281}; 282};
282 283
283static const struct flexcan_devtype_data fsl_p1010_devtype_data = { 284static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
284 .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, 285 .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
286 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
285}; 287};
286 288
287static const struct flexcan_devtype_data fsl_imx28_devtype_data; 289static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
291};
288 292
289static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { 293static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
290 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | 294 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
291 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, 295 FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
292}; 296};
293 297
294static const struct flexcan_devtype_data fsl_vf610_devtype_data = { 298static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
335} 339}
336#endif 340#endif
337 341
342static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
343{
344 struct flexcan_regs __iomem *regs = priv->regs;
345 u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
346
347 flexcan_write(reg_ctrl, &regs->ctrl);
348}
349
350static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
351{
352 struct flexcan_regs __iomem *regs = priv->regs;
353 u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
354
355 flexcan_write(reg_ctrl, &regs->ctrl);
356}
357
338static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) 358static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
339{ 359{
340 if (!priv->reg_xceiver) 360 if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
713 struct flexcan_regs __iomem *regs = priv->regs; 733 struct flexcan_regs __iomem *regs = priv->regs;
714 irqreturn_t handled = IRQ_NONE; 734 irqreturn_t handled = IRQ_NONE;
715 u32 reg_iflag1, reg_esr; 735 u32 reg_iflag1, reg_esr;
736 enum can_state last_state = priv->can.state;
716 737
717 reg_iflag1 = flexcan_read(&regs->iflag1); 738 reg_iflag1 = flexcan_read(&regs->iflag1);
718 739
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
765 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr); 786 flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
766 } 787 }
767 788
768 /* state change interrupt */ 789 /* state change interrupt or broken error state quirk fix is enabled */
769 if (reg_esr & FLEXCAN_ESR_ERR_STATE) 790 if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
791 (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
792 FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
770 flexcan_irq_state(dev, reg_esr); 793 flexcan_irq_state(dev, reg_esr);
771 794
772 /* bus error IRQ - handle if bus error reporting is activated */ 795 /* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
774 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) 797 (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
775 flexcan_irq_bus_err(dev, reg_esr); 798 flexcan_irq_bus_err(dev, reg_esr);
776 799
800 /* availability of error interrupt among state transitions in case
801 * bus error reporting is de-activated and
802 * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
803 * +--------------------------------------------------------------+
804 * | +----------------------------------------------+ [stopped / |
805 * | | | sleeping] -+
806 * +-+-> active <-> warning <-> passive -> bus off -+
807 * ___________^^^^^^^^^^^^_______________________________
808 * disabled(1) enabled disabled
809 *
810 * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
811 */
812 if ((last_state != priv->can.state) &&
813 (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
814 !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
815 switch (priv->can.state) {
816 case CAN_STATE_ERROR_ACTIVE:
817 if (priv->devtype_data->quirks &
818 FLEXCAN_QUIRK_BROKEN_WERR_STATE)
819 flexcan_error_irq_enable(priv);
820 else
821 flexcan_error_irq_disable(priv);
822 break;
823
824 case CAN_STATE_ERROR_WARNING:
825 flexcan_error_irq_enable(priv);
826 break;
827
828 case CAN_STATE_ERROR_PASSIVE:
829 case CAN_STATE_BUS_OFF:
830 flexcan_error_irq_disable(priv);
831 break;
832
833 default:
834 break;
835 }
836 }
837
777 return handled; 838 return handled;
778} 839}
779 840
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
887 * on most Flexcan cores, too. Otherwise we don't get 948 * on most Flexcan cores, too. Otherwise we don't get
888 * any error warning or passive interrupts. 949 * any error warning or passive interrupts.
889 */ 950 */
890 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || 951 if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
891 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 952 priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
892 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; 953 reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
893 else 954 else
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..b0c80859f746 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
342 342
343 /* enter the selected mode */ 343 /* enter the selected mode */
344 mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); 344 mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
345 if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) 345 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
346 mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE; 346 mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
347 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 347 else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
348 mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE; 348 mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
811 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | 811 priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
812 CAN_CTRLMODE_LISTENONLY | 812 CAN_CTRLMODE_LISTENONLY |
813 CAN_CTRLMODE_LOOPBACK | 813 CAN_CTRLMODE_LOOPBACK |
814 CAN_CTRLMODE_PRESUME_ACK |
815 CAN_CTRLMODE_3_SAMPLES; 814 CAN_CTRLMODE_3_SAMPLES;
816 priv->base = addr; 815 priv->base = addr;
817 priv->clk = clk; 816 priv->clk = clk;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
333 } 333 }
334 334
335 cf->can_id = id & ESD_IDMASK; 335 cf->can_id = id & ESD_IDMASK;
336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); 336 cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
337 337
338 if (id & ESD_EXTID) 338 if (id & ESD_EXTID)
339 cf->can_id |= CAN_EFF_FLAG; 339 cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
375 375
376 gs_free_tx_context(txc); 376 gs_free_tx_context(txc);
377 377
378 atomic_dec(&dev->active_tx_urbs);
379
378 netif_wake_queue(netdev); 380 netif_wake_queue(netdev);
379 } 381 }
380 382
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
463 urb->transfer_buffer_length, 465 urb->transfer_buffer_length,
464 urb->transfer_buffer, 466 urb->transfer_buffer,
465 urb->transfer_dma); 467 urb->transfer_dma);
466
467 atomic_dec(&dev->active_tx_urbs);
468
469 if (!netif_device_present(netdev))
470 return;
471
472 if (netif_queue_stopped(netdev))
473 netif_wake_queue(netdev);
474} 468}
475 469
476static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, 470static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 18cc529fb807..9b18d96ef526 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
137#define CMD_RESET_ERROR_COUNTER 49 137#define CMD_RESET_ERROR_COUNTER 49
138#define CMD_TX_ACKNOWLEDGE 50 138#define CMD_TX_ACKNOWLEDGE 50
139#define CMD_CAN_ERROR_EVENT 51 139#define CMD_CAN_ERROR_EVENT 51
140#define CMD_FLUSH_QUEUE_REPLY 68
140 141
141#define CMD_LEAF_USB_THROTTLE 77 142#define CMD_LEAF_USB_THROTTLE 77
142#define CMD_LEAF_LOG_MESSAGE 106 143#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
1301 goto warn; 1302 goto warn;
1302 break; 1303 break;
1303 1304
1305 case CMD_FLUSH_QUEUE_REPLY:
1306 if (dev->family != KVASER_LEAF)
1307 goto warn;
1308 break;
1309
1304 default: 1310 default:
1305warn: dev_warn(dev->udev->dev.parent, 1311warn: dev_warn(dev->udev->dev.parent,
1306 "Unhandled message (%d)\n", msg->id); 1312 "Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
1609 if (err) 1615 if (err)
1610 netdev_warn(netdev, "Cannot flush queue, error %d\n", err); 1616 netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
1611 1617
1612 if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel)) 1618 err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
1619 if (err)
1613 netdev_warn(netdev, "Cannot reset card, error %d\n", err); 1620 netdev_warn(netdev, "Cannot reset card, error %d\n", err);
1614 1621
1615 err = kvaser_usb_stop_chip(priv); 1622 err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index dce7fa57eb55..f123ed57630d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
214 214
215static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) 215static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
216{ 216{
217 /* Use the same MAC Address as FD Pause frames for all ports */ 217 u16 val = addr[0] << 8 | addr[1];
218 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]); 218
219 /* The multicast bit is always transmitted as a zero, so the switch uses
220 * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
221 */
222 val &= 0xfeff;
223
224 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
219 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); 225 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
220 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); 226 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
221 227
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c6678aa9b4ef..d74c7335c512 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1100 }; 1100 };
1101 int i, err; 1101 int i, err;
1102 1102
1103 /* DSA and CPU ports have to be members of multiple vlans */
1104 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
1105 return 0;
1106
1103 if (!vid_begin) 1107 if (!vid_begin)
1104 return -EOPNOTSUPP; 1108 return -EOPNOTSUPP;
1105 1109
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
3947 if (chip->irq > 0) { 3951 if (chip->irq > 0) {
3948 if (chip->info->g2_irqs > 0) 3952 if (chip->info->g2_irqs > 0)
3949 mv88e6xxx_g2_irq_free(chip); 3953 mv88e6xxx_g2_irq_free(chip);
3954 mutex_lock(&chip->reg_lock);
3950 mv88e6xxx_g1_irq_free(chip); 3955 mv88e6xxx_g1_irq_free(chip);
3956 mutex_unlock(&chip->reg_lock);
3951 } 3957 }
3952} 3958}
3953 3959
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b1212debc2e1..967020fb26ee 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
742{ 742{
743 struct ena_adapter *adapter = netdev_priv(netdev); 743 struct ena_adapter *adapter = netdev_priv(netdev);
744 744
745 channels->max_rx = ENA_MAX_NUM_IO_QUEUES; 745 channels->max_rx = adapter->num_queues;
746 channels->max_tx = ENA_MAX_NUM_IO_QUEUES; 746 channels->max_tx = adapter->num_queues;
747 channels->max_other = 0; 747 channels->max_other = 0;
748 channels->max_combined = 0; 748 channels->max_combined = 0;
749 channels->rx_count = adapter->num_queues; 749 channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f7dc22f65d9f..c6bd5e24005d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
966 u64_stats_update_begin(&rx_ring->syncp); 966 u64_stats_update_begin(&rx_ring->syncp);
967 rx_ring->rx_stats.bad_csum++; 967 rx_ring->rx_stats.bad_csum++;
968 u64_stats_update_end(&rx_ring->syncp); 968 u64_stats_update_end(&rx_ring->syncp);
969 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 969 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
970 "RX IPv4 header checksum error\n"); 970 "RX IPv4 header checksum error\n");
971 return; 971 return;
972 } 972 }
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
979 u64_stats_update_begin(&rx_ring->syncp); 979 u64_stats_update_begin(&rx_ring->syncp);
980 rx_ring->rx_stats.bad_csum++; 980 rx_ring->rx_stats.bad_csum++;
981 u64_stats_update_end(&rx_ring->syncp); 981 u64_stats_update_end(&rx_ring->syncp);
982 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, 982 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
983 "RX L4 checksum error\n"); 983 "RX L4 checksum error\n");
984 skb->ip_summed = CHECKSUM_NONE; 984 skb->ip_summed = CHECKSUM_NONE;
985 return; 985 return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3064 if (ena_dev->mem_bar) 3064 if (ena_dev->mem_bar)
3065 devm_iounmap(&pdev->dev, ena_dev->mem_bar); 3065 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3066 3066
3067 devm_iounmap(&pdev->dev, ena_dev->reg_bar); 3067 if (ena_dev->reg_bar)
3068 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3068 3069
3069 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 3070 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3070 pci_release_selected_regions(pdev, release_bars); 3071 pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 214986436ece..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
22 22
23#define AQ_CFG_FORCE_LEGACY_INT 0U 23#define AQ_CFG_FORCE_LEGACY_INT 0U
24 24
25#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U 25#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
26#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU 26#define AQ_CFG_INTERRUPT_MODERATION_ON 1
27#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
28
29#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
30
27#define AQ_CFG_IRQ_MASK 0x1FFU 31#define AQ_CFG_IRQ_MASK 0x1FFU
28 32
29#define AQ_CFG_VECS_MAX 8U 33#define AQ_CFG_VECS_MAX 8U
@@ -51,6 +55,10 @@
51 55
52#define AQ_CFG_SKB_FRAGS_MAX 32U 56#define AQ_CFG_SKB_FRAGS_MAX 32U
53 57
58/* Number of descriptors available in one ring to resume this ring queue
59 */
60#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
61
54#define AQ_CFG_NAPI_WEIGHT 64U 62#define AQ_CFG_NAPI_WEIGHT 64U
55 63
56#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U 64#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
56 return aq_nic_set_link_ksettings(aq_nic, cmd); 56 return aq_nic_set_link_ksettings(aq_nic, cmd);
57} 57}
58 58
59/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
60static const unsigned int aq_ethtool_stat_queue_lines = 5U;
61static const unsigned int aq_ethtool_stat_queue_chars =
62 5U * ETH_GSTRING_LEN;
63static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { 59static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
64 "InPackets", 60 "InPackets",
65 "InUCast", 61 "InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
83 "InOctetsDma", 79 "InOctetsDma",
84 "OutOctetsDma", 80 "OutOctetsDma",
85 "InDroppedDma", 81 "InDroppedDma",
86 "Queue[0] InPackets", 82};
87 "Queue[0] OutPackets", 83
88 "Queue[0] InJumboPackets", 84static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
89 "Queue[0] InLroPackets", 85 "Queue[%d] InPackets",
90 "Queue[0] InErrors", 86 "Queue[%d] OutPackets",
91 "Queue[1] InPackets", 87 "Queue[%d] Restarts",
92 "Queue[1] OutPackets", 88 "Queue[%d] InJumboPackets",
93 "Queue[1] InJumboPackets", 89 "Queue[%d] InLroPackets",
94 "Queue[1] InLroPackets", 90 "Queue[%d] InErrors",
95 "Queue[1] InErrors",
96 "Queue[2] InPackets",
97 "Queue[2] OutPackets",
98 "Queue[2] InJumboPackets",
99 "Queue[2] InLroPackets",
100 "Queue[2] InErrors",
101 "Queue[3] InPackets",
102 "Queue[3] OutPackets",
103 "Queue[3] InJumboPackets",
104 "Queue[3] InLroPackets",
105 "Queue[3] InErrors",
106 "Queue[4] InPackets",
107 "Queue[4] OutPackets",
108 "Queue[4] InJumboPackets",
109 "Queue[4] InLroPackets",
110 "Queue[4] InErrors",
111 "Queue[5] InPackets",
112 "Queue[5] OutPackets",
113 "Queue[5] InJumboPackets",
114 "Queue[5] InLroPackets",
115 "Queue[5] InErrors",
116 "Queue[6] InPackets",
117 "Queue[6] OutPackets",
118 "Queue[6] InJumboPackets",
119 "Queue[6] InLroPackets",
120 "Queue[6] InErrors",
121 "Queue[7] InPackets",
122 "Queue[7] OutPackets",
123 "Queue[7] InJumboPackets",
124 "Queue[7] InLroPackets",
125 "Queue[7] InErrors",
126}; 91};
127 92
128static void aq_ethtool_stats(struct net_device *ndev, 93static void aq_ethtool_stats(struct net_device *ndev,
129 struct ethtool_stats *stats, u64 *data) 94 struct ethtool_stats *stats, u64 *data)
130{ 95{
131 struct aq_nic_s *aq_nic = netdev_priv(ndev); 96 struct aq_nic_s *aq_nic = netdev_priv(ndev);
97 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
132 98
133/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ 99 memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
134 BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); 100 ARRAY_SIZE(aq_ethtool_queue_stat_names) *
135 memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); 101 cfg->vecs) * sizeof(u64));
136 aq_nic_get_stats(aq_nic, data); 102 aq_nic_get_stats(aq_nic, data);
137} 103}
138 104
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
154 120
155 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", 121 strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
156 sizeof(drvinfo->bus_info)); 122 sizeof(drvinfo->bus_info));
157 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - 123 drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
158 (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; 124 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
159 drvinfo->testinfo_len = 0; 125 drvinfo->testinfo_len = 0;
160 drvinfo->regdump_len = regs_count; 126 drvinfo->regdump_len = regs_count;
161 drvinfo->eedump_len = 0; 127 drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
164static void aq_ethtool_get_strings(struct net_device *ndev, 130static void aq_ethtool_get_strings(struct net_device *ndev,
165 u32 stringset, u8 *data) 131 u32 stringset, u8 *data)
166{ 132{
133 int i, si;
167 struct aq_nic_s *aq_nic = netdev_priv(ndev); 134 struct aq_nic_s *aq_nic = netdev_priv(ndev);
168 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); 135 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
169 136 u8 *p = data;
170 if (stringset == ETH_SS_STATS) 137
171 memcpy(data, *aq_ethtool_stat_names, 138 if (stringset == ETH_SS_STATS) {
172 sizeof(aq_ethtool_stat_names) - 139 memcpy(p, *aq_ethtool_stat_names,
173 (AQ_CFG_VECS_MAX - cfg->vecs) * 140 sizeof(aq_ethtool_stat_names));
174 aq_ethtool_stat_queue_chars); 141 p = p + sizeof(aq_ethtool_stat_names);
142 for (i = 0; i < cfg->vecs; i++) {
143 for (si = 0;
144 si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
145 si++) {
146 snprintf(p, ETH_GSTRING_LEN,
147 aq_ethtool_queue_stat_names[si], i);
148 p += ETH_GSTRING_LEN;
149 }
150 }
151 }
175} 152}
176 153
177static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) 154static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
182 159
183 switch (stringset) { 160 switch (stringset) {
184 case ETH_SS_STATS: 161 case ETH_SS_STATS:
185 ret = ARRAY_SIZE(aq_ethtool_stat_names) - 162 ret = ARRAY_SIZE(aq_ethtool_stat_names) +
186 (AQ_CFG_VECS_MAX - cfg->vecs) * 163 cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
187 aq_ethtool_stat_queue_lines;
188 break; 164 break;
189 default: 165 default:
190 ret = -EOPNOTSUPP; 166 ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
245 return err; 221 return err;
246} 222}
247 223
224int aq_ethtool_get_coalesce(struct net_device *ndev,
225 struct ethtool_coalesce *coal)
226{
227 struct aq_nic_s *aq_nic = netdev_priv(ndev);
228 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
229
230 if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
231 cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
232 coal->rx_coalesce_usecs = cfg->rx_itr;
233 coal->tx_coalesce_usecs = cfg->tx_itr;
234 coal->rx_max_coalesced_frames = 0;
235 coal->tx_max_coalesced_frames = 0;
236 } else {
237 coal->rx_coalesce_usecs = 0;
238 coal->tx_coalesce_usecs = 0;
239 coal->rx_max_coalesced_frames = 1;
240 coal->tx_max_coalesced_frames = 1;
241 }
242 return 0;
243}
244
245int aq_ethtool_set_coalesce(struct net_device *ndev,
246 struct ethtool_coalesce *coal)
247{
248 struct aq_nic_s *aq_nic = netdev_priv(ndev);
249 struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
250
251 /* This is not yet supported
252 */
253 if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
254 return -EOPNOTSUPP;
255
256 /* Atlantic only supports timing based coalescing
257 */
258 if (coal->rx_max_coalesced_frames > 1 ||
259 coal->rx_coalesce_usecs_irq ||
260 coal->rx_max_coalesced_frames_irq)
261 return -EOPNOTSUPP;
262
263 if (coal->tx_max_coalesced_frames > 1 ||
264 coal->tx_coalesce_usecs_irq ||
265 coal->tx_max_coalesced_frames_irq)
266 return -EOPNOTSUPP;
267
268 /* We do not support frame counting. Check this
269 */
270 if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
271 return -EOPNOTSUPP;
272 if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
273 return -EOPNOTSUPP;
274
275 if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
276 coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
277 return -EINVAL;
278
279 cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
280
281 cfg->rx_itr = coal->rx_coalesce_usecs;
282 cfg->tx_itr = coal->tx_coalesce_usecs;
283
284 return aq_nic_update_interrupt_moderation_settings(aq_nic);
285}
286
248const struct ethtool_ops aq_ethtool_ops = { 287const struct ethtool_ops aq_ethtool_ops = {
249 .get_link = aq_ethtool_get_link, 288 .get_link = aq_ethtool_get_link,
250 .get_regs_len = aq_ethtool_get_regs_len, 289 .get_regs_len = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
259 .get_ethtool_stats = aq_ethtool_stats, 298 .get_ethtool_stats = aq_ethtool_stats,
260 .get_link_ksettings = aq_ethtool_get_link_ksettings, 299 .get_link_ksettings = aq_ethtool_get_link_ksettings,
261 .set_link_ksettings = aq_ethtool_set_link_ksettings, 300 .set_link_ksettings = aq_ethtool_set_link_ksettings,
301 .get_coalesce = aq_ethtool_get_coalesce,
302 .set_coalesce = aq_ethtool_set_coalesce,
262}; 303};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
151 [ETH_ALEN], 151 [ETH_ALEN],
152 u32 count); 152 u32 count);
153 153
154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, 154 int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
155 bool itr_enabled);
156 155
157 int (*hw_rss_set)(struct aq_hw_s *self, 156 int (*hw_rss_set)(struct aq_hw_s *self,
158 struct aq_rss_parameters *rss_params); 157 struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
163 int (*hw_get_regs)(struct aq_hw_s *self, 162 int (*hw_get_regs)(struct aq_hw_s *self,
164 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); 163 struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
165 164
165 int (*hw_update_stats)(struct aq_hw_s *self);
166
166 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
167 unsigned int *p_count); 168 unsigned int *p_count);
168 169
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6ac9e2602d6d..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
16#include "aq_pci_func.h" 16#include "aq_pci_func.h"
17#include "aq_nic_internal.h" 17#include "aq_nic_internal.h"
18 18
19#include <linux/moduleparam.h>
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
21#include <linux/timer.h> 22#include <linux/timer.h>
@@ -24,6 +25,18 @@
24#include <linux/tcp.h> 25#include <linux/tcp.h>
25#include <net/ip.h> 26#include <net/ip.h>
26 27
28static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
29module_param_named(aq_itr, aq_itr, uint, 0644);
30MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
31
32static unsigned int aq_itr_tx;
33module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
34MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
35
36static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39
27static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
28{ 41{
29 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
61 74
62 cfg->is_polling = AQ_CFG_IS_POLLING_DEF; 75 cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
63 76
64 cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; 77 cfg->itr = aq_itr;
65 cfg->itr = cfg->is_interrupt_moderation ? 78 cfg->tx_itr = aq_itr_tx;
66 AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; 79 cfg->rx_itr = aq_itr_rx;
67 80
68 cfg->is_rss = AQ_CFG_IS_RSS_DEF; 81 cfg->is_rss = AQ_CFG_IS_RSS_DEF;
69 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; 82 cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -119,6 +132,37 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
119 return 0; 132 return 0;
120} 133}
121 134
135static int aq_nic_update_link_status(struct aq_nic_s *self)
136{
137 int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
138
139 if (err)
140 return err;
141
142 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
143 pr_info("%s: link change old %d new %d\n",
144 AQ_CFG_DRV_NAME, self->link_status.mbps,
145 self->aq_hw->aq_link_status.mbps);
146 aq_nic_update_interrupt_moderation_settings(self);
147 }
148
149 self->link_status = self->aq_hw->aq_link_status;
150 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
151 aq_utils_obj_set(&self->header.flags,
152 AQ_NIC_FLAG_STARTED);
153 aq_utils_obj_clear(&self->header.flags,
154 AQ_NIC_LINK_DOWN);
155 netif_carrier_on(self->ndev);
156 netif_tx_wake_all_queues(self->ndev);
157 }
158 if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
159 netif_carrier_off(self->ndev);
160 netif_tx_disable(self->ndev);
161 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
162 }
163 return 0;
164}
165
122static void aq_nic_service_timer_cb(unsigned long param) 166static void aq_nic_service_timer_cb(unsigned long param)
123{ 167{
124 struct aq_nic_s *self = (struct aq_nic_s *)param; 168 struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,25 +175,12 @@ static void aq_nic_service_timer_cb(unsigned long param)
131 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 175 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
132 goto err_exit; 176 goto err_exit;
133 177
134 err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); 178 err = aq_nic_update_link_status(self);
135 if (err < 0) 179 if (err)
136 goto err_exit; 180 goto err_exit;
137 181
138 self->link_status = self->aq_hw->aq_link_status; 182 if (self->aq_hw_ops.hw_update_stats)
139 183 self->aq_hw_ops.hw_update_stats(self->aq_hw);
140 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
141 self->aq_nic_cfg.is_interrupt_moderation);
142
143 if (self->link_status.mbps) {
144 aq_utils_obj_set(&self->header.flags,
145 AQ_NIC_FLAG_STARTED);
146 aq_utils_obj_clear(&self->header.flags,
147 AQ_NIC_LINK_DOWN);
148 netif_carrier_on(self->ndev);
149 } else {
150 netif_carrier_off(self->ndev);
151 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
152 }
153 184
154 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
155 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -214,7 +245,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
214 SET_NETDEV_DEV(ndev, dev); 245 SET_NETDEV_DEV(ndev, dev);
215 246
216 ndev->if_port = port; 247 ndev->if_port = port;
217 ndev->min_mtu = ETH_MIN_MTU;
218 self->ndev = ndev; 248 self->ndev = ndev;
219 249
220 self->aq_pci_func = aq_pci_func; 250 self->aq_pci_func = aq_pci_func;
@@ -241,7 +271,6 @@ err_exit:
241int aq_nic_ndev_register(struct aq_nic_s *self) 271int aq_nic_ndev_register(struct aq_nic_s *self)
242{ 272{
243 int err = 0; 273 int err = 0;
244 unsigned int i = 0U;
245 274
246 if (!self->ndev) { 275 if (!self->ndev) {
247 err = -EINVAL; 276 err = -EINVAL;
@@ -263,8 +292,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
263 292
264 netif_carrier_off(self->ndev); 293 netif_carrier_off(self->ndev);
265 294
266 for (i = AQ_CFG_VECS_MAX; i--;) 295 netif_tx_disable(self->ndev);
267 aq_nic_ndev_queue_stop(self, i);
268 296
269 err = register_netdev(self->ndev); 297 err = register_netdev(self->ndev);
270 if (err < 0) 298 if (err < 0)
@@ -283,6 +311,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
283 self->ndev->features = aq_hw_caps->hw_features; 311 self->ndev->features = aq_hw_caps->hw_features;
284 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 312 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
285 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 313 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
314 self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
286 315
287 return 0; 316 return 0;
288} 317}
@@ -318,12 +347,9 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
318 err = -EINVAL; 347 err = -EINVAL;
319 goto err_exit; 348 goto err_exit;
320 } 349 }
321 if (netif_running(ndev)) { 350 if (netif_running(ndev))
322 unsigned int i; 351 netif_tx_disable(ndev);
323 352 netif_carrier_off(self->ndev);
324 for (i = AQ_CFG_VECS_MAX; i--;)
325 netif_stop_subqueue(ndev, i);
326 }
327 353
328 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; 354 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
329 self->aq_vecs++) { 355 self->aq_vecs++) {
@@ -383,16 +409,6 @@ err_exit:
383 return err; 409 return err;
384} 410}
385 411
386void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
387{
388 netif_start_subqueue(self->ndev, idx);
389}
390
391void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
392{
393 netif_stop_subqueue(self->ndev, idx);
394}
395
396int aq_nic_start(struct aq_nic_s *self) 412int aq_nic_start(struct aq_nic_s *self)
397{ 413{
398 struct aq_vec_s *aq_vec = NULL; 414 struct aq_vec_s *aq_vec = NULL;
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
421 if (err < 0) 437 if (err < 0)
422 goto err_exit; 438 goto err_exit;
423 439
424 err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 440 err = aq_nic_update_interrupt_moderation_settings(self);
425 self->aq_nic_cfg.is_interrupt_moderation); 441 if (err)
426 if (err < 0)
427 goto err_exit; 442 goto err_exit;
428 setup_timer(&self->service_timer, &aq_nic_service_timer_cb, 443 setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
429 (unsigned long)self); 444 (unsigned long)self);
@@ -451,10 +466,6 @@ int aq_nic_start(struct aq_nic_s *self)
451 goto err_exit; 466 goto err_exit;
452 } 467 }
453 468
454 for (i = 0U, aq_vec = self->aq_vec[0];
455 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
456 aq_nic_ndev_queue_start(self, i);
457
458 err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); 469 err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
459 if (err < 0) 470 if (err < 0)
460 goto err_exit; 471 goto err_exit;
@@ -463,6 +474,8 @@ int aq_nic_start(struct aq_nic_s *self)
463 if (err < 0) 474 if (err < 0)
464 goto err_exit; 475 goto err_exit;
465 476
477 netif_tx_start_all_queues(self->ndev);
478
466err_exit: 479err_exit:
467 return err; 480 return err;
468} 481}
@@ -475,6 +488,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
475 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 488 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
476 unsigned int frag_count = 0U; 489 unsigned int frag_count = 0U;
477 unsigned int dx = ring->sw_tail; 490 unsigned int dx = ring->sw_tail;
491 struct aq_ring_buff_s *first = NULL;
478 struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; 492 struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
479 493
480 if (unlikely(skb_is_gso(skb))) { 494 if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +499,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
485 dx_buff->len_l4 = tcp_hdrlen(skb); 499 dx_buff->len_l4 = tcp_hdrlen(skb);
486 dx_buff->mss = skb_shinfo(skb)->gso_size; 500 dx_buff->mss = skb_shinfo(skb)->gso_size;
487 dx_buff->is_txc = 1U; 501 dx_buff->is_txc = 1U;
502 dx_buff->eop_index = 0xffffU;
488 503
489 dx_buff->is_ipv6 = 504 dx_buff->is_ipv6 =
490 (ip_hdr(skb)->version == 6) ? 1U : 0U; 505 (ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +519,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
504 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) 519 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
505 goto exit; 520 goto exit;
506 521
522 first = dx_buff;
507 dx_buff->len_pkt = skb->len; 523 dx_buff->len_pkt = skb->len;
508 dx_buff->is_sop = 1U; 524 dx_buff->is_sop = 1U;
509 dx_buff->is_mapped = 1U; 525 dx_buff->is_mapped = 1U;
@@ -532,40 +548,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
532 548
533 for (; nr_frags--; ++frag_count) { 549 for (; nr_frags--; ++frag_count) {
534 unsigned int frag_len = 0U; 550 unsigned int frag_len = 0U;
551 unsigned int buff_offset = 0U;
552 unsigned int buff_size = 0U;
535 dma_addr_t frag_pa; 553 dma_addr_t frag_pa;
536 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; 554 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
537 555
538 frag_len = skb_frag_size(frag); 556 frag_len = skb_frag_size(frag);
539 frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
540 frag_len, DMA_TO_DEVICE);
541 557
542 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) 558 while (frag_len) {
543 goto mapping_error; 559 if (frag_len > AQ_CFG_TX_FRAME_MAX)
560 buff_size = AQ_CFG_TX_FRAME_MAX;
561 else
562 buff_size = frag_len;
563
564 frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
565 frag,
566 buff_offset,
567 buff_size,
568 DMA_TO_DEVICE);
569
570 if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
571 frag_pa)))
572 goto mapping_error;
544 573
545 while (frag_len > AQ_CFG_TX_FRAME_MAX) {
546 dx = aq_ring_next_dx(ring, dx); 574 dx = aq_ring_next_dx(ring, dx);
547 dx_buff = &ring->buff_ring[dx]; 575 dx_buff = &ring->buff_ring[dx];
548 576
549 dx_buff->flags = 0U; 577 dx_buff->flags = 0U;
550 dx_buff->len = AQ_CFG_TX_FRAME_MAX; 578 dx_buff->len = buff_size;
551 dx_buff->pa = frag_pa; 579 dx_buff->pa = frag_pa;
552 dx_buff->is_mapped = 1U; 580 dx_buff->is_mapped = 1U;
581 dx_buff->eop_index = 0xffffU;
582
583 frag_len -= buff_size;
584 buff_offset += buff_size;
553 585
554 frag_len -= AQ_CFG_TX_FRAME_MAX;
555 frag_pa += AQ_CFG_TX_FRAME_MAX;
556 ++ret; 586 ++ret;
557 } 587 }
558
559 dx = aq_ring_next_dx(ring, dx);
560 dx_buff = &ring->buff_ring[dx];
561
562 dx_buff->flags = 0U;
563 dx_buff->len = frag_len;
564 dx_buff->pa = frag_pa;
565 dx_buff->is_mapped = 1U;
566 ++ret;
567 } 588 }
568 589
590 first->eop_index = dx;
569 dx_buff->is_eop = 1U; 591 dx_buff->is_eop = 1U;
570 dx_buff->skb = skb; 592 dx_buff->skb = skb;
571 goto exit; 593 goto exit;
@@ -602,7 +624,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
602 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; 624 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
603 unsigned int tc = 0U; 625 unsigned int tc = 0U;
604 int err = NETDEV_TX_OK; 626 int err = NETDEV_TX_OK;
605 bool is_nic_in_bad_state;
606 627
607 frags = skb_shinfo(skb)->nr_frags + 1; 628 frags = skb_shinfo(skb)->nr_frags + 1;
608 629
@@ -613,13 +634,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
613 goto err_exit; 634 goto err_exit;
614 } 635 }
615 636
616 is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, 637 aq_ring_update_queue_state(ring);
617 AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
618 (aq_ring_avail_dx(ring) <
619 AQ_CFG_SKB_FRAGS_MAX);
620 638
621 if (is_nic_in_bad_state) { 639 /* Above status update may stop the queue. Check this. */
622 aq_nic_ndev_queue_stop(self, ring->idx); 640 if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
623 err = NETDEV_TX_BUSY; 641 err = NETDEV_TX_BUSY;
624 goto err_exit; 642 goto err_exit;
625 } 643 }
@@ -631,9 +649,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
631 ring, 649 ring,
632 frags); 650 frags);
633 if (err >= 0) { 651 if (err >= 0) {
634 if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
635 aq_nic_ndev_queue_stop(self, ring->idx);
636
637 ++ring->stats.tx.packets; 652 ++ring->stats.tx.packets;
638 ring->stats.tx.bytes += skb->len; 653 ring->stats.tx.bytes += skb->len;
639 } 654 }
@@ -645,6 +660,11 @@ err_exit:
645 return err; 660 return err;
646} 661}
647 662
663int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
664{
665 return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
666}
667
648int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) 668int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
649{ 669{
650 int err = 0; 670 int err = 0;
@@ -693,16 +713,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
693 713
694int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 714int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
695{ 715{
696 int err = 0;
697
698 if (new_mtu > self->aq_hw_caps.mtu) {
699 err = -EINVAL;
700 goto err_exit;
701 }
702 self->aq_nic_cfg.mtu = new_mtu; 716 self->aq_nic_cfg.mtu = new_mtu;
703 717
704err_exit: 718 return 0;
705 return err;
706} 719}
707 720
708int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) 721int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +918,8 @@ int aq_nic_stop(struct aq_nic_s *self)
905 struct aq_vec_s *aq_vec = NULL; 918 struct aq_vec_s *aq_vec = NULL;
906 unsigned int i = 0U; 919 unsigned int i = 0U;
907 920
908 for (i = 0U, aq_vec = self->aq_vec[0]; 921 netif_tx_disable(self->ndev);
909 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) 922 netif_carrier_off(self->ndev);
910 aq_nic_ndev_queue_stop(self, i);
911 923
912 del_timer_sync(&self->service_timer); 924 del_timer_sync(&self->service_timer);
913 925
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 7fc2a5ecb2b7..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
40 u32 vecs; /* vecs==allocated irqs */ 40 u32 vecs; /* vecs==allocated irqs */
41 u32 irq_type; 41 u32 irq_type;
42 u32 itr; 42 u32 itr;
43 u16 rx_itr;
44 u16 tx_itr;
43 u32 num_rss_queues; 45 u32 num_rss_queues;
44 u32 mtu; 46 u32 mtu;
45 u32 ucp_0x364; 47 u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
49 u16 is_mc_list_enabled; 51 u16 is_mc_list_enabled;
50 u16 mc_list_count; 52 u16 mc_list_count;
51 bool is_autoneg; 53 bool is_autoneg;
52 bool is_interrupt_moderation;
53 bool is_polling; 54 bool is_polling;
54 bool is_rss; 55 bool is_rss;
55 bool is_lro; 56 bool is_lro;
@@ -83,8 +84,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
83int aq_nic_init(struct aq_nic_s *self); 84int aq_nic_init(struct aq_nic_s *self);
84int aq_nic_cfg_start(struct aq_nic_s *self); 85int aq_nic_cfg_start(struct aq_nic_s *self);
85int aq_nic_ndev_register(struct aq_nic_s *self); 86int aq_nic_ndev_register(struct aq_nic_s *self);
86void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
87void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
88void aq_nic_ndev_free(struct aq_nic_s *self); 87void aq_nic_ndev_free(struct aq_nic_s *self);
89int aq_nic_start(struct aq_nic_s *self); 88int aq_nic_start(struct aq_nic_s *self);
90int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); 89int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
@@ -106,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
106struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); 105struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
107u32 aq_nic_get_fw_version(struct aq_nic_s *self); 106u32 aq_nic_get_fw_version(struct aq_nic_s *self);
108int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 107int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
108int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
109 109
110#endif /* AQ_NIC_H */ 110#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
85 int err = 0; 85 int err = 0;
86 unsigned int bar = 0U; 86 unsigned int bar = 0U;
87 unsigned int port = 0U; 87 unsigned int port = 0U;
88 unsigned int numvecs = 0U;
88 89
89 err = pci_enable_device(self->pdev); 90 err = pci_enable_device(self->pdev);
90 if (err < 0) 91 if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
142 } 143 }
143 } 144 }
144 145
145 /*enable interrupts */ 146 numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
147 numvecs = min(numvecs, num_online_cpus());
148
149 /* enable interrupts */
146#if !AQ_CFG_FORCE_LEGACY_INT 150#if !AQ_CFG_FORCE_LEGACY_INT
147 err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, 151 err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
148 self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
149 152
150 if (err < 0) { 153 if (err < 0) {
151 err = pci_alloc_irq_vectors(self->pdev, 1, 1, 154 err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
153 if (err < 0) 156 if (err < 0)
154 goto err_exit; 157 goto err_exit;
155 } 158 }
156#endif 159#endif /* AQ_CFG_FORCE_LEGACY_INT */
157 160
158 /* net device init */ 161 /* net device init */
159 for (port = 0; port < self->ports; ++port) { 162 for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
265 aq_nic_ndev_free(self->port[port]); 268 aq_nic_ndev_free(self->port[port]);
266 } 269 }
267 270
271 if (self->mmio)
272 iounmap(self->mmio);
273
268 kfree(self); 274 kfree(self);
269 275
270err_exit:; 276err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4eee1996a825..0654e0c76bc2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
104 return 0; 104 return 0;
105} 105}
106 106
107static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
108 unsigned int t)
109{
110 return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
111}
112
113void aq_ring_update_queue_state(struct aq_ring_s *ring)
114{
115 if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
116 aq_ring_queue_stop(ring);
117 else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
118 aq_ring_queue_wake(ring);
119}
120
121void aq_ring_queue_wake(struct aq_ring_s *ring)
122{
123 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
124
125 if (__netif_subqueue_stopped(ndev, ring->idx)) {
126 netif_wake_subqueue(ndev, ring->idx);
127 ring->stats.tx.queue_restarts++;
128 }
129}
130
131void aq_ring_queue_stop(struct aq_ring_s *ring)
132{
133 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
134
135 if (!__netif_subqueue_stopped(ndev, ring->idx))
136 netif_stop_subqueue(ndev, ring->idx);
137}
138
107void aq_ring_tx_clean(struct aq_ring_s *self) 139void aq_ring_tx_clean(struct aq_ring_s *self)
108{ 140{
109 struct device *dev = aq_nic_get_dev(self->aq_nic); 141 struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
113 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; 145 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
114 146
115 if (likely(buff->is_mapped)) { 147 if (likely(buff->is_mapped)) {
116 if (unlikely(buff->is_sop)) 148 if (unlikely(buff->is_sop)) {
149 if (!buff->is_eop &&
150 buff->eop_index != 0xffffU &&
151 (!aq_ring_dx_in_range(self->sw_head,
152 buff->eop_index,
153 self->hw_head)))
154 break;
155
117 dma_unmap_single(dev, buff->pa, buff->len, 156 dma_unmap_single(dev, buff->pa, buff->len,
118 DMA_TO_DEVICE); 157 DMA_TO_DEVICE);
119 else 158 } else {
120 dma_unmap_page(dev, buff->pa, buff->len, 159 dma_unmap_page(dev, buff->pa, buff->len,
121 DMA_TO_DEVICE); 160 DMA_TO_DEVICE);
161 }
122 } 162 }
123 163
124 if (unlikely(buff->is_eop)) 164 if (unlikely(buff->is_eop))
125 dev_kfree_skb_any(buff->skb); 165 dev_kfree_skb_any(buff->skb);
126 }
127}
128 166
129static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, 167 buff->pa = 0U;
130 unsigned int t) 168 buff->eop_index = 0xffffU;
131{ 169 }
132 return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
133} 170}
134 171
135#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 172#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 782176c5f4f8..5844078764bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
65 }; 65 };
66 union { 66 union {
67 struct { 67 struct {
68 u32 len:16; 68 u16 len;
69 u32 is_ip_cso:1; 69 u32 is_ip_cso:1;
70 u32 is_udp_cso:1; 70 u32 is_udp_cso:1;
71 u32 is_tcp_cso:1; 71 u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
77 u32 is_cleaned:1; 77 u32 is_cleaned:1;
78 u32 is_error:1; 78 u32 is_error:1;
79 u32 rsvd3:6; 79 u32 rsvd3:6;
80 u16 eop_index;
81 u16 rsvd4;
80 }; 82 };
81 u32 flags; 83 u64 flags;
82 }; 84 };
83}; 85};
84 86
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
94 u64 errors; 96 u64 errors;
95 u64 packets; 97 u64 packets;
96 u64 bytes; 98 u64 bytes;
99 u64 queue_restarts;
97}; 100};
98 101
99union aq_ring_stats_s { 102union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
147int aq_ring_init(struct aq_ring_s *self); 150int aq_ring_init(struct aq_ring_s *self);
148void aq_ring_rx_deinit(struct aq_ring_s *self); 151void aq_ring_rx_deinit(struct aq_ring_s *self);
149void aq_ring_free(struct aq_ring_s *self); 152void aq_ring_free(struct aq_ring_s *self);
153void aq_ring_update_queue_state(struct aq_ring_s *ring);
154void aq_ring_queue_wake(struct aq_ring_s *ring);
155void aq_ring_queue_stop(struct aq_ring_s *ring);
150void aq_ring_tx_clean(struct aq_ring_s *self); 156void aq_ring_tx_clean(struct aq_ring_s *self);
151int aq_ring_rx_clean(struct aq_ring_s *self, 157int aq_ring_rx_clean(struct aq_ring_s *self,
152 struct napi_struct *napi, 158 struct napi_struct *napi,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ebf588004c46..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
59 if (ring[AQ_VEC_TX_ID].sw_head != 59 if (ring[AQ_VEC_TX_ID].sw_head !=
60 ring[AQ_VEC_TX_ID].hw_head) { 60 ring[AQ_VEC_TX_ID].hw_head) {
61 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 61 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
62 62 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
63 if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
64 AQ_CFG_SKB_FRAGS_MAX) {
65 aq_nic_ndev_queue_start(self->aq_nic,
66 ring[AQ_VEC_TX_ID].idx);
67 }
68 was_tx_cleaned = true; 63 was_tx_cleaned = true;
69 } 64 }
70 65
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
364 stats_tx->packets += tx->packets; 359 stats_tx->packets += tx->packets;
365 stats_tx->bytes += tx->bytes; 360 stats_tx->bytes += tx->bytes;
366 stats_tx->errors += tx->errors; 361 stats_tx->errors += tx->errors;
362 stats_tx->queue_restarts += tx->queue_restarts;
367 } 363 }
368} 364}
369 365
@@ -377,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
377 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 373 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
378 aq_vec_add_stats(self, &stats_rx, &stats_tx); 374 aq_vec_add_stats(self, &stats_rx, &stats_tx);
379 375
376 /* This data should mimic aq_ethtool_queue_stat_names structure
377 */
380 data[count] += stats_rx.packets; 378 data[count] += stats_rx.packets;
381 data[++count] += stats_tx.packets; 379 data[++count] += stats_tx.packets;
380 data[++count] += stats_tx.queue_restarts;
382 data[++count] += stats_rx.jumbo_packets; 381 data[++count] += stats_rx.jumbo_packets;
383 data[++count] += stats_rx.lro_packets; 382 data[++count] += stats_rx.lro_packets;
384 data[++count] += stats_rx.errors; 383 data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
765 return err; 765 return err;
766} 766}
767 767
768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, 768static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
769 bool itr_enabled)
770{ 769{
771 unsigned int i = 0U; 770 unsigned int i = 0U;
771 u32 itr_rx;
772 772
773 if (itr_enabled && self->aq_nic_cfg->itr) { 773 if (self->aq_nic_cfg->itr) {
774 if (self->aq_nic_cfg->itr != 0xFFFFU) { 774 if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
775 u32 itr_ = (self->aq_nic_cfg->itr >> 1); 775 u32 itr_ = (self->aq_nic_cfg->itr >> 1);
776 776
777 itr_ = min(AQ_CFG_IRQ_MASK, itr_); 777 itr_ = min(AQ_CFG_IRQ_MASK, itr_);
778 778
779 PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | 779 itr_rx = 0x80000000U | (itr_ << 0x10);
780 (itr_ << 0x10);
781 } else { 780 } else {
782 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); 781 u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
783 782
784 if (n < self->aq_link_status.mbps) { 783 if (n < self->aq_link_status.mbps) {
785 PHAL_ATLANTIC_A0->itr_rx = 0U; 784 itr_rx = 0U;
786 } else { 785 } else {
787 static unsigned int hw_timers_tbl_[] = { 786 static unsigned int hw_timers_tbl_[] = {
788 0x01CU, /* 10Gbit */ 787 0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
797 hw_atl_utils_mbps_2_speed_index( 796 hw_atl_utils_mbps_2_speed_index(
798 self->aq_link_status.mbps); 797 self->aq_link_status.mbps);
799 798
800 PHAL_ATLANTIC_A0->itr_rx = 799 itr_rx = 0x80000000U |
801 0x80000000U |
802 (hw_timers_tbl_[speed_index] << 0x10U); 800 (hw_timers_tbl_[speed_index] << 0x10U);
803 } 801 }
804 802
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
806 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); 804 aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
807 } 805 }
808 } else { 806 } else {
809 PHAL_ATLANTIC_A0->itr_rx = 0U; 807 itr_rx = 0U;
810 } 808 }
811 809
812 for (i = HW_ATL_A0_RINGS_MAX; i--;) 810 for (i = HW_ATL_A0_RINGS_MAX; i--;)
813 reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); 811 reg_irq_thr_set(self, itr_rx, i);
814 812
815 return aq_hw_err_from_flags(self); 813 return aq_hw_err_from_flags(self);
816} 814}
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
885 .hw_rss_set = hw_atl_a0_hw_rss_set, 883 .hw_rss_set = hw_atl_a0_hw_rss_set,
886 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, 884 .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
887 .hw_get_regs = hw_atl_utils_hw_get_regs, 885 .hw_get_regs = hw_atl_utils_hw_get_regs,
886 .hw_update_stats = hw_atl_utils_update_stats,
888 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 887 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
889 .hw_get_fw_version = hw_atl_utils_get_fw_version, 888 .hw_get_fw_version = hw_atl_utils_get_fw_version,
890}; 889};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
788 return err; 788 return err;
789} 789}
790 790
791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, 791static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
792 bool itr_enabled)
793{ 792{
794 unsigned int i = 0U; 793 unsigned int i = 0U;
794 u32 itr_tx = 2U;
795 u32 itr_rx = 2U;
795 796
796 if (itr_enabled && self->aq_nic_cfg->itr) { 797 switch (self->aq_nic_cfg->itr) {
798 case AQ_CFG_INTERRUPT_MODERATION_ON:
799 case AQ_CFG_INTERRUPT_MODERATION_AUTO:
797 tdm_tx_desc_wr_wb_irq_en_set(self, 0U); 800 tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
798 tdm_tdm_intr_moder_en_set(self, 1U); 801 tdm_tdm_intr_moder_en_set(self, 1U);
799 rdm_rx_desc_wr_wb_irq_en_set(self, 0U); 802 rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
800 rdm_rdm_intr_moder_en_set(self, 1U); 803 rdm_rdm_intr_moder_en_set(self, 1U);
801 804
802 PHAL_ATLANTIC_B0->itr_tx = 2U; 805 if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
803 PHAL_ATLANTIC_B0->itr_rx = 2U; 806 /* HW timers are in 2us units */
807 int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
808 int tx_min_timer = tx_max_timer / 2;
804 809
805 if (self->aq_nic_cfg->itr != 0xFFFFU) { 810 int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
806 unsigned int max_timer = self->aq_nic_cfg->itr / 2U; 811 int rx_min_timer = rx_max_timer / 2;
807 unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
808 812
809 max_timer = min(0x1FFU, max_timer); 813 tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
810 min_timer = min(0xFFU, min_timer); 814 tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
815 rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
816 rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
811 817
812 PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; 818 itr_tx |= tx_min_timer << 0x8U;
813 PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; 819 itr_tx |= tx_max_timer << 0x10U;
814 PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; 820 itr_rx |= rx_min_timer << 0x8U;
815 PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; 821 itr_rx |= rx_max_timer << 0x10U;
816 } else { 822 } else {
817 static unsigned int hw_atl_b0_timers_table_tx_[][2] = { 823 static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
818 {0xffU, 0xffU}, /* 10Gbit */ 824 {0xfU, 0xffU}, /* 10Gbit */
819 {0xffU, 0x1ffU}, /* 5Gbit */ 825 {0xfU, 0x1ffU}, /* 5Gbit */
820 {0xffU, 0x1ffU}, /* 5Gbit 5GS */ 826 {0xfU, 0x1ffU}, /* 5Gbit 5GS */
821 {0xffU, 0x1ffU}, /* 2.5Gbit */ 827 {0xfU, 0x1ffU}, /* 2.5Gbit */
822 {0xffU, 0x1ffU}, /* 1Gbit */ 828 {0xfU, 0x1ffU}, /* 1Gbit */
823 {0xffU, 0x1ffU}, /* 100Mbit */ 829 {0xfU, 0x1ffU}, /* 100Mbit */
824 }; 830 };
825 831
826 static unsigned int hw_atl_b0_timers_table_rx_[][2] = { 832 static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
836 hw_atl_utils_mbps_2_speed_index( 842 hw_atl_utils_mbps_2_speed_index(
837 self->aq_link_status.mbps); 843 self->aq_link_status.mbps);
838 844
839 PHAL_ATLANTIC_B0->itr_tx |= 845 /* Update user visible ITR settings */
840 hw_atl_b0_timers_table_tx_[speed_index] 846 self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
841 [0] << 0x8U; /* set min timer value */ 847 [speed_index][1] * 2;
842 PHAL_ATLANTIC_B0->itr_tx |= 848 self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
843 hw_atl_b0_timers_table_tx_[speed_index] 849 [speed_index][1] * 2;
844 [1] << 0x10U; /* set max timer value */ 850
845 851 itr_tx |= hw_atl_b0_timers_table_tx_
846 PHAL_ATLANTIC_B0->itr_rx |= 852 [speed_index][0] << 0x8U;
847 hw_atl_b0_timers_table_rx_[speed_index] 853 itr_tx |= hw_atl_b0_timers_table_tx_
848 [0] << 0x8U; /* set min timer value */ 854 [speed_index][1] << 0x10U;
849 PHAL_ATLANTIC_B0->itr_rx |= 855
850 hw_atl_b0_timers_table_rx_[speed_index] 856 itr_rx |= hw_atl_b0_timers_table_rx_
851 [1] << 0x10U; /* set max timer value */ 857 [speed_index][0] << 0x8U;
858 itr_rx |= hw_atl_b0_timers_table_rx_
859 [speed_index][1] << 0x10U;
852 } 860 }
853 } else { 861 break;
862 case AQ_CFG_INTERRUPT_MODERATION_OFF:
854 tdm_tx_desc_wr_wb_irq_en_set(self, 1U); 863 tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
855 tdm_tdm_intr_moder_en_set(self, 0U); 864 tdm_tdm_intr_moder_en_set(self, 0U);
856 rdm_rx_desc_wr_wb_irq_en_set(self, 1U); 865 rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
857 rdm_rdm_intr_moder_en_set(self, 0U); 866 rdm_rdm_intr_moder_en_set(self, 0U);
858 PHAL_ATLANTIC_B0->itr_tx = 0U; 867 itr_tx = 0U;
859 PHAL_ATLANTIC_B0->itr_rx = 0U; 868 itr_rx = 0U;
869 break;
860 } 870 }
861 871
862 for (i = HW_ATL_B0_RINGS_MAX; i--;) { 872 for (i = HW_ATL_B0_RINGS_MAX; i--;) {
863 reg_tx_intr_moder_ctrl_set(self, 873 reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
864 PHAL_ATLANTIC_B0->itr_tx, i); 874 reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
865 reg_rx_intr_moder_ctrl_set(self,
866 PHAL_ATLANTIC_B0->itr_rx, i);
867 } 875 }
868 876
869 return aq_hw_err_from_flags(self); 877 return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
939 .hw_rss_set = hw_atl_b0_hw_rss_set, 947 .hw_rss_set = hw_atl_b0_hw_rss_set,
940 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, 948 .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
941 .hw_get_regs = hw_atl_utils_hw_get_regs, 949 .hw_get_regs = hw_atl_utils_hw_get_regs,
950 .hw_update_stats = hw_atl_utils_update_stats,
942 .hw_get_hw_stats = hw_atl_utils_get_hw_stats, 951 .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
943 .hw_get_fw_version = hw_atl_utils_get_fw_version, 952 .hw_get_fw_version = hw_atl_utils_get_fw_version,
944}; 953};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index f3957e930340..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -16,7 +16,7 @@
16 16
17#include "../aq_common.h" 17#include "../aq_common.h"
18 18
19#define HW_ATL_B0_MTU_JUMBO (16000U) 19#define HW_ATL_B0_MTU_JUMBO 16352U
20#define HW_ATL_B0_MTU 1514U 20#define HW_ATL_B0_MTU 1514U
21 21
22#define HW_ATL_B0_TX_RINGS 4U 22#define HW_ATL_B0_TX_RINGS 4U
@@ -139,6 +139,9 @@
139 139
140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U 140#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
141 141
142#define HW_ATL_INTR_MODER_MAX 0x1FF
143#define HW_ATL_INTR_MODER_MIN 0xFF
144
142/* Hardware tx descriptor */ 145/* Hardware tx descriptor */
143struct __packed hw_atl_txd_s { 146struct __packed hw_atl_txd_s {
144 u64 buf_addr; 147 u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 4f5ec9a0fbfb..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
255 return err; 255 return err;
256} 256}
257 257
258int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox_header *pmbox)
260{
261 return hw_atl_utils_fw_downld_dwords(self,
262 PHAL_ATLANTIC->mbox_addr,
263 (u32 *)(void *)pmbox,
264 sizeof(*pmbox) / sizeof(u32));
265}
266
258void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 267void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
259 struct hw_aq_atl_utils_mbox *pmbox) 268 struct hw_aq_atl_utils_mbox *pmbox)
260{ 269{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
267 if (err < 0) 276 if (err < 0)
268 goto err_exit; 277 goto err_exit;
269 278
270 if (pmbox != &PHAL_ATLANTIC->mbox)
271 memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
272
273 if (IS_CHIP_FEATURE(REVISION_A0)) { 279 if (IS_CHIP_FEATURE(REVISION_A0)) {
274 unsigned int mtu = self->aq_nic_cfg ? 280 unsigned int mtu = self->aq_nic_cfg ?
275 self->aq_nic_cfg->mtu : 1514U; 281 self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
299{ 305{
300 int err = 0; 306 int err = 0;
301 u32 transaction_id = 0; 307 u32 transaction_id = 0;
308 struct hw_aq_atl_utils_mbox_header mbox;
302 309
303 if (state == MPI_RESET) { 310 if (state == MPI_RESET) {
304 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); 311 hw_atl_utils_mpi_read_mbox(self, &mbox);
305 312
306 transaction_id = PHAL_ATLANTIC->mbox.transaction_id; 313 transaction_id = mbox.transaction_id;
307 314
308 AQ_HW_WAIT_FOR(transaction_id != 315 AQ_HW_WAIT_FOR(transaction_id !=
309 (hw_atl_utils_mpi_read_stats 316 (hw_atl_utils_mpi_read_mbox(self, &mbox),
310 (self, &PHAL_ATLANTIC->mbox), 317 mbox.transaction_id),
311 PHAL_ATLANTIC->mbox.transaction_id), 318 1000U, 100U);
312 1000U, 100U);
313 if (err < 0) 319 if (err < 0)
314 goto err_exit; 320 goto err_exit;
315 } 321 }
@@ -351,8 +357,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
351 break; 357 break;
352 358
353 default: 359 default:
354 link_status->mbps = 0U; 360 return -EBUSY;
355 break;
356 } 361 }
357 } 362 }
358 363
@@ -493,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
493 return 0; 498 return 0;
494} 499}
495 500
501int hw_atl_utils_update_stats(struct aq_hw_s *self)
502{
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox;
505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox);
510
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_)
513
514 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt);
518
519 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr);
523
524 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc);
531
532#undef AQ_SDELTA
533
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535
536 return 0;
537}
538
496int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
497 u64 *data, unsigned int *p_count) 540 u64 *data, unsigned int *p_count)
498{ 541{
499 struct hw_atl_stats_s *stats = NULL; 542 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
500 int i = 0; 544 int i = 0;
501 545
502 hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
503
504 stats = &PHAL_ATLANTIC->mbox.stats;
505
506 data[i] = stats->uprc + stats->mprc + stats->bprc; 546 data[i] = stats->uprc + stats->mprc + stats->bprc;
507 data[++i] = stats->uprc; 547 data[++i] = stats->uprc;
508 data[++i] = stats->mprc; 548 data[++i] = stats->mprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
115 }; 115 };
116}; 116};
117 117
118struct __packed hw_aq_atl_utils_mbox { 118struct __packed hw_aq_atl_utils_mbox_header {
119 u32 version; 119 u32 version;
120 u32 transaction_id; 120 u32 transaction_id;
121 int error; 121 u32 error;
122};
123
124struct __packed hw_aq_atl_utils_mbox {
125 struct hw_aq_atl_utils_mbox_header header;
122 struct hw_atl_stats_s stats; 126 struct hw_atl_stats_s stats;
123}; 127};
124 128
125struct __packed hw_atl_s { 129struct __packed hw_atl_s {
126 struct aq_hw_s base; 130 struct aq_hw_s base;
127 struct hw_aq_atl_utils_mbox mbox; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats;
128 u64 speed; 133 u64 speed;
129 u32 itr_tx;
130 u32 itr_rx;
131 unsigned int chip_features; 134 unsigned int chip_features;
132 u32 fw_ver_actual; 135 u32 fw_ver_actual;
133 atomic_t dpc; 136 atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
170 173
171void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); 174void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
172 175
176int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
177 struct hw_aq_atl_utils_mbox_header *pmbox);
178
173void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, 179void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
174 struct hw_aq_atl_utils_mbox *pmbox); 180 struct hw_aq_atl_utils_mbox *pmbox);
175 181
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
199 205
200int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); 206int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
201 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209
202int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
203 u64 *data, 211 u64 *data,
204 unsigned int *p_count); 212 unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aacec8bc19d5..dc5de275352a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
214 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 214 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
215}; 215};
216 216
217static struct workqueue_struct *bnxt_pf_wq;
218
217static bool bnxt_vf_pciid(enum board_idx idx) 219static bool bnxt_vf_pciid(enum board_idx idx)
218{ 220{
219 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); 221 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1024 return 0; 1026 return 0;
1025} 1027}
1026 1028
1029static void bnxt_queue_sp_work(struct bnxt *bp)
1030{
1031 if (BNXT_PF(bp))
1032 queue_work(bnxt_pf_wq, &bp->sp_task);
1033 else
1034 schedule_work(&bp->sp_task);
1035}
1036
1037static void bnxt_cancel_sp_work(struct bnxt *bp)
1038{
1039 if (BNXT_PF(bp))
1040 flush_workqueue(bnxt_pf_wq);
1041 else
1042 cancel_work_sync(&bp->sp_task);
1043}
1044
1027static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 1045static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1028{ 1046{
1029 if (!rxr->bnapi->in_reset) { 1047 if (!rxr->bnapi->in_reset) {
1030 rxr->bnapi->in_reset = true; 1048 rxr->bnapi->in_reset = true;
1031 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 1049 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1032 schedule_work(&bp->sp_task); 1050 bnxt_queue_sp_work(bp);
1033 } 1051 }
1034 rxr->rx_next_cons = 0xffff; 1052 rxr->rx_next_cons = 0xffff;
1035} 1053}
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
1717 default: 1735 default:
1718 goto async_event_process_exit; 1736 goto async_event_process_exit;
1719 } 1737 }
1720 schedule_work(&bp->sp_task); 1738 bnxt_queue_sp_work(bp);
1721async_event_process_exit: 1739async_event_process_exit:
1722 bnxt_ulp_async_events(bp, cmpl); 1740 bnxt_ulp_async_events(bp, cmpl);
1723 return 0; 1741 return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1751 1769
1752 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 1770 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1753 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); 1771 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1754 schedule_work(&bp->sp_task); 1772 bnxt_queue_sp_work(bp);
1755 break; 1773 break;
1756 1774
1757 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 1775 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3448 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); 3466 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3449} 3467}
3450 3468
3469int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3470 int timeout)
3471{
3472 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3473}
3474
3451int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) 3475int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3452{ 3476{
3453 int rc; 3477 int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6327 } 6351 }
6328 6352
6329 if (link_re_init) { 6353 if (link_re_init) {
6354 mutex_lock(&bp->link_lock);
6330 rc = bnxt_update_phy_setting(bp); 6355 rc = bnxt_update_phy_setting(bp);
6356 mutex_unlock(&bp->link_lock);
6331 if (rc) 6357 if (rc)
6332 netdev_warn(bp->dev, "failed to update phy settings\n"); 6358 netdev_warn(bp->dev, "failed to update phy settings\n");
6333 } 6359 }
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
6647 vnic->rx_mask = mask; 6673 vnic->rx_mask = mask;
6648 6674
6649 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); 6675 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6650 schedule_work(&bp->sp_task); 6676 bnxt_queue_sp_work(bp);
6651 } 6677 }
6652} 6678}
6653 6679
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
6920 6946
6921 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 6947 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6922 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 6948 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6923 schedule_work(&bp->sp_task); 6949 bnxt_queue_sp_work(bp);
6924} 6950}
6925 6951
6926#ifdef CONFIG_NET_POLL_CONTROLLER 6952#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
6952 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && 6978 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6953 bp->stats_coal_ticks) { 6979 bp->stats_coal_ticks) {
6954 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); 6980 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6955 schedule_work(&bp->sp_task); 6981 bnxt_queue_sp_work(bp);
6956 } 6982 }
6957bnxt_restart_timer: 6983bnxt_restart_timer:
6958 mod_timer(&bp->timer, jiffies + bp->current_interval); 6984 mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
7025 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) 7051 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7026 bnxt_hwrm_port_qstats(bp); 7052 bnxt_hwrm_port_qstats(bp);
7027 7053
7028 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7029 * must be the last functions to be called before exiting.
7030 */
7031 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 7054 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7032 int rc = 0; 7055 int rc;
7033 7056
7057 mutex_lock(&bp->link_lock);
7034 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 7058 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7035 &bp->sp_event)) 7059 &bp->sp_event))
7036 bnxt_hwrm_phy_qcaps(bp); 7060 bnxt_hwrm_phy_qcaps(bp);
7037 7061
7038 bnxt_rtnl_lock_sp(bp); 7062 rc = bnxt_update_link(bp, true);
7039 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7063 mutex_unlock(&bp->link_lock);
7040 rc = bnxt_update_link(bp, true);
7041 bnxt_rtnl_unlock_sp(bp);
7042 if (rc) 7064 if (rc)
7043 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 7065 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7044 rc); 7066 rc);
7045 } 7067 }
7046 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 7068 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7047 bnxt_rtnl_lock_sp(bp); 7069 mutex_lock(&bp->link_lock);
7048 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 7070 bnxt_get_port_module_status(bp);
7049 bnxt_get_port_module_status(bp); 7071 mutex_unlock(&bp->link_lock);
7050 bnxt_rtnl_unlock_sp(bp);
7051 } 7072 }
7073 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7074 * must be the last functions to be called before exiting.
7075 */
7052 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 7076 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7053 bnxt_reset(bp, false); 7077 bnxt_reset(bp, false);
7054 7078
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7433 spin_unlock_bh(&bp->ntp_fltr_lock); 7457 spin_unlock_bh(&bp->ntp_fltr_lock);
7434 7458
7435 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); 7459 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7436 schedule_work(&bp->sp_task); 7460 bnxt_queue_sp_work(bp);
7437 7461
7438 return new_fltr->sw_id; 7462 return new_fltr->sw_id;
7439 7463
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7516 if (bp->vxlan_port_cnt == 1) { 7540 if (bp->vxlan_port_cnt == 1) {
7517 bp->vxlan_port = ti->port; 7541 bp->vxlan_port = ti->port;
7518 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); 7542 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7519 schedule_work(&bp->sp_task); 7543 bnxt_queue_sp_work(bp);
7520 } 7544 }
7521 break; 7545 break;
7522 case UDP_TUNNEL_TYPE_GENEVE: 7546 case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
7533 return; 7557 return;
7534 } 7558 }
7535 7559
7536 schedule_work(&bp->sp_task); 7560 bnxt_queue_sp_work(bp);
7537} 7561}
7538 7562
7539static void bnxt_udp_tunnel_del(struct net_device *dev, 7563static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
7572 return; 7596 return;
7573 } 7597 }
7574 7598
7575 schedule_work(&bp->sp_task); 7599 bnxt_queue_sp_work(bp);
7576} 7600}
7577 7601
7578static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 7602static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
7720 pci_disable_pcie_error_reporting(pdev); 7744 pci_disable_pcie_error_reporting(pdev);
7721 unregister_netdev(dev); 7745 unregister_netdev(dev);
7722 bnxt_shutdown_tc(bp); 7746 bnxt_shutdown_tc(bp);
7723 cancel_work_sync(&bp->sp_task); 7747 bnxt_cancel_sp_work(bp);
7724 bp->sp_event = 0; 7748 bp->sp_event = 0;
7725 7749
7726 bnxt_clear_int_mode(bp); 7750 bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
7748 rc); 7772 rc);
7749 return rc; 7773 return rc;
7750 } 7774 }
7775 mutex_init(&bp->link_lock);
7751 7776
7752 rc = bnxt_update_link(bp, false); 7777 rc = bnxt_update_link(bp, false);
7753 if (rc) { 7778 if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7946 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; 7971 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7947 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; 7972 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7948 7973
7949 if (pcie_get_minimum_link(bp->pdev, &speed, &width) || 7974 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
7950 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) 7975 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7951 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); 7976 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7952 else 7977 else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8138 else 8163 else
8139 device_set_wakeup_capable(&pdev->dev, false); 8164 device_set_wakeup_capable(&pdev->dev, false);
8140 8165
8141 if (BNXT_PF(bp)) 8166 if (BNXT_PF(bp)) {
8167 if (!bnxt_pf_wq) {
8168 bnxt_pf_wq =
8169 create_singlethread_workqueue("bnxt_pf_wq");
8170 if (!bnxt_pf_wq) {
8171 dev_err(&pdev->dev, "Unable to create workqueue.\n");
8172 goto init_err_pci_clean;
8173 }
8174 }
8142 bnxt_init_tc(bp); 8175 bnxt_init_tc(bp);
8176 }
8143 8177
8144 rc = register_netdev(dev); 8178 rc = register_netdev(dev);
8145 if (rc) 8179 if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
8375#endif 8409#endif
8376}; 8410};
8377 8411
8378module_pci_driver(bnxt_pci_driver); 8412static int __init bnxt_init(void)
8413{
8414 return pci_register_driver(&bnxt_pci_driver);
8415}
8416
8417static void __exit bnxt_exit(void)
8418{
8419 pci_unregister_driver(&bnxt_pci_driver);
8420 if (bnxt_pf_wq)
8421 destroy_workqueue(bnxt_pf_wq);
8422}
8423
8424module_init(bnxt_init);
8425module_exit(bnxt_exit);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
1290 unsigned long *ntp_fltr_bmap; 1290 unsigned long *ntp_fltr_bmap;
1291 int ntp_fltr_count; 1291 int ntp_fltr_count;
1292 1292
1293 /* To protect link related settings during link changes and
1294 * ethtool settings changes.
1295 */
1296 struct mutex link_lock;
1293 struct bnxt_link_info link_info; 1297 struct bnxt_link_info link_info;
1294 struct ethtool_eee eee; 1298 struct ethtool_eee eee;
1295 u32 lpi_tmr_lo; 1299 u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
1358int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); 1362int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
1359void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); 1363void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
1360int _hwrm_send_message(struct bnxt *, void *, u32, int); 1364int _hwrm_send_message(struct bnxt *, void *, u32, int);
1365int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
1361int hwrm_send_message(struct bnxt *, void *, u32, int); 1366int hwrm_send_message(struct bnxt *, void *, u32, int);
1362int hwrm_send_message_silent(struct bnxt *, void *, u32, int); 1367int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
1363int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, 1368int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
50 50
51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); 51 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 52 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
53 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 53
54 mutex_lock(&bp->hwrm_cmd_lock);
55 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
54 if (!rc) { 56 if (!rc) {
55 u8 *pri2cos = &resp->pri0_cos_queue_id; 57 u8 *pri2cos = &resp->pri0_cos_queue_id;
56 int i, j; 58 int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
66 } 68 }
67 } 69 }
68 } 70 }
71 mutex_unlock(&bp->hwrm_cmd_lock);
69 return rc; 72 return rc;
70} 73}
71 74
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
119 int rc, i; 122 int rc, i;
120 123
121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); 124 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 125
123 if (rc) 126 mutex_lock(&bp->hwrm_cmd_lock);
127 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
128 if (rc) {
129 mutex_unlock(&bp->hwrm_cmd_lock);
124 return rc; 130 return rc;
131 }
125 132
126 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); 133 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
127 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { 134 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
143 } 150 }
144 } 151 }
145 } 152 }
153 mutex_unlock(&bp->hwrm_cmd_lock);
146 return 0; 154 return 0;
147} 155}
148 156
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
240 int rc; 248 int rc;
241 249
242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); 250 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
243 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 251
244 if (rc) 252 mutex_lock(&bp->hwrm_cmd_lock);
253 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
254 if (rc) {
255 mutex_unlock(&bp->hwrm_cmd_lock);
245 return rc; 256 return rc;
257 }
246 258
247 pri_mask = le32_to_cpu(resp->flags); 259 pri_mask = le32_to_cpu(resp->flags);
248 pfc->pfc_en = pri_mask; 260 pfc->pfc_en = pri_mask;
261 mutex_unlock(&bp->hwrm_cmd_lock);
249 return 0; 262 return 0;
250} 263}
251 264
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1052 u32 ethtool_speed; 1052 u32 ethtool_speed;
1053 1053
1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 1054 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1055 mutex_lock(&bp->link_lock);
1055 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 1056 bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1056 1057
1057 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 1058 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
1099 base->port = PORT_FIBRE; 1100 base->port = PORT_FIBRE;
1100 } 1101 }
1101 base->phy_address = link_info->phy_addr; 1102 base->phy_address = link_info->phy_addr;
1103 mutex_unlock(&bp->link_lock);
1102 1104
1103 return 0; 1105 return 0;
1104} 1106}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1190 if (!BNXT_SINGLE_PF(bp)) 1192 if (!BNXT_SINGLE_PF(bp))
1191 return -EOPNOTSUPP; 1193 return -EOPNOTSUPP;
1192 1194
1195 mutex_lock(&bp->link_lock);
1193 if (base->autoneg == AUTONEG_ENABLE) { 1196 if (base->autoneg == AUTONEG_ENABLE) {
1194 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, 1197 BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
1195 advertising); 1198 advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
1234 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); 1237 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1235 1238
1236set_setting_exit: 1239set_setting_exit:
1240 mutex_unlock(&bp->link_lock);
1237 return rc; 1241 return rc;
1238} 1242}
1239 1243
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1805 req.dir_ordinal = cpu_to_le16(ordinal); 1809 req.dir_ordinal = cpu_to_le16(ordinal);
1806 req.dir_ext = cpu_to_le16(ext); 1810 req.dir_ext = cpu_to_le16(ext);
1807 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; 1811 req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
1808 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1812 mutex_lock(&bp->hwrm_cmd_lock);
1813 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1809 if (rc == 0) { 1814 if (rc == 0) {
1810 if (index) 1815 if (index)
1811 *index = le16_to_cpu(output->dir_idx); 1816 *index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
1814 if (data_length) 1819 if (data_length)
1815 *data_length = le32_to_cpu(output->dir_data_length); 1820 *data_length = le32_to_cpu(output->dir_data_length);
1816 } 1821 }
1822 mutex_unlock(&bp->hwrm_cmd_lock);
1817 return rc; 1823 return rc;
1818} 1824}
1819 1825
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
502 int rc = 0, vfs_supported; 502 int rc = 0, vfs_supported;
503 int min_rx_rings, min_tx_rings, min_rss_ctxs; 503 int min_rx_rings, min_tx_rings, min_rss_ctxs;
504 int tx_ok = 0, rx_ok = 0, rss_ok = 0; 504 int tx_ok = 0, rx_ok = 0, rss_ok = 0;
505 int avail_cp, avail_stat;
505 506
506 /* Check if we can enable requested num of vf's. At a mininum 507 /* Check if we can enable requested num of vf's. At a mininum
507 * we require 1 RX 1 TX rings for each VF. In this minimum conf 508 * we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
509 */ 510 */
510 vfs_supported = *num_vfs; 511 vfs_supported = *num_vfs;
511 512
513 avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
514 avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
515 avail_cp = min_t(int, avail_cp, avail_stat);
516
512 while (vfs_supported) { 517 while (vfs_supported) {
513 min_rx_rings = vfs_supported; 518 min_rx_rings = vfs_supported;
514 min_tx_rings = vfs_supported; 519 min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
523 min_rx_rings) 528 min_rx_rings)
524 rx_ok = 1; 529 rx_ok = 1;
525 } 530 }
526 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) 531 if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
532 avail_cp < min_rx_rings)
527 rx_ok = 0; 533 rx_ok = 0;
528 534
529 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) 535 if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
536 avail_cp >= min_tx_rings)
530 tx_ok = 1; 537 tx_ok = 1;
531 538
532 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) 539 if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cec94bbb2ea5..8bc126a156e8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1278 1278
1279 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1279 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1280 if (ret) 1280 if (ret)
1281 return -ENOMEM; 1281 goto error;
1282 1282
1283 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1283 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1284 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1284 for (i = 0, j = 0; i < cp->max_cid_space; i++) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e7f54948173f..5b19826a7e16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1847 struct lio *lio = container_of(ptp, struct lio, ptp_info); 1847 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; 1848 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1849 1849
1850 ns = timespec_to_ns(ts); 1850 ns = timespec64_to_ns(ts);
1851 1851
1852 spin_lock_irqsave(&lio->ptp_lock, flags); 1852 spin_lock_irqsave(&lio->ptp_lock, flags);
1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); 1853 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 49b80da51ba7..805ab45e9b5a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
565 return true; 565 return true;
566 default: 566 default:
567 bpf_warn_invalid_xdp_action(action); 567 bpf_warn_invalid_xdp_action(action);
568 /* fall through */
568 case XDP_ABORTED: 569 case XDP_ABORTED:
569 trace_xdp_exception(nic->netdev, prog, action); 570 trace_xdp_exception(nic->netdev, prog, action);
571 /* fall through */
570 case XDP_DROP: 572 case XDP_DROP:
571 /* Check if it's a recycled page, if not 573 /* Check if it's a recycled page, if not
572 * unmap the DMA mapping. 574 * unmap the DMA mapping.
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 36520634c96a..e77192683dba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2369,8 +2369,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2369 priv->enet_ver = AE_VERSION_2; 2369 priv->enet_ver = AE_VERSION_2;
2370 2370
2371 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0); 2371 ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
2372 if (IS_ERR_OR_NULL(ae_node)) { 2372 if (!ae_node) {
2373 ret = PTR_ERR(ae_node); 2373 ret = -ENODEV;
2374 dev_err(dev, "not find ae-handle\n"); 2374 dev_err(dev, "not find ae-handle\n");
2375 goto out_read_prop_fail; 2375 goto out_read_prop_fail;
2376 } 2376 }
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e0685e630afe..c1cdbfd83bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2652,7 +2652,8 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
2652 dev_err(&hdev->pdev->dev, 2652 dev_err(&hdev->pdev->dev,
2653 "Configure rss tc size failed, invalid TC_SIZE = %d\n", 2653 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
2654 rss_size); 2654 rss_size);
2655 return -EINVAL; 2655 ret = -EINVAL;
2656 goto err;
2656 } 2657 }
2657 2658
2658 roundup_size = roundup_pow_of_two(rss_size); 2659 roundup_size = roundup_pow_of_two(rss_size);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cb8182f4fdfa..c66abd476023 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1093 * places them in a descriptor array, scrq_arr 1093 * places them in a descriptor array, scrq_arr
1094 */ 1094 */
1095 1095
1096static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, 1096static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1097 union sub_crq *scrq_arr) 1097 union sub_crq *scrq_arr)
1098{ 1098{
1099 union sub_crq hdr_desc; 1099 union sub_crq hdr_desc;
1100 int tmp_len = len; 1100 int tmp_len = len;
1101 int num_descs = 0;
1101 u8 *data, *cur; 1102 u8 *data, *cur;
1102 int tmp; 1103 int tmp;
1103 1104
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1126 tmp_len -= tmp; 1127 tmp_len -= tmp;
1127 *scrq_arr = hdr_desc; 1128 *scrq_arr = hdr_desc;
1128 scrq_arr++; 1129 scrq_arr++;
1130 num_descs++;
1129 } 1131 }
1132
1133 return num_descs;
1130} 1134}
1131 1135
1132/** 1136/**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1144 int *num_entries, u8 hdr_field) 1148 int *num_entries, u8 hdr_field)
1145{ 1149{
1146 int hdr_len[3] = {0, 0, 0}; 1150 int hdr_len[3] = {0, 0, 0};
1147 int tot_len, len; 1151 int tot_len;
1148 u8 *hdr_data = txbuff->hdr_data; 1152 u8 *hdr_data = txbuff->hdr_data;
1149 1153
1150 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, 1154 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1151 txbuff->hdr_data); 1155 txbuff->hdr_data);
1152 len = tot_len; 1156 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1153 len -= 24;
1154 if (len > 0)
1155 num_entries += len % 29 ? len / 29 + 1 : len / 29;
1156 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1157 txbuff->indir_arr + 1); 1157 txbuff->indir_arr + 1);
1158} 1158}
1159 1159
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ec8aa4562cc9..3b3983a1ffbb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1824{ 1824{
1825 struct e1000_adapter *adapter = netdev_priv(netdev); 1825 struct e1000_adapter *adapter = netdev_priv(netdev);
1826 int i; 1826 int i;
1827 char *p = NULL;
1828 const struct e1000_stats *stat = e1000_gstrings_stats; 1827 const struct e1000_stats *stat = e1000_gstrings_stats;
1829 1828
1830 e1000_update_stats(adapter); 1829 e1000_update_stats(adapter);
1831 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1830 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
1831 char *p;
1832
1832 switch (stat->type) { 1833 switch (stat->type) {
1833 case NETDEV_STATS: 1834 case NETDEV_STATS:
1834 p = (char *)netdev + stat->stat_offset; 1835 p = (char *)netdev + stat->stat_offset;
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1839 default: 1840 default:
1840 WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", 1841 WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
1841 stat->type, i); 1842 stat->type, i);
1842 break; 1843 continue;
1843 } 1844 }
1844 1845
1845 if (stat->sizeof_stat == sizeof(u64)) 1846 if (stat->sizeof_stat == sizeof(u64))
1846 data[i] = *(u64 *)p; 1847 data[i] = *(u64 *)p;
1847 else 1848 else
1848 data[i] = *(u32 *)p; 1849 data[i] = *(u32 *)p;
1849
1850 stat++;
1851 } 1850 }
1852/* BUG_ON(i != E1000_STATS_LEN); */ 1851/* BUG_ON(i != E1000_STATS_LEN); */
1853} 1852}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 98375e1e1185..1982f7917a8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
520 struct net_device *netdev = adapter->netdev; 520 struct net_device *netdev = adapter->netdev;
521 u32 rctl, tctl; 521 u32 rctl, tctl;
522 522
523 netif_carrier_off(netdev);
524
525 /* disable receives in the hardware */ 523 /* disable receives in the hardware */
526 rctl = er32(RCTL); 524 rctl = er32(RCTL);
527 ew32(RCTL, rctl & ~E1000_RCTL_EN); 525 ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
537 E1000_WRITE_FLUSH(); 535 E1000_WRITE_FLUSH();
538 msleep(10); 536 msleep(10);
539 537
538 /* Set the carrier off after transmits have been disabled in the
539 * hardware, to avoid race conditions with e1000_watchdog() (which
540 * may be running concurrently to us, checking for the carrier
541 * bit to decide whether it should enable transmits again). Such
542 * a race condition would result into transmission being disabled
543 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
544 */
545 netif_carrier_off(netdev);
546
540 napi_disable(&adapter->napi); 547 napi_disable(&adapter->napi);
541 548
542 e1000_irq_disable(adapter); 549 e1000_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 57505b1df98d..d591b3e6bd7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
298} 298}
299 299
300/** 300/**
301 * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking 301 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
302 * @hw: pointer to the HW structure 302 * @hw: pointer to the HW structure
303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) 303 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
304 * @data: word read from the Shadow RAM 304 * @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1519dfb851d0..120c68f78951 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
1038} 1038}
1039 1039
1040/** 1040/**
1041 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1042 * @rx_ring: rx descriptor ring to store buffers on
1043 * @old_buff: donor buffer to have page reused
1044 *
1045 * Synchronizes page for reuse by the adapter
1046 **/
1047static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1048 struct i40e_rx_buffer *old_buff)
1049{
1050 struct i40e_rx_buffer *new_buff;
1051 u16 nta = rx_ring->next_to_alloc;
1052
1053 new_buff = &rx_ring->rx_bi[nta];
1054
1055 /* update, and store next to alloc */
1056 nta++;
1057 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1058
1059 /* transfer page from old buffer to new buffer */
1060 new_buff->dma = old_buff->dma;
1061 new_buff->page = old_buff->page;
1062 new_buff->page_offset = old_buff->page_offset;
1063 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1064}
1065
1066/**
1041 * i40e_rx_is_programming_status - check for programming status descriptor 1067 * i40e_rx_is_programming_status - check for programming status descriptor
1042 * @qw: qword representing status_error_len in CPU ordering 1068 * @qw: qword representing status_error_len in CPU ordering
1043 * 1069 *
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 union i40e_rx_desc *rx_desc, 1097 union i40e_rx_desc *rx_desc,
1072 u64 qw) 1098 u64 qw)
1073{ 1099{
1074 u32 ntc = rx_ring->next_to_clean + 1; 1100 struct i40e_rx_buffer *rx_buffer;
1101 u32 ntc = rx_ring->next_to_clean;
1075 u8 id; 1102 u8 id;
1076 1103
1077 /* fetch, update, and store next to clean */ 1104 /* fetch, update, and store next to clean */
1105 rx_buffer = &rx_ring->rx_bi[ntc++];
1078 ntc = (ntc < rx_ring->count) ? ntc : 0; 1106 ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 rx_ring->next_to_clean = ntc; 1107 rx_ring->next_to_clean = ntc;
1080 1108
1081 prefetch(I40E_RX_DESC(rx_ring, ntc)); 1109 prefetch(I40E_RX_DESC(rx_ring, ntc));
1082 1110
1111 /* place unused page back on the ring */
1112 i40e_reuse_rx_page(rx_ring, rx_buffer);
1113 rx_ring->rx_stats.page_reuse_count++;
1114
1115 /* clear contents of buffer_info */
1116 rx_buffer->page = NULL;
1117
1083 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> 1118 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 1119 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1085 1120
@@ -1639,32 +1674,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1639} 1674}
1640 1675
1641/** 1676/**
1642 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1643 * @rx_ring: rx descriptor ring to store buffers on
1644 * @old_buff: donor buffer to have page reused
1645 *
1646 * Synchronizes page for reuse by the adapter
1647 **/
1648static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1649 struct i40e_rx_buffer *old_buff)
1650{
1651 struct i40e_rx_buffer *new_buff;
1652 u16 nta = rx_ring->next_to_alloc;
1653
1654 new_buff = &rx_ring->rx_bi[nta];
1655
1656 /* update, and store next to alloc */
1657 nta++;
1658 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1659
1660 /* transfer page from old buffer to new buffer */
1661 new_buff->dma = old_buff->dma;
1662 new_buff->page = old_buff->page;
1663 new_buff->page_offset = old_buff->page_offset;
1664 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1665}
1666
1667/**
1668 * i40e_page_is_reusable - check if any reuse is possible 1677 * i40e_page_is_reusable - check if any reuse is possible
1669 * @page: page struct to check 1678 * @page: page struct to check
1670 * 1679 *
@@ -2093,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2093 2102
2094 if (unlikely(i40e_rx_is_programming_status(qword))) { 2103 if (unlikely(i40e_rx_is_programming_status(qword))) {
2095 i40e_clean_programming_status(rx_ring, rx_desc, qword); 2104 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2105 cleaned_count++;
2096 continue; 2106 continue;
2097 } 2107 }
2098 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> 2108 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -2260,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2260 goto enable_int; 2270 goto enable_int;
2261 } 2271 }
2262 2272
2263 if (ITR_IS_DYNAMIC(tx_itr_setting)) { 2273 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2264 rx = i40e_set_new_dynamic_itr(&q_vector->rx); 2274 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2265 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); 2275 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2266 } 2276 }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd4a46b03cc8..ea69af267d63 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5326,7 +5326,7 @@ dma_error:
5326 DMA_TO_DEVICE); 5326 DMA_TO_DEVICE);
5327 dma_unmap_len_set(tx_buffer, len, 0); 5327 dma_unmap_len_set(tx_buffer, len, 0);
5328 5328
5329 if (i--) 5329 if (i-- == 0)
5330 i += tx_ring->count; 5330 i += tx_ring->count;
5331 tx_buffer = &tx_ring->tx_buffer_info[i]; 5331 tx_buffer = &tx_ring->tx_buffer_info[i];
5332 } 5332 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 523f9d05a810..8a32eb7d47b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
175 **/ 175 **/
176static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) 176static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
177{ 177{
178#ifndef CONFIG_SPARC
179 u32 regval;
180 u32 i;
181#endif
182 s32 ret_val; 178 s32 ret_val;
183 179
184 ret_val = ixgbe_start_hw_generic(hw); 180 ret_val = ixgbe_start_hw_generic(hw);
185
186#ifndef CONFIG_SPARC
187 /* Disable relaxed ordering */
188 for (i = 0; ((i < hw->mac.max_tx_queues) &&
189 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
190 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
191 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
192 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
193 }
194
195 for (i = 0; ((i < hw->mac.max_rx_queues) &&
196 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
197 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
198 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
199 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
200 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
201 }
202#endif
203 if (ret_val) 181 if (ret_val)
204 return ret_val; 182 return ret_val;
205 183
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2c19070d2a0b..6e6ab6f6875e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
366 } 366 }
367 IXGBE_WRITE_FLUSH(hw); 367 IXGBE_WRITE_FLUSH(hw);
368 368
369#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
370 /* Disable relaxed ordering */
371 for (i = 0; i < hw->mac.max_tx_queues; i++) {
372 u32 regval;
373
374 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
375 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
376 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
377 }
378
379 for (i = 0; i < hw->mac.max_rx_queues; i++) {
380 u32 regval;
381
382 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
383 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
384 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
385 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
386 }
387#endif
388 return 0; 369 return 0;
389} 370}
390 371
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 72c565712a5f..c3e7a8191128 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1048{ 1048{
1049 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1049 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1050 struct ixgbe_ring *temp_ring; 1050 struct ixgbe_ring *temp_ring;
1051 int i, err = 0; 1051 int i, j, err = 0;
1052 u32 new_rx_count, new_tx_count; 1052 u32 new_rx_count, new_tx_count;
1053 1053
1054 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 1054 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1085 } 1085 }
1086 1086
1087 /* allocate temporary buffer to store rings in */ 1087 /* allocate temporary buffer to store rings in */
1088 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); 1088 i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
1089 i = max_t(int, i, adapter->num_xdp_queues); 1089 adapter->num_rx_queues);
1090 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); 1090 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
1091 1091
1092 if (!temp_ring) { 1092 if (!temp_ring) {
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1118 } 1118 }
1119 } 1119 }
1120 1120
1121 for (i = 0; i < adapter->num_xdp_queues; i++) { 1121 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1122 memcpy(&temp_ring[i], adapter->xdp_ring[i], 1122 memcpy(&temp_ring[i], adapter->xdp_ring[j],
1123 sizeof(struct ixgbe_ring)); 1123 sizeof(struct ixgbe_ring));
1124 1124
1125 temp_ring[i].count = new_tx_count; 1125 temp_ring[i].count = new_tx_count;
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
1139 memcpy(adapter->tx_ring[i], &temp_ring[i], 1139 memcpy(adapter->tx_ring[i], &temp_ring[i],
1140 sizeof(struct ixgbe_ring)); 1140 sizeof(struct ixgbe_ring));
1141 } 1141 }
1142 for (i = 0; i < adapter->num_xdp_queues; i++) { 1142 for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
1143 ixgbe_free_tx_resources(adapter->xdp_ring[i]); 1143 ixgbe_free_tx_resources(adapter->xdp_ring[j]);
1144 1144
1145 memcpy(adapter->xdp_ring[i], &temp_ring[i], 1145 memcpy(adapter->xdp_ring[j], &temp_ring[i],
1146 sizeof(struct ixgbe_ring)); 1146 sizeof(struct ixgbe_ring));
1147 } 1147 }
1148 1148
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d962368d08d0..6d5f31e94358 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
4881 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) 4881 IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
4882 return; 4882 return;
4883 4883
4884 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; 4884 vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
4885 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); 4885 IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
4886 4886
4887 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) 4887 if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
8020 return 0; 8020 return 0;
8021dma_error: 8021dma_error:
8022 dev_err(tx_ring->dev, "TX DMA map failed\n"); 8022 dev_err(tx_ring->dev, "TX DMA map failed\n");
8023 tx_buffer = &tx_ring->tx_buffer_info[i];
8024 8023
8025 /* clear dma mappings for failed tx_buffer_info map */ 8024 /* clear dma mappings for failed tx_buffer_info map */
8026 while (tx_buffer != first) { 8025 for (;;) {
8026 tx_buffer = &tx_ring->tx_buffer_info[i];
8027 if (dma_unmap_len(tx_buffer, len)) 8027 if (dma_unmap_len(tx_buffer, len))
8028 dma_unmap_page(tx_ring->dev, 8028 dma_unmap_page(tx_ring->dev,
8029 dma_unmap_addr(tx_buffer, dma), 8029 dma_unmap_addr(tx_buffer, dma),
8030 dma_unmap_len(tx_buffer, len), 8030 dma_unmap_len(tx_buffer, len),
8031 DMA_TO_DEVICE); 8031 DMA_TO_DEVICE);
8032 dma_unmap_len_set(tx_buffer, len, 0); 8032 dma_unmap_len_set(tx_buffer, len, 0);
8033 8033 if (tx_buffer == first)
8034 if (i--) 8034 break;
8035 if (i == 0)
8035 i += tx_ring->count; 8036 i += tx_ring->count;
8036 tx_buffer = &tx_ring->tx_buffer_info[i]; 8037 i--;
8037 } 8038 }
8038 8039
8039 if (dma_unmap_len(tx_buffer, len))
8040 dma_unmap_single(tx_ring->dev,
8041 dma_unmap_addr(tx_buffer, dma),
8042 dma_unmap_len(tx_buffer, len),
8043 DMA_TO_DEVICE);
8044 dma_unmap_len_set(tx_buffer, len, 0);
8045
8046 dev_kfree_skb_any(first->skb); 8040 dev_kfree_skb_any(first->skb);
8047 first->skb = NULL; 8041 first->skb = NULL;
8048 8042
@@ -8529,6 +8523,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
8529 return ixgbe_ptp_set_ts_config(adapter, req); 8523 return ixgbe_ptp_set_ts_config(adapter, req);
8530 case SIOCGHWTSTAMP: 8524 case SIOCGHWTSTAMP:
8531 return ixgbe_ptp_get_ts_config(adapter, req); 8525 return ixgbe_ptp_get_ts_config(adapter, req);
8526 case SIOCGMIIPHY:
8527 if (!adapter->hw.phy.ops.read_reg)
8528 return -EOPNOTSUPP;
8529 /* fall through */
8532 default: 8530 default:
8533 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 8531 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
8534 } 8532 }
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index dd0ee2691c86..a37af5813f33 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -333,7 +333,7 @@
333#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) 333#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
334#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) 334#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
335#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) 335#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
336#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) 336#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
337#define MVPP2_GMAC_DISABLE_PADDING BIT(5) 337#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
338#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) 338#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
339#define MVPP2_GMAC_AUTONEG_CONFIG 0xc 339#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
676#define MVPP2_PRS_RI_L3_MCAST BIT(15) 676#define MVPP2_PRS_RI_L3_MCAST BIT(15)
677#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) 677#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
678#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 678#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
679#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
679#define MVPP2_PRS_RI_UDF3_MASK 0x300000 680#define MVPP2_PRS_RI_UDF3_MASK 0x300000
680#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) 681#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
681#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 682#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
@@ -792,6 +793,7 @@ struct mvpp2 {
792 struct clk *pp_clk; 793 struct clk *pp_clk;
793 struct clk *gop_clk; 794 struct clk *gop_clk;
794 struct clk *mg_clk; 795 struct clk *mg_clk;
796 struct clk *axi_clk;
795 797
796 /* List of pointers to port structures */ 798 /* List of pointers to port structures */
797 struct mvpp2_port **port_list; 799 struct mvpp2_port **port_list;
@@ -1165,6 +1167,11 @@ struct mvpp2_bm_pool {
1165 u32 port_map; 1167 u32 port_map;
1166}; 1168};
1167 1169
1170#define IS_TSO_HEADER(txq_pcpu, addr) \
1171 ((addr) >= (txq_pcpu)->tso_headers_dma && \
1172 (addr) < (txq_pcpu)->tso_headers_dma + \
1173 (txq_pcpu)->size * TSO_HEADER_SIZE)
1174
1168/* Queue modes */ 1175/* Queue modes */
1169#define MVPP2_QDIST_SINGLE_MODE 0 1176#define MVPP2_QDIST_SINGLE_MODE 0
1170#define MVPP2_QDIST_MULTI_MODE 1 1177#define MVPP2_QDIST_MULTI_MODE 1
@@ -1532,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1532 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); 1539 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1533 u16 tcam_data; 1540 u16 tcam_data;
1534 1541
1535 tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off]; 1542 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1536 if (tcam_data != data) 1543 if (tcam_data != data)
1537 return false; 1544 return false;
1538 return true; 1545 return true;
@@ -2315,7 +2322,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2315 (proto != IPPROTO_IGMP)) 2322 (proto != IPPROTO_IGMP))
2316 return -EINVAL; 2323 return -EINVAL;
2317 2324
2318 /* Fragmented packet */ 2325 /* Not fragmented packet */
2319 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2326 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2320 MVPP2_PE_LAST_FREE_TID); 2327 MVPP2_PE_LAST_FREE_TID);
2321 if (tid < 0) 2328 if (tid < 0)
@@ -2334,8 +2341,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2334 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2341 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2335 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, 2342 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2336 MVPP2_PRS_IPV4_DIP_AI_BIT); 2343 MVPP2_PRS_IPV4_DIP_AI_BIT);
2337 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, 2344 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2338 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); 2345
2346 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2347 MVPP2_PRS_TCAM_PROTO_MASK_L);
2348 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2349 MVPP2_PRS_TCAM_PROTO_MASK);
2339 2350
2340 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); 2351 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2341 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); 2352 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
@@ -2346,7 +2357,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2346 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 2357 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2347 mvpp2_prs_hw_write(priv, &pe); 2358 mvpp2_prs_hw_write(priv, &pe);
2348 2359
2349 /* Not fragmented packet */ 2360 /* Fragmented packet */
2350 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2361 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2351 MVPP2_PE_LAST_FREE_TID); 2362 MVPP2_PE_LAST_FREE_TID);
2352 if (tid < 0) 2363 if (tid < 0)
@@ -2358,8 +2369,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2358 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2369 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2359 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); 2370 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2360 2371
2361 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); 2372 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2362 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); 2373 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2374
2375 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2376 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
2363 2377
2364 /* Update shadow table and hw entry */ 2378 /* Update shadow table and hw entry */
2365 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); 2379 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
@@ -2600,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2600 /* place holders only - no ports */ 2614 /* place holders only - no ports */
2601 mvpp2_prs_mac_drop_all_set(priv, 0, false); 2615 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2602 mvpp2_prs_mac_promisc_set(priv, 0, false); 2616 mvpp2_prs_mac_promisc_set(priv, 0, false);
2603 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 2617 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
2604 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 2618 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
2605} 2619}
2606 2620
2607/* Set default entries for various types of dsa packets */ 2621/* Set default entries for various types of dsa packets */
@@ -3382,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3382 struct mvpp2_prs_entry *pe; 3396 struct mvpp2_prs_entry *pe;
3383 int tid; 3397 int tid;
3384 3398
3385 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3399 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3386 if (!pe) 3400 if (!pe)
3387 return NULL; 3401 return NULL;
3388 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3402 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -3444,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3444 if (tid < 0) 3458 if (tid < 0)
3445 return tid; 3459 return tid;
3446 3460
3447 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3461 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
3448 if (!pe) 3462 if (!pe)
3449 return -ENOMEM; 3463 return -ENOMEM;
3450 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3464 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -4591,7 +4605,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4591 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; 4605 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
4592 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { 4606 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
4593 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 4607 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4594 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4595 } 4608 }
4596 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 4609 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4597 4610
@@ -5313,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5313 struct mvpp2_txq_pcpu_buf *tx_buf = 5326 struct mvpp2_txq_pcpu_buf *tx_buf =
5314 txq_pcpu->buffs + txq_pcpu->txq_get_index; 5327 txq_pcpu->buffs + txq_pcpu->txq_get_index;
5315 5328
5316 dma_unmap_single(port->dev->dev.parent, tx_buf->dma, 5329 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5317 tx_buf->size, DMA_TO_DEVICE); 5330 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5331 tx_buf->size, DMA_TO_DEVICE);
5318 if (tx_buf->skb) 5332 if (tx_buf->skb)
5319 dev_kfree_skb_any(tx_buf->skb); 5333 dev_kfree_skb_any(tx_buf->skb);
5320 5334
@@ -5601,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
5601 5615
5602 txq_pcpu->tso_headers = 5616 txq_pcpu->tso_headers =
5603 dma_alloc_coherent(port->dev->dev.parent, 5617 dma_alloc_coherent(port->dev->dev.parent,
5604 MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, 5618 txq_pcpu->size * TSO_HEADER_SIZE,
5605 &txq_pcpu->tso_headers_dma, 5619 &txq_pcpu->tso_headers_dma,
5606 GFP_KERNEL); 5620 GFP_KERNEL);
5607 if (!txq_pcpu->tso_headers) 5621 if (!txq_pcpu->tso_headers)
@@ -5615,7 +5629,7 @@ cleanup:
5615 kfree(txq_pcpu->buffs); 5629 kfree(txq_pcpu->buffs);
5616 5630
5617 dma_free_coherent(port->dev->dev.parent, 5631 dma_free_coherent(port->dev->dev.parent,
5618 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5632 txq_pcpu->size * TSO_HEADER_SIZE,
5619 txq_pcpu->tso_headers, 5633 txq_pcpu->tso_headers,
5620 txq_pcpu->tso_headers_dma); 5634 txq_pcpu->tso_headers_dma);
5621 } 5635 }
@@ -5639,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
5639 kfree(txq_pcpu->buffs); 5653 kfree(txq_pcpu->buffs);
5640 5654
5641 dma_free_coherent(port->dev->dev.parent, 5655 dma_free_coherent(port->dev->dev.parent,
5642 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 5656 txq_pcpu->size * TSO_HEADER_SIZE,
5643 txq_pcpu->tso_headers, 5657 txq_pcpu->tso_headers,
5644 txq_pcpu->tso_headers_dma); 5658 txq_pcpu->tso_headers_dma);
5645 } 5659 }
@@ -6204,12 +6218,15 @@ static inline void
6204tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 6218tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
6205 struct mvpp2_tx_desc *desc) 6219 struct mvpp2_tx_desc *desc)
6206{ 6220{
6221 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6222
6207 dma_addr_t buf_dma_addr = 6223 dma_addr_t buf_dma_addr =
6208 mvpp2_txdesc_dma_addr_get(port, desc); 6224 mvpp2_txdesc_dma_addr_get(port, desc);
6209 size_t buf_sz = 6225 size_t buf_sz =
6210 mvpp2_txdesc_size_get(port, desc); 6226 mvpp2_txdesc_size_get(port, desc);
6211 dma_unmap_single(port->dev->dev.parent, buf_dma_addr, 6227 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6212 buf_sz, DMA_TO_DEVICE); 6228 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6229 buf_sz, DMA_TO_DEVICE);
6213 mvpp2_txq_desc_put(txq); 6230 mvpp2_txq_desc_put(txq);
6214} 6231}
6215 6232
@@ -6482,7 +6499,7 @@ out:
6482 } 6499 }
6483 6500
6484 /* Finalize TX processing */ 6501 /* Finalize TX processing */
6485 if (txq_pcpu->count >= txq->done_pkts_coal) 6502 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
6486 mvpp2_txq_done(port, txq, txq_pcpu); 6503 mvpp2_txq_done(port, txq, txq_pcpu);
6487 6504
6488 /* Set the timer in case not all frags were processed */ 6505 /* Set the timer in case not all frags were processed */
@@ -7496,7 +7513,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
7496/* Ports initialization */ 7513/* Ports initialization */
7497static int mvpp2_port_probe(struct platform_device *pdev, 7514static int mvpp2_port_probe(struct platform_device *pdev,
7498 struct device_node *port_node, 7515 struct device_node *port_node,
7499 struct mvpp2 *priv) 7516 struct mvpp2 *priv, int index)
7500{ 7517{
7501 struct device_node *phy_node; 7518 struct device_node *phy_node;
7502 struct phy *comphy; 7519 struct phy *comphy;
@@ -7670,7 +7687,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
7670 } 7687 }
7671 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 7688 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
7672 7689
7673 priv->port_list[id] = port; 7690 priv->port_list[index] = port;
7674 return 0; 7691 return 0;
7675 7692
7676err_free_port_pcpu: 7693err_free_port_pcpu:
@@ -7963,6 +7980,18 @@ static int mvpp2_probe(struct platform_device *pdev)
7963 err = clk_prepare_enable(priv->mg_clk); 7980 err = clk_prepare_enable(priv->mg_clk);
7964 if (err < 0) 7981 if (err < 0)
7965 goto err_gop_clk; 7982 goto err_gop_clk;
7983
7984 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
7985 if (IS_ERR(priv->axi_clk)) {
7986 err = PTR_ERR(priv->axi_clk);
7987 if (err == -EPROBE_DEFER)
7988 goto err_gop_clk;
7989 priv->axi_clk = NULL;
7990 } else {
7991 err = clk_prepare_enable(priv->axi_clk);
7992 if (err < 0)
7993 goto err_gop_clk;
7994 }
7966 } 7995 }
7967 7996
7968 /* Get system's tclk rate */ 7997 /* Get system's tclk rate */
@@ -8005,16 +8034,19 @@ static int mvpp2_probe(struct platform_device *pdev)
8005 } 8034 }
8006 8035
8007 /* Initialize ports */ 8036 /* Initialize ports */
8037 i = 0;
8008 for_each_available_child_of_node(dn, port_node) { 8038 for_each_available_child_of_node(dn, port_node) {
8009 err = mvpp2_port_probe(pdev, port_node, priv); 8039 err = mvpp2_port_probe(pdev, port_node, priv, i);
8010 if (err < 0) 8040 if (err < 0)
8011 goto err_mg_clk; 8041 goto err_mg_clk;
8042 i++;
8012 } 8043 }
8013 8044
8014 platform_set_drvdata(pdev, priv); 8045 platform_set_drvdata(pdev, priv);
8015 return 0; 8046 return 0;
8016 8047
8017err_mg_clk: 8048err_mg_clk:
8049 clk_disable_unprepare(priv->axi_clk);
8018 if (priv->hw_version == MVPP22) 8050 if (priv->hw_version == MVPP22)
8019 clk_disable_unprepare(priv->mg_clk); 8051 clk_disable_unprepare(priv->mg_clk);
8020err_gop_clk: 8052err_gop_clk:
@@ -8052,6 +8084,7 @@ static int mvpp2_remove(struct platform_device *pdev)
8052 aggr_txq->descs_dma); 8084 aggr_txq->descs_dma);
8053 } 8085 }
8054 8086
8087 clk_disable_unprepare(priv->axi_clk);
8055 clk_disable_unprepare(priv->mg_clk); 8088 clk_disable_unprepare(priv->mg_clk);
8056 clk_disable_unprepare(priv->pp_clk); 8089 clk_disable_unprepare(priv->pp_clk);
8057 clk_disable_unprepare(priv->gop_clk); 8090 clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ff60cf7342ca..fc281712869b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
77 list_add_tail(&delayed_event->list, &priv->waiting_events_list); 77 list_add_tail(&delayed_event->list, &priv->waiting_events_list);
78} 78}
79 79
80static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, 80static void delayed_event_release(struct mlx5_device_context *dev_ctx,
81 struct mlx5_core_dev *dev, 81 struct mlx5_priv *priv)
82 struct mlx5_priv *priv)
83{ 82{
83 struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
84 struct mlx5_delayed_event *de; 84 struct mlx5_delayed_event *de;
85 struct mlx5_delayed_event *n; 85 struct mlx5_delayed_event *n;
86 struct list_head temp;
86 87
87 /* stop delaying events */ 88 INIT_LIST_HEAD(&temp);
88 priv->is_accum_events = false; 89
90 spin_lock_irq(&priv->ctx_lock);
89 91
90 /* fire all accumulated events before new event comes */ 92 priv->is_accum_events = false;
91 list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { 93 list_splice_init(&priv->waiting_events_list, &temp);
94 if (!dev_ctx->context)
95 goto out;
96 list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
92 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); 97 dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
98
99out:
100 spin_unlock_irq(&priv->ctx_lock);
101
102 list_for_each_entry_safe(de, n, &temp, list) {
93 list_del(&de->list); 103 list_del(&de->list);
94 kfree(de); 104 kfree(de);
95 } 105 }
96} 106}
97 107
98static void cleanup_delayed_evets(struct mlx5_priv *priv) 108/* accumulating events that can come after mlx5_ib calls to
109 * ib_register_device, till adding that interface to the events list.
110 */
111static void delayed_event_start(struct mlx5_priv *priv)
99{ 112{
100 struct mlx5_delayed_event *de;
101 struct mlx5_delayed_event *n;
102
103 spin_lock_irq(&priv->ctx_lock); 113 spin_lock_irq(&priv->ctx_lock);
104 priv->is_accum_events = false; 114 priv->is_accum_events = true;
105 list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
106 list_del(&de->list);
107 kfree(de);
108 }
109 spin_unlock_irq(&priv->ctx_lock); 115 spin_unlock_irq(&priv->ctx_lock);
110} 116}
111 117
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
122 return; 128 return;
123 129
124 dev_ctx->intf = intf; 130 dev_ctx->intf = intf;
125 /* accumulating events that can come after mlx5_ib calls to
126 * ib_register_device, till adding that interface to the events list.
127 */
128 131
129 priv->is_accum_events = true; 132 delayed_event_start(priv);
130 133
131 dev_ctx->context = intf->add(dev); 134 dev_ctx->context = intf->add(dev);
132 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 135 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
137 spin_lock_irq(&priv->ctx_lock); 140 spin_lock_irq(&priv->ctx_lock);
138 list_add_tail(&dev_ctx->list, &priv->ctx_list); 141 list_add_tail(&dev_ctx->list, &priv->ctx_list);
139 142
140 fire_delayed_event_locked(dev_ctx, dev, priv);
141
142#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 143#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
143 if (dev_ctx->intf->pfault) { 144 if (dev_ctx->intf->pfault) {
144 if (priv->pfault) { 145 if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
150 } 151 }
151#endif 152#endif
152 spin_unlock_irq(&priv->ctx_lock); 153 spin_unlock_irq(&priv->ctx_lock);
153 } else {
154 kfree(dev_ctx);
155 /* delete all accumulated events */
156 cleanup_delayed_evets(priv);
157 } 154 }
155
156 delayed_event_release(dev_ctx, priv);
157
158 if (!dev_ctx->context)
159 kfree(dev_ctx);
158} 160}
159 161
160static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, 162static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
205 if (!dev_ctx) 207 if (!dev_ctx)
206 return; 208 return;
207 209
210 delayed_event_start(priv);
208 if (intf->attach) { 211 if (intf->attach) {
209 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
210 return; 213 goto out;
211 intf->attach(dev, dev_ctx->context); 214 intf->attach(dev, dev_ctx->context);
212 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 215 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
213 } else { 216 } else {
214 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 217 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
215 return; 218 goto out;
216 dev_ctx->context = intf->add(dev); 219 dev_ctx->context = intf->add(dev);
217 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 220 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
218 } 221 }
222
223out:
224 delayed_event_release(dev_ctx, priv);
219} 225}
220 226
221void mlx5_attach_device(struct mlx5_core_dev *dev) 227void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
414 if (priv->is_accum_events) 420 if (priv->is_accum_events)
415 add_delayed_event(priv, dev, event, param); 421 add_delayed_event(priv, dev, event, param);
416 422
423 /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
424 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
425 * ADDED or ATTACHED bit are set.
426 */
417 list_for_each_entry(dev_ctx, &priv->ctx_list, list) 427 list_for_each_entry(dev_ctx, &priv->ctx_list, list)
418 if (dev_ctx->intf->event) 428 if (dev_ctx->intf->event &&
429 (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
430 test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
419 dev_ctx->intf->event(dev, dev_ctx->context, event, param); 431 dev_ctx->intf->event(dev, dev_ctx->context, event, param);
420 432
421 spin_unlock_irqrestore(&priv->ctx_lock, flags); 433 spin_unlock_irqrestore(&priv->ctx_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 1e3a6c3e4132..80eef4163f52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
139 {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} 139 {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
140 140
141TRACE_EVENT(mlx5_fs_set_fte, 141TRACE_EVENT(mlx5_fs_set_fte,
142 TP_PROTO(const struct fs_fte *fte, bool new_fte), 142 TP_PROTO(const struct fs_fte *fte, int new_fte),
143 TP_ARGS(fte, new_fte), 143 TP_ARGS(fte, new_fte),
144 TP_STRUCT__entry( 144 TP_STRUCT__entry(
145 __field(const struct fs_fte *, fte) 145 __field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
149 __field(u32, action) 149 __field(u32, action)
150 __field(u32, flow_tag) 150 __field(u32, flow_tag)
151 __field(u8, mask_enable) 151 __field(u8, mask_enable)
152 __field(bool, new_fte) 152 __field(int, new_fte)
153 __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) 153 __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
154 __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) 154 __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
155 __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) 155 __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c1d384fca4dc..51c4cc00a186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
41#define MLX5E_CEE_STATE_UP 1 41#define MLX5E_CEE_STATE_UP 1
42#define MLX5E_CEE_STATE_DOWN 0 42#define MLX5E_CEE_STATE_DOWN 0
43 43
44enum {
45 MLX5E_VENDOR_TC_GROUP_NUM = 7,
46 MLX5E_LOWEST_PRIO_GROUP = 0,
47};
48
44/* If dcbx mode is non-host set the dcbx mode to host. 49/* If dcbx mode is non-host set the dcbx mode to host.
45 */ 50 */
46static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, 51static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
85{ 90{
86 struct mlx5e_priv *priv = netdev_priv(netdev); 91 struct mlx5e_priv *priv = netdev_priv(netdev);
87 struct mlx5_core_dev *mdev = priv->mdev; 92 struct mlx5_core_dev *mdev = priv->mdev;
93 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
94 bool is_tc_group_6_exist = false;
95 bool is_zero_bw_ets_tc = false;
88 int err = 0; 96 int err = 0;
89 int i; 97 int i;
90 98
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
96 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); 104 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
97 if (err) 105 if (err)
98 return err; 106 return err;
99 }
100 107
101 for (i = 0; i < ets->ets_cap; i++) { 108 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
109 if (err)
110 return err;
111
102 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); 112 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
103 if (err) 113 if (err)
104 return err; 114 return err;
115
116 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
117 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
118 is_zero_bw_ets_tc = true;
119
120 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
121 is_tc_group_6_exist = true;
122 }
123
124 /* Report 0% ets tc if exits*/
125 if (is_zero_bw_ets_tc) {
126 for (i = 0; i < ets->ets_cap; i++)
127 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
128 ets->tc_tx_bw[i] = 0;
129 }
130
131 /* Update tc_tsa based on fw setting*/
132 for (i = 0; i < ets->ets_cap; i++) {
105 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) 133 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
106 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; 134 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
135 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
136 !is_tc_group_6_exist)
137 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
107 } 138 }
108
109 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); 139 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
110 140
111 return err; 141 return err;
112} 142}
113 143
114enum {
115 MLX5E_VENDOR_TC_GROUP_NUM = 7,
116 MLX5E_ETS_TC_GROUP_NUM = 0,
117};
118
119static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) 144static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
120{ 145{
121 bool any_tc_mapped_to_ets = false; 146 bool any_tc_mapped_to_ets = false;
147 bool ets_zero_bw = false;
122 int strict_group; 148 int strict_group;
123 int i; 149 int i;
124 150
125 for (i = 0; i <= max_tc; i++) 151 for (i = 0; i <= max_tc; i++) {
126 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) 152 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
127 any_tc_mapped_to_ets = true; 153 any_tc_mapped_to_ets = true;
154 if (!ets->tc_tx_bw[i])
155 ets_zero_bw = true;
156 }
157 }
128 158
129 strict_group = any_tc_mapped_to_ets ? 1 : 0; 159 /* strict group has higher priority than ets group */
160 strict_group = MLX5E_LOWEST_PRIO_GROUP;
161 if (any_tc_mapped_to_ets)
162 strict_group++;
163 if (ets_zero_bw)
164 strict_group++;
130 165
131 for (i = 0; i <= max_tc; i++) { 166 for (i = 0; i <= max_tc; i++) {
132 switch (ets->tc_tsa[i]) { 167 switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
137 tc_group[i] = strict_group++; 172 tc_group[i] = strict_group++;
138 break; 173 break;
139 case IEEE_8021QAZ_TSA_ETS: 174 case IEEE_8021QAZ_TSA_ETS:
140 tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; 175 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
176 if (ets->tc_tx_bw[i] && ets_zero_bw)
177 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
141 break; 178 break;
142 } 179 }
143 } 180 }
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
146static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, 183static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
147 u8 *tc_group, int max_tc) 184 u8 *tc_group, int max_tc)
148{ 185{
186 int bw_for_ets_zero_bw_tc = 0;
187 int last_ets_zero_bw_tc = -1;
188 int num_ets_zero_bw = 0;
149 int i; 189 int i;
150 190
151 for (i = 0; i <= max_tc; i++) { 191 for (i = 0; i <= max_tc; i++) {
192 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
193 !ets->tc_tx_bw[i]) {
194 num_ets_zero_bw++;
195 last_ets_zero_bw_tc = i;
196 }
197 }
198
199 if (num_ets_zero_bw)
200 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
201
202 for (i = 0; i <= max_tc; i++) {
152 switch (ets->tc_tsa[i]) { 203 switch (ets->tc_tsa[i]) {
153 case IEEE_8021QAZ_TSA_VENDOR: 204 case IEEE_8021QAZ_TSA_VENDOR:
154 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 205 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
157 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; 208 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
158 break; 209 break;
159 case IEEE_8021QAZ_TSA_ETS: 210 case IEEE_8021QAZ_TSA_ETS:
160 tc_tx_bw[i] = ets->tc_tx_bw[i]; 211 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
212 ets->tc_tx_bw[i] :
213 bw_for_ets_zero_bw_tc;
161 break; 214 break;
162 } 215 }
163 } 216 }
217
218 /* Make sure the total bw for ets zero bw group is 100% */
219 if (last_ets_zero_bw_tc != -1)
220 tc_tx_bw[last_ets_zero_bw_tc] +=
221 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
164} 222}
165 223
224/* If there are ETS BW 0,
225 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
226 * Set group #0 to all the ETS BW 0 tcs and
227 * equally splits the 100% BW between them
228 * Report both group #0 and #1 as ETS type.
229 * All the tcs in group #0 will be reported with 0% BW.
230 */
166int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) 231int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
167{ 232{
168 struct mlx5_core_dev *mdev = priv->mdev; 233 struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
188 return err; 253 return err;
189 254
190 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); 255 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
191
192 return err; 256 return err;
193} 257}
194 258
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
209 } 273 }
210 274
211 /* Validate Bandwidth Sum */ 275 /* Validate Bandwidth Sum */
212 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 276 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
213 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { 277 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
214 if (!ets->tc_tx_bw[i]) {
215 netdev_err(netdev,
216 "Failed to validate ETS: BW 0 is illegal\n");
217 return -EINVAL;
218 }
219
220 bw_sum += ets->tc_tx_bw[i]; 278 bw_sum += ets->tc_tx_bw[i];
221 }
222 }
223 279
224 if (bw_sum != 0 && bw_sum != 100) { 280 if (bw_sum != 0 && bw_sum != 100) {
225 netdev_err(netdev, 281 netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
533static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, 589static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
534 int pgid, u8 *bw_pct) 590 int pgid, u8 *bw_pct)
535{ 591{
536 struct mlx5e_priv *priv = netdev_priv(netdev); 592 struct ieee_ets ets;
537 struct mlx5_core_dev *mdev = priv->mdev;
538 593
539 if (pgid >= CEE_DCBX_MAX_PGS) { 594 if (pgid >= CEE_DCBX_MAX_PGS) {
540 netdev_err(netdev, 595 netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
542 return; 597 return;
543 } 598 }
544 599
545 if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct)) 600 mlx5e_dcbnl_ieee_getets(netdev, &ets);
546 *bw_pct = 0; 601 *bw_pct = ets.tc_tx_bw[pgid];
547} 602}
548 603
549static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, 604static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
739 ets.prio_tc[i] = i; 794 ets.prio_tc[i] = i;
740 } 795 }
741 796
742 memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
743
744 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ 797 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
745 ets.prio_tc[0] = 1; 798 ets.prio_tc[0] = 1;
746 ets.prio_tc[1] = 0; 799 ets.prio_tc[1] = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f11fd07ac4dd..850cdc980ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
291 priv->fs.vlan.filter_disabled = false; 291 priv->fs.vlan.filter_disabled = false;
292 if (priv->netdev->flags & IFF_PROMISC) 292 if (priv->netdev->flags & IFF_PROMISC)
293 return; 293 return;
294 mlx5e_del_any_vid_rules(priv); 294 mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
295} 295}
296 296
297void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) 297void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
302 priv->fs.vlan.filter_disabled = true; 302 priv->fs.vlan.filter_disabled = true;
303 if (priv->netdev->flags & IFF_PROMISC) 303 if (priv->netdev->flags & IFF_PROMISC)
304 return; 304 return;
305 mlx5e_add_any_vid_rules(priv); 305 mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
306} 306}
307 307
308int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, 308int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dfc29720ab77..cc11bbbd0309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
184 struct mlx5e_sw_stats temp, *s = &temp; 184 struct mlx5e_sw_stats temp, *s = &temp;
185 struct mlx5e_rq_stats *rq_stats; 185 struct mlx5e_rq_stats *rq_stats;
186 struct mlx5e_sq_stats *sq_stats; 186 struct mlx5e_sq_stats *sq_stats;
187 u64 tx_offload_none = 0;
188 int i, j; 187 int i, j;
189 188
190 memset(s, 0, sizeof(*s)); 189 memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
199 s->rx_lro_bytes += rq_stats->lro_bytes; 198 s->rx_lro_bytes += rq_stats->lro_bytes;
200 s->rx_csum_none += rq_stats->csum_none; 199 s->rx_csum_none += rq_stats->csum_none;
201 s->rx_csum_complete += rq_stats->csum_complete; 200 s->rx_csum_complete += rq_stats->csum_complete;
201 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
202 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; 202 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
203 s->rx_xdp_drop += rq_stats->xdp_drop; 203 s->rx_xdp_drop += rq_stats->xdp_drop;
204 s->rx_xdp_tx += rq_stats->xdp_tx; 204 s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
229 s->tx_queue_dropped += sq_stats->dropped; 229 s->tx_queue_dropped += sq_stats->dropped;
230 s->tx_xmit_more += sq_stats->xmit_more; 230 s->tx_xmit_more += sq_stats->xmit_more;
231 s->tx_csum_partial_inner += sq_stats->csum_partial_inner; 231 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
232 tx_offload_none += sq_stats->csum_none; 232 s->tx_csum_none += sq_stats->csum_none;
233 s->tx_csum_partial += sq_stats->csum_partial;
233 } 234 }
234 } 235 }
235 236
236 /* Update calculated offload counters */
237 s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
238 s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
239
240 s->link_down_events_phy = MLX5_GET(ppcnt_reg, 237 s->link_down_events_phy = MLX5_GET(ppcnt_reg,
241 priv->stats.pport.phy_counters, 238 priv->stats.pport.phy_counters,
242 counter_set.phys_layer_cntrs.link_down_events); 239 counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,
3333 3330
3334 err = feature_handler(netdev, enable); 3331 err = feature_handler(netdev, enable);
3335 if (err) { 3332 if (err) {
3336 netdev_err(netdev, "%s feature 0x%llx failed err %d\n", 3333 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3337 enable ? "Enable" : "Disable", feature, err); 3334 enable ? "Enable" : "Disable", &feature, err);
3338 return err; 3335 return err;
3339 } 3336 }
3340 3337
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f1dd638384d3..15a1687483cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
627 627
628 if (lro) { 628 if (lro) {
629 skb->ip_summed = CHECKSUM_UNNECESSARY; 629 skb->ip_summed = CHECKSUM_UNNECESSARY;
630 rq->stats.csum_unnecessary++;
630 return; 631 return;
631 } 632 }
632 633
@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
644 skb->csum_level = 1; 645 skb->csum_level = 1;
645 skb->encapsulation = 1; 646 skb->encapsulation = 1;
646 rq->stats.csum_unnecessary_inner++; 647 rq->stats.csum_unnecessary_inner++;
648 return;
647 } 649 }
650 rq->stats.csum_unnecessary++;
648 return; 651 return;
649 } 652 }
650csum_none: 653csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6d199ffb1c0b..f8637213afc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
68 u64 rx_xdp_drop; 68 u64 rx_xdp_drop;
69 u64 rx_xdp_tx; 69 u64 rx_xdp_tx;
70 u64 rx_xdp_tx_full; 70 u64 rx_xdp_tx_full;
71 u64 tx_csum_none;
71 u64 tx_csum_partial; 72 u64 tx_csum_partial;
72 u64 tx_csum_partial_inner; 73 u64 tx_csum_partial_inner;
73 u64 tx_queue_stopped; 74 u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
108 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, 109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, 110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, 111 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
111 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, 113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, 114 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, 115 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
339 u64 packets; 341 u64 packets;
340 u64 bytes; 342 u64 bytes;
341 u64 csum_complete; 343 u64 csum_complete;
344 u64 csum_unnecessary;
342 u64 csum_unnecessary_inner; 345 u64 csum_unnecessary_inner;
343 u64 csum_none; 346 u64 csum_none;
344 u64 lro_packets; 347 u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
363 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, 366 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
364 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, 367 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
365 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, 368 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
369 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
366 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, 370 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
367 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, 371 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
368 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, 372 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
392 u64 tso_bytes; 396 u64 tso_bytes;
393 u64 tso_inner_packets; 397 u64 tso_inner_packets;
394 u64 tso_inner_bytes; 398 u64 tso_inner_bytes;
399 u64 csum_partial;
395 u64 csum_partial_inner; 400 u64 csum_partial_inner;
396 u64 nop; 401 u64 nop;
397 /* less likely accessed in data path */ 402 /* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
408 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, 413 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
409 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, 414 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
410 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, 415 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
416 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
411 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, 417 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
412 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, 418 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
413 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, 419 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index da503e6411da..9ba1f72060aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
78}; 78};
79 79
80struct mlx5e_tc_flow_parse_attr { 80struct mlx5e_tc_flow_parse_attr {
81 struct ip_tunnel_info tun_info;
81 struct mlx5_flow_spec spec; 82 struct mlx5_flow_spec spec;
82 int num_mod_hdr_actions; 83 int num_mod_hdr_actions;
83 void *mod_hdr_actions; 84 void *mod_hdr_actions;
85 int mirred_ifindex;
84}; 86};
85 87
86enum { 88enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
322static void mlx5e_detach_encap(struct mlx5e_priv *priv, 324static void mlx5e_detach_encap(struct mlx5e_priv *priv,
323 struct mlx5e_tc_flow *flow); 325 struct mlx5e_tc_flow *flow);
324 326
327static int mlx5e_attach_encap(struct mlx5e_priv *priv,
328 struct ip_tunnel_info *tun_info,
329 struct net_device *mirred_dev,
330 struct net_device **encap_dev,
331 struct mlx5e_tc_flow *flow);
332
325static struct mlx5_flow_handle * 333static struct mlx5_flow_handle *
326mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, 334mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
327 struct mlx5e_tc_flow_parse_attr *parse_attr, 335 struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
329{ 337{
330 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 338 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
331 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 339 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
332 struct mlx5_flow_handle *rule; 340 struct net_device *out_dev, *encap_dev = NULL;
341 struct mlx5_flow_handle *rule = NULL;
342 struct mlx5e_rep_priv *rpriv;
343 struct mlx5e_priv *out_priv;
333 int err; 344 int err;
334 345
346 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
347 out_dev = __dev_get_by_index(dev_net(priv->netdev),
348 attr->parse_attr->mirred_ifindex);
349 err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
350 out_dev, &encap_dev, flow);
351 if (err) {
352 rule = ERR_PTR(err);
353 if (err != -EAGAIN)
354 goto err_attach_encap;
355 }
356 out_priv = netdev_priv(encap_dev);
357 rpriv = out_priv->ppriv;
358 attr->out_rep = rpriv->rep;
359 }
360
335 err = mlx5_eswitch_add_vlan_action(esw, attr); 361 err = mlx5_eswitch_add_vlan_action(esw, attr);
336 if (err) { 362 if (err) {
337 rule = ERR_PTR(err); 363 rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
347 } 373 }
348 } 374 }
349 375
350 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); 376 /* we get here if (1) there's no error (rule being null) or when
351 if (IS_ERR(rule)) 377 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
352 goto err_add_rule; 378 */
353 379 if (rule != ERR_PTR(-EAGAIN)) {
380 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
381 if (IS_ERR(rule))
382 goto err_add_rule;
383 }
354 return rule; 384 return rule;
355 385
356err_add_rule: 386err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
361err_add_vlan: 391err_add_vlan:
362 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) 392 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
363 mlx5e_detach_encap(priv, flow); 393 mlx5e_detach_encap(priv, flow);
394err_attach_encap:
364 return rule; 395 return rule;
365} 396}
366 397
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
389void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, 420void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
390 struct mlx5e_encap_entry *e) 421 struct mlx5e_encap_entry *e)
391{ 422{
423 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
424 struct mlx5_esw_flow_attr *esw_attr;
392 struct mlx5e_tc_flow *flow; 425 struct mlx5e_tc_flow *flow;
393 int err; 426 int err;
394 427
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
404 mlx5e_rep_queue_neigh_stats_work(priv); 437 mlx5e_rep_queue_neigh_stats_work(priv);
405 438
406 list_for_each_entry(flow, &e->flows, encap) { 439 list_for_each_entry(flow, &e->flows, encap) {
407 flow->esw_attr->encap_id = e->encap_id; 440 esw_attr = flow->esw_attr;
408 flow->rule = mlx5e_tc_add_fdb_flow(priv, 441 esw_attr->encap_id = e->encap_id;
409 flow->esw_attr->parse_attr, 442 flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
410 flow);
411 if (IS_ERR(flow->rule)) { 443 if (IS_ERR(flow->rule)) {
412 err = PTR_ERR(flow->rule); 444 err = PTR_ERR(flow->rule);
413 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", 445 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
421void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, 453void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
422 struct mlx5e_encap_entry *e) 454 struct mlx5e_encap_entry *e)
423{ 455{
456 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
424 struct mlx5e_tc_flow *flow; 457 struct mlx5e_tc_flow *flow;
425 struct mlx5_fc *counter;
426 458
427 list_for_each_entry(flow, &e->flows, encap) { 459 list_for_each_entry(flow, &e->flows, encap) {
428 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { 460 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
429 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; 461 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
430 counter = mlx5_flow_rule_counter(flow->rule); 462 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
431 mlx5_del_flow_rules(flow->rule);
432 mlx5_fc_destroy(priv->mdev, counter);
433 } 463 }
434 } 464 }
435 465
@@ -1317,6 +1347,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
1317 return true; 1347 return true;
1318} 1348}
1319 1349
1350static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1351 struct tcf_exts *exts)
1352{
1353 const struct tc_action *a;
1354 bool modify_ip_header;
1355 LIST_HEAD(actions);
1356 u8 htype, ip_proto;
1357 void *headers_v;
1358 u16 ethertype;
1359 int nkeys, i;
1360
1361 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1362 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1363
1364 /* for non-IP we only re-write MACs, so we're okay */
1365 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
1366 goto out_ok;
1367
1368 modify_ip_header = false;
1369 tcf_exts_to_list(exts, &actions);
1370 list_for_each_entry(a, &actions, list) {
1371 if (!is_tcf_pedit(a))
1372 continue;
1373
1374 nkeys = tcf_pedit_nkeys(a);
1375 for (i = 0; i < nkeys; i++) {
1376 htype = tcf_pedit_htype(a, i);
1377 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1378 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1379 modify_ip_header = true;
1380 break;
1381 }
1382 }
1383 }
1384
1385 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1386 if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
1387 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1388 return false;
1389 }
1390
1391out_ok:
1392 return true;
1393}
1394
1395static bool actions_match_supported(struct mlx5e_priv *priv,
1396 struct tcf_exts *exts,
1397 struct mlx5e_tc_flow_parse_attr *parse_attr,
1398 struct mlx5e_tc_flow *flow)
1399{
1400 u32 actions;
1401
1402 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1403 actions = flow->esw_attr->action;
1404 else
1405 actions = flow->nic_attr->action;
1406
1407 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1408 return modify_header_match_supported(&parse_attr->spec, exts);
1409
1410 return true;
1411}
1412
1320static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 1413static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1321 struct mlx5e_tc_flow_parse_attr *parse_attr, 1414 struct mlx5e_tc_flow_parse_attr *parse_attr,
1322 struct mlx5e_tc_flow *flow) 1415 struct mlx5e_tc_flow *flow)
@@ -1378,6 +1471,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1378 return -EINVAL; 1471 return -EINVAL;
1379 } 1472 }
1380 1473
1474 if (!actions_match_supported(priv, exts, parse_attr, flow))
1475 return -EOPNOTSUPP;
1476
1381 return 0; 1477 return 0;
1382} 1478}
1383 1479
@@ -1564,7 +1660,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1564 break; 1660 break;
1565 default: 1661 default:
1566 err = -EOPNOTSUPP; 1662 err = -EOPNOTSUPP;
1567 goto out; 1663 goto free_encap;
1568 } 1664 }
1569 fl4.flowi4_tos = tun_key->tos; 1665 fl4.flowi4_tos = tun_key->tos;
1570 fl4.daddr = tun_key->u.ipv4.dst; 1666 fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1669,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1573 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, 1669 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
1574 &fl4, &n, &ttl); 1670 &fl4, &n, &ttl);
1575 if (err) 1671 if (err)
1576 goto out; 1672 goto free_encap;
1577 1673
1578 /* used by mlx5e_detach_encap to lookup a neigh hash table 1674 /* used by mlx5e_detach_encap to lookup a neigh hash table
1579 * entry in the neigh hash table when a user deletes a rule 1675 * entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1686,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1590 */ 1686 */
1591 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 1687 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1592 if (err) 1688 if (err)
1593 goto out; 1689 goto free_encap;
1594 1690
1595 read_lock_bh(&n->lock); 1691 read_lock_bh(&n->lock);
1596 nud_state = n->nud_state; 1692 nud_state = n->nud_state;
@@ -1630,8 +1726,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1630 1726
1631destroy_neigh_entry: 1727destroy_neigh_entry:
1632 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 1728 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1633out: 1729free_encap:
1634 kfree(encap_header); 1730 kfree(encap_header);
1731out:
1635 if (n) 1732 if (n)
1636 neigh_release(n); 1733 neigh_release(n);
1637 return err; 1734 return err;
@@ -1668,7 +1765,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1668 break; 1765 break;
1669 default: 1766 default:
1670 err = -EOPNOTSUPP; 1767 err = -EOPNOTSUPP;
1671 goto out; 1768 goto free_encap;
1672 } 1769 }
1673 1770
1674 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); 1771 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1775,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1678 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, 1775 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
1679 &fl6, &n, &ttl); 1776 &fl6, &n, &ttl);
1680 if (err) 1777 if (err)
1681 goto out; 1778 goto free_encap;
1682 1779
1683 /* used by mlx5e_detach_encap to lookup a neigh hash table 1780 /* used by mlx5e_detach_encap to lookup a neigh hash table
1684 * entry in the neigh hash table when a user deletes a rule 1781 * entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1792,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1695 */ 1792 */
1696 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); 1793 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1697 if (err) 1794 if (err)
1698 goto out; 1795 goto free_encap;
1699 1796
1700 read_lock_bh(&n->lock); 1797 read_lock_bh(&n->lock);
1701 nud_state = n->nud_state; 1798 nud_state = n->nud_state;
@@ -1736,8 +1833,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1736 1833
1737destroy_neigh_entry: 1834destroy_neigh_entry:
1738 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); 1835 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1739out: 1836free_encap:
1740 kfree(encap_header); 1837 kfree(encap_header);
1838out:
1741 if (n) 1839 if (n)
1742 neigh_release(n); 1840 neigh_release(n);
1743 return err; 1841 return err;
@@ -1791,6 +1889,7 @@ vxlan_encap_offload_err:
1791 } 1889 }
1792 } 1890 }
1793 1891
1892 /* must verify if encap is valid or not */
1794 if (found) 1893 if (found)
1795 goto attach_flow; 1894 goto attach_flow;
1796 1895
@@ -1817,6 +1916,8 @@ attach_flow:
1817 *encap_dev = e->out_dev; 1916 *encap_dev = e->out_dev;
1818 if (e->flags & MLX5_ENCAP_ENTRY_VALID) 1917 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1819 attr->encap_id = e->encap_id; 1918 attr->encap_id = e->encap_id;
1919 else
1920 err = -EAGAIN;
1820 1921
1821 return err; 1922 return err;
1822 1923
@@ -1871,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1871 1972
1872 if (is_tcf_mirred_egress_redirect(a)) { 1973 if (is_tcf_mirred_egress_redirect(a)) {
1873 int ifindex = tcf_mirred_ifindex(a); 1974 int ifindex = tcf_mirred_ifindex(a);
1874 struct net_device *out_dev, *encap_dev = NULL; 1975 struct net_device *out_dev;
1875 struct mlx5e_priv *out_priv; 1976 struct mlx5e_priv *out_priv;
1876 1977
1877 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); 1978 out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1884,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1884 rpriv = out_priv->ppriv; 1985 rpriv = out_priv->ppriv;
1885 attr->out_rep = rpriv->rep; 1986 attr->out_rep = rpriv->rep;
1886 } else if (encap) { 1987 } else if (encap) {
1887 err = mlx5e_attach_encap(priv, info, 1988 parse_attr->mirred_ifindex = ifindex;
1888 out_dev, &encap_dev, flow); 1989 parse_attr->tun_info = *info;
1889 if (err && err != -EAGAIN) 1990 attr->parse_attr = parse_attr;
1890 return err;
1891 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | 1991 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1892 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | 1992 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1893 MLX5_FLOW_CONTEXT_ACTION_COUNT; 1993 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1894 out_priv = netdev_priv(encap_dev); 1994 /* attr->out_rep is resolved when we handle encap */
1895 rpriv = out_priv->ppriv;
1896 attr->out_rep = rpriv->rep;
1897 attr->parse_attr = parse_attr;
1898 } else { 1995 } else {
1899 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", 1996 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1900 priv->netdev->name, out_dev->name); 1997 priv->netdev->name, out_dev->name);
@@ -1934,6 +2031,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1934 2031
1935 return -EINVAL; 2032 return -EINVAL;
1936 } 2033 }
2034
2035 if (!actions_match_supported(priv, exts, parse_attr, flow))
2036 return -EOPNOTSUPP;
2037
1937 return err; 2038 return err;
1938} 2039}
1939 2040
@@ -1972,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
1972 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 2073 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1973 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); 2074 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
1974 if (err < 0) 2075 if (err < 0)
1975 goto err_handle_encap_flow; 2076 goto err_free;
1976 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); 2077 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
1977 } else { 2078 } else {
1978 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); 2079 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -1983,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
1983 2084
1984 if (IS_ERR(flow->rule)) { 2085 if (IS_ERR(flow->rule)) {
1985 err = PTR_ERR(flow->rule); 2086 err = PTR_ERR(flow->rule);
1986 goto err_free; 2087 if (err != -EAGAIN)
2088 goto err_free;
1987 } 2089 }
1988 2090
1989 flow->flags |= MLX5E_TC_FLOW_OFFLOADED; 2091 if (err != -EAGAIN)
2092 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2093
1990 err = rhashtable_insert_fast(&tc->ht, &flow->node, 2094 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1991 tc->ht_params); 2095 tc->ht_params);
1992 if (err) 2096 if (err)
@@ -2000,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
2000err_del_rule: 2104err_del_rule:
2001 mlx5e_tc_del_flow(priv, flow); 2105 mlx5e_tc_del_flow(priv, flow);
2002 2106
2003err_handle_encap_flow:
2004 if (err == -EAGAIN) {
2005 err = rhashtable_insert_fast(&tc->ht, &flow->node,
2006 tc->ht_params);
2007 if (err)
2008 mlx5e_tc_del_flow(priv, flow);
2009 else
2010 return 0;
2011 }
2012
2013err_free: 2107err_free:
2014 kvfree(parse_attr); 2108 kvfree(parse_attr);
2015 kfree(flow); 2109 kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fee43e40fa16..1d6925d4369a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
193 sq->stats.csum_partial_inner++; 193 sq->stats.csum_partial_inner++;
194 } else { 194 } else {
195 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 195 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
196 sq->stats.csum_partial++;
196 } 197 }
197 } else 198 } else
198 sq->stats.csum_none++; 199 sq->stats.csum_none++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index e37453d838db..c0fd2212e890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
71 return 0; 71 return 0;
72} 72}
73 73
74int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) 74int mlx5_fpga_caps(struct mlx5_core_dev *dev)
75{ 75{
76 u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; 76 u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};
77 77
78 return mlx5_core_access_reg(dev, in, sizeof(in), caps, 78 return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
79 MLX5_ST_SZ_BYTES(fpga_cap), 79 MLX5_ST_SZ_BYTES(fpga_cap),
80 MLX5_REG_FPGA_CAP, 0, 0); 80 MLX5_REG_FPGA_CAP, 0, 0);
81} 81}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index 94bdfd47c3f0..d05233c9b4f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
65 u64 rx_total_drop; 65 u64 rx_total_drop;
66}; 66};
67 67
68int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); 68int mlx5_fpga_caps(struct mlx5_core_dev *dev);
69int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); 69int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
70int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); 70int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
71int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, 71int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 9034e9960a76..dc8970346521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
139 if (err) 139 if (err)
140 goto out; 140 goto out;
141 141
142 err = mlx5_fpga_caps(fdev->mdev, 142 err = mlx5_fpga_caps(fdev->mdev);
143 fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
144 if (err) 143 if (err)
145 goto out; 144 goto out;
146 145
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e0d0efd903bc..36ecc2b2e187 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
293 } 293 }
294 294
295 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 295 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
296 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
297 log_max_flow_counter,
298 ft->type));
296 int list_size = 0; 299 int list_size = 0;
297 300
298 list_for_each_entry(dst, &fte->node.children, node.list) { 301 list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
305 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); 308 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
306 list_size++; 309 list_size++;
307 } 310 }
311 if (list_size > max_list_size) {
312 err = -EINVAL;
313 goto err_out;
314 }
308 315
309 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, 316 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
310 list_size); 317 list_size);
311 } 318 }
312 319
313 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 320 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
321err_out:
314 kvfree(in); 322 kvfree(in);
315 return err; 323 return err;
316} 324}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5509a752f98e..48dd78975062 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -52,6 +52,7 @@ enum fs_flow_table_type {
52 FS_FT_FDB = 0X4, 52 FS_FT_FDB = 0X4,
53 FS_FT_SNIFFER_RX = 0X5, 53 FS_FT_SNIFFER_RX = 0X5,
54 FS_FT_SNIFFER_TX = 0X6, 54 FS_FT_SNIFFER_TX = 0X6,
55 FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
55}; 56};
56 57
57enum fs_flow_table_op_mod { 58enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
260#define fs_for_each_dst(pos, fte) \ 261#define fs_for_each_dst(pos, fte) \
261 fs_list_for_each_entry(pos, &(fte)->node.children) 262 fs_list_for_each_entry(pos, &(fte)->node.children)
262 263
264#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
265 (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
266 (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
267 (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
268 (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
269 (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
270 (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
271 (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
272 )
273
263#endif 274#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8aea0a065e56..db86e1506c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
356void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) 356void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
357{ 357{
358 struct mlx5_core_health *health = &dev->priv.health; 358 struct mlx5_core_health *health = &dev->priv.health;
359 unsigned long flags;
359 360
360 spin_lock(&health->wq_lock); 361 spin_lock_irqsave(&health->wq_lock, flags);
361 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); 362 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
362 spin_unlock(&health->wq_lock); 363 spin_unlock_irqrestore(&health->wq_lock, flags);
363 cancel_delayed_work_sync(&dev->priv.health.recover_work); 364 cancel_delayed_work_sync(&dev->priv.health.recover_work);
364} 365}
365 366
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 85298051a3e4..145e392ab849 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
572{ 572{
573 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 573 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
574 const struct mlx5e_profile *profile = priv->profile; 574 const struct mlx5e_profile *profile = priv->profile;
575 struct mlx5_core_dev *mdev = priv->mdev;
575 576
576 mlx5e_detach_netdev(priv); 577 mlx5e_detach_netdev(priv);
577 profile->cleanup(priv); 578 profile->cleanup(priv);
578 destroy_workqueue(priv->wq); 579 destroy_workqueue(priv->wq);
579 free_netdev(netdev); 580 free_netdev(netdev);
580 581
581 mlx5e_destroy_mdev_resources(priv->mdev); 582 mlx5e_destroy_mdev_resources(mdev);
582} 583}
583EXPORT_SYMBOL(mlx5_rdma_netdev_free); 584EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1975d4388d4f..e07061f565d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
677} 677}
678EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); 678EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
679 679
680int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
681 u8 tc, u8 *tc_group)
682{
683 u32 out[MLX5_ST_SZ_DW(qetc_reg)];
684 void *ets_tcn_conf;
685 int err;
686
687 err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
688 if (err)
689 return err;
690
691 ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
692 tc_configuration[tc]);
693
694 *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
695 group);
696
697 return 0;
698}
699EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
700
680int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) 701int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
681{ 702{
682 u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; 703 u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6c48e9959b65..2a8b529ce6dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
109 mlx5_core_warn(dev, 109 mlx5_core_warn(dev,
110 "failed to restore VF %d settings, err %d\n", 110 "failed to restore VF %d settings, err %d\n",
111 vf, err); 111 vf, err);
112 continue; 112 continue;
113 } 113 }
114 } 114 }
115 mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); 115 mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
96 const struct mlxsw_bus *bus; 96 const struct mlxsw_bus *bus;
97 void *bus_priv; 97 void *bus_priv;
98 const struct mlxsw_bus_info *bus_info; 98 const struct mlxsw_bus_info *bus_info;
99 struct workqueue_struct *emad_wq;
99 struct list_head rx_listener_list; 100 struct list_head rx_listener_list;
100 struct list_head event_listener_list; 101 struct list_head event_listener_list;
101 struct { 102 struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
465{ 466{
466 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); 467 unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
467 468
468 mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); 469 queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
469} 470}
470 471
471static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, 472static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
587 588
588static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) 589static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
589{ 590{
591 struct workqueue_struct *emad_wq;
590 u64 tid; 592 u64 tid;
591 int err; 593 int err;
592 594
593 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 595 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
594 return 0; 596 return 0;
595 597
598 emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
599 if (!emad_wq)
600 return -ENOMEM;
601 mlxsw_core->emad_wq = emad_wq;
602
596 /* Set the upper 32 bits of the transaction ID field to a random 603 /* Set the upper 32 bits of the transaction ID field to a random
597 * number. This allows us to discard EMADs addressed to other 604 * number. This allows us to discard EMADs addressed to other
598 * devices. 605 * devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
619err_emad_trap_set: 626err_emad_trap_set:
620 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 627 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
621 mlxsw_core); 628 mlxsw_core);
629 destroy_workqueue(mlxsw_core->emad_wq);
622 return err; 630 return err;
623} 631}
624 632
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
631 mlxsw_core->emad.use_emad = false; 639 mlxsw_core->emad.use_emad = false;
632 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, 640 mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
633 mlxsw_core); 641 mlxsw_core);
642 destroy_workqueue(mlxsw_core->emad_wq);
634} 643}
635 644
636static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, 645static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 12c3a4449120..c0dcfa05b077 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -294,7 +294,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
294 write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size; 294 write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
295 mlxsw_i2c_set_slave_addr(tran_buf, off); 295 mlxsw_i2c_set_slave_addr(tran_buf, off);
296 memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox + 296 memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
297 chunk_size * i, chunk_size); 297 MLXSW_I2C_BLK_MAX * i, chunk_size);
298 298
299 j = 0; 299 j = 0;
300 end = jiffies + timeout; 300 end = jiffies + timeout;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index cc27c5de5a1d..5acfbe5b8b9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5827,6 +5827,29 @@ MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
5827 */ 5827 */
5828MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16); 5828MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
5829 5829
5830/* reg_mtmp_tee
5831 * Temperature Event Enable.
5832 * 0 - Do not generate event
5833 * 1 - Generate event
5834 * 2 - Generate single event
5835 * Access: RW
5836 */
5837MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
5838
5839#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
5840
5841/* reg_mtmp_temperature_threshold_hi
5842 * High threshold for Temperature Warning Event. In 0.125 Celsius.
5843 * Access: RW
5844 */
5845MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
5846
5847/* reg_mtmp_temperature_threshold_lo
5848 * Low threshold for Temperature Warning Event. In 0.125 Celsius.
5849 * Access: RW
5850 */
5851MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
5852
5830#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8 5853#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
5831 5854
5832/* reg_mtmp_sensor_name 5855/* reg_mtmp_sensor_name
@@ -5843,6 +5866,8 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
5843 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); 5866 mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
5844 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); 5867 mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
5845 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); 5868 mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
5869 mlxsw_reg_mtmp_temperature_threshold_hi_set(payload,
5870 MLXSW_REG_MTMP_THRESH_HI);
5846} 5871}
5847 5872
5848static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, 5873static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
@@ -6401,6 +6426,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
6401 mlxsw_reg_mgpc_opcode_set(payload, opcode); 6426 mlxsw_reg_mgpc_opcode_set(payload, opcode);
6402} 6427}
6403 6428
6429/* TIGCR - Tunneling IPinIP General Configuration Register
6430 * -------------------------------------------------------
6431 * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
6432 */
6433#define MLXSW_REG_TIGCR_ID 0xA801
6434#define MLXSW_REG_TIGCR_LEN 0x10
6435
6436MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
6437
6438/* reg_tigcr_ipip_ttlc
6439 * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
6440 * header.
6441 * Access: RW
6442 */
6443MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
6444
6445/* reg_tigcr_ipip_ttl_uc
6446 * The TTL for IPinIP Tunnel encapsulation of unicast packets if
6447 * reg_tigcr_ipip_ttlc is unset.
6448 * Access: RW
6449 */
6450MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
6451
6452static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
6453{
6454 MLXSW_REG_ZERO(tigcr, payload);
6455 mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
6456 mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
6457}
6458
6404/* SBPR - Shared Buffer Pools Register 6459/* SBPR - Shared Buffer Pools Register
6405 * ----------------------------------- 6460 * -----------------------------------
6406 * The SBPR configures and retrieves the shared buffer pools and configuration. 6461 * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6936,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
6881 MLXSW_REG(mcc), 6936 MLXSW_REG(mcc),
6882 MLXSW_REG(mcda), 6937 MLXSW_REG(mcda),
6883 MLXSW_REG(mgpc), 6938 MLXSW_REG(mgpc),
6939 MLXSW_REG(tigcr),
6884 MLXSW_REG(sbpr), 6940 MLXSW_REG(sbpr),
6885 MLXSW_REG(sbcm), 6941 MLXSW_REG(sbcm),
6886 MLXSW_REG(sbpm), 6942 MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2cfb3f5d092d..5189022a1c8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
2723 mlxsw_sp_nexthop_rif_fini(nh); 2723 mlxsw_sp_nexthop_rif_fini(nh);
2724 break; 2724 break;
2725 case MLXSW_SP_NEXTHOP_TYPE_IPIP: 2725 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
2726 mlxsw_sp_nexthop_rif_fini(nh);
2726 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); 2727 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
2727 break; 2728 break;
2728 } 2729 }
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
2742 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, 2743 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
2743 MLXSW_SP_L3_PROTO_IPV4)) { 2744 MLXSW_SP_L3_PROTO_IPV4)) {
2744 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 2745 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
2745 return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); 2746 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
2747 if (err)
2748 return err;
2749 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
2750 return 0;
2746 } 2751 }
2747 2752
2748 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; 2753 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3500,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
3500static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, 3505static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
3501 struct mlxsw_sp_fib *fib) 3506 struct mlxsw_sp_fib *fib)
3502{ 3507{
3503 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
3504 struct mlxsw_sp_lpm_tree *lpm_tree;
3505
3506 /* Aggregate prefix lengths across all virtual routers to make
3507 * sure we only have used prefix lengths in the LPM tree.
3508 */
3509 mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
3510 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
3511 fib->proto);
3512 if (IS_ERR(lpm_tree))
3513 goto err_tree_get;
3514 mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
3515
3516err_tree_get:
3517 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) 3508 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
3518 return; 3509 return;
3519 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); 3510 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
@@ -4009,7 +4000,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
4009 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, 4000 router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
4010 MLXSW_SP_L3_PROTO_IPV6)) { 4001 MLXSW_SP_L3_PROTO_IPV6)) {
4011 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; 4002 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4012 return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); 4003 err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
4004 if (err)
4005 return err;
4006 mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
4007 return 0;
4013 } 4008 }
4014 4009
4015 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; 4010 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -5068,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
5068 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); 5063 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
5069 if (IS_ERR(vr)) 5064 if (IS_ERR(vr))
5070 return ERR_CAST(vr); 5065 return ERR_CAST(vr);
5066 vr->rif_count++;
5071 5067
5072 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); 5068 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
5073 if (err) 5069 if (err)
@@ -5099,7 +5095,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
5099 5095
5100 mlxsw_sp_rif_counters_alloc(rif); 5096 mlxsw_sp_rif_counters_alloc(rif);
5101 mlxsw_sp->router->rifs[rif_index] = rif; 5097 mlxsw_sp->router->rifs[rif_index] = rif;
5102 vr->rif_count++;
5103 5098
5104 return rif; 5099 return rif;
5105 5100
@@ -5110,6 +5105,7 @@ err_fid_get:
5110 kfree(rif); 5105 kfree(rif);
5111err_rif_alloc: 5106err_rif_alloc:
5112err_rif_index_alloc: 5107err_rif_index_alloc:
5108 vr->rif_count--;
5113 mlxsw_sp_vr_put(vr); 5109 mlxsw_sp_vr_put(vr);
5114 return ERR_PTR(err); 5110 return ERR_PTR(err);
5115} 5111}
@@ -5124,7 +5120,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
5124 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); 5120 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
5125 vr = &mlxsw_sp->router->vrs[rif->vr_id]; 5121 vr = &mlxsw_sp->router->vrs[rif->vr_id];
5126 5122
5127 vr->rif_count--;
5128 mlxsw_sp->router->rifs[rif->rif_index] = NULL; 5123 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
5129 mlxsw_sp_rif_counters_free(rif); 5124 mlxsw_sp_rif_counters_free(rif);
5130 ops->deconfigure(rif); 5125 ops->deconfigure(rif);
@@ -5132,6 +5127,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
5132 /* Loopback RIFs are not associated with a FID. */ 5127 /* Loopback RIFs are not associated with a FID. */
5133 mlxsw_sp_fid_put(fid); 5128 mlxsw_sp_fid_put(fid);
5134 kfree(rif); 5129 kfree(rif);
5130 vr->rif_count--;
5135 mlxsw_sp_vr_put(vr); 5131 mlxsw_sp_vr_put(vr);
5136} 5132}
5137 5133
@@ -5900,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
5900 kfree(mlxsw_sp->router->rifs); 5896 kfree(mlxsw_sp->router->rifs);
5901} 5897}
5902 5898
5899static int
5900mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
5901{
5902 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
5903
5904 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
5905 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
5906}
5907
5903static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) 5908static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
5904{ 5909{
5905 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; 5910 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
5906 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); 5911 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
5907 return 0; 5912 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
5908} 5913}
5909 5914
5910static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) 5915static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index db9750695dc7..8ea9320014ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
110 */ 110 */
111 if (!switchdev_port_same_parent_id(in_dev, out_dev)) 111 if (!switchdev_port_same_parent_id(in_dev, out_dev))
112 return -EOPNOTSUPP; 112 return -EOPNOTSUPP;
113 if (!nfp_netdev_is_nfp_repr(out_dev))
114 return -EOPNOTSUPP;
113 115
114 output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); 116 output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
115 if (!output->port) 117 if (!output->port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1c0187f0af51..e118b5f23996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1180{ 1180{
1181 void *frag; 1181 void *frag;
1182 1182
1183 if (!dp->xdp_prog) 1183 if (!dp->xdp_prog) {
1184 frag = netdev_alloc_frag(dp->fl_bufsz); 1184 frag = netdev_alloc_frag(dp->fl_bufsz);
1185 else 1185 } else {
1186 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); 1186 struct page *page;
1187
1188 page = alloc_page(GFP_KERNEL | __GFP_COLD);
1189 frag = page ? page_address(page) : NULL;
1190 }
1187 if (!frag) { 1191 if (!frag) {
1188 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1192 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1189 return NULL; 1193 return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1203{ 1207{
1204 void *frag; 1208 void *frag;
1205 1209
1206 if (!dp->xdp_prog) 1210 if (!dp->xdp_prog) {
1207 frag = napi_alloc_frag(dp->fl_bufsz); 1211 frag = napi_alloc_frag(dp->fl_bufsz);
1208 else 1212 } else {
1209 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); 1213 struct page *page;
1214
1215 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
1216 frag = page ? page_address(page) : NULL;
1217 }
1210 if (!frag) { 1218 if (!frag) {
1211 nn_dp_warn(dp, "Failed to alloc receive page frag\n"); 1219 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1212 return NULL; 1220 return NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
464 464
465 do { 465 do {
466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); 466 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
467 *data++ = nn->r_vecs[i].rx_pkts; 467 data[0] = nn->r_vecs[i].rx_pkts;
468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; 468 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; 469 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error; 470 tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
472 472
473 do { 473 do {
474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); 474 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
475 *data++ = nn->r_vecs[i].tx_pkts; 475 data[1] = nn->r_vecs[i].tx_pkts;
476 *data++ = nn->r_vecs[i].tx_busy; 476 data[2] = nn->r_vecs[i].tx_busy;
477 tmp[3] = nn->r_vecs[i].hw_csum_tx; 477 tmp[3] = nn->r_vecs[i].hw_csum_tx;
478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; 478 tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
479 tmp[5] = nn->r_vecs[i].tx_gather; 479 tmp[5] = nn->r_vecs[i].tx_gather;
480 tmp[6] = nn->r_vecs[i].tx_lso; 480 tmp[6] = nn->r_vecs[i].tx_lso;
481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 481 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
482 482
483 data += 3;
484
483 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) 485 for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
484 gathered_stats[j] += tmp[j]; 486 gathered_stats[j] += tmp[j];
485 } 487 }
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 0ea3ca09c689..3ed9033e56db 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -898,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
898 898
899 curr_rxbuf->dma_addr = 899 curr_rxbuf->dma_addr =
900 dma_map_single(adpt->netdev->dev.parent, skb->data, 900 dma_map_single(adpt->netdev->dev.parent, skb->data,
901 curr_rxbuf->length, DMA_FROM_DEVICE); 901 adpt->rxbuf_size, DMA_FROM_DEVICE);
902
902 ret = dma_mapping_error(adpt->netdev->dev.parent, 903 ret = dma_mapping_error(adpt->netdev->dev.parent,
903 curr_rxbuf->dma_addr); 904 curr_rxbuf->dma_addr);
904 if (ret) { 905 if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 98f22551eb45..1e33aea59f50 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -51,10 +51,7 @@ struct rmnet_walk_data {
51 51
52static int rmnet_is_real_dev_registered(const struct net_device *real_dev) 52static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
53{ 53{
54 rx_handler_func_t *rx_handler; 54 return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
55
56 rx_handler = rcu_dereference(real_dev->rx_handler);
57 return (rx_handler == rmnet_rx_handler);
58} 55}
59 56
60/* Needs rtnl lock */ 57/* Needs rtnl lock */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8491 rtl8168_driver_start(tp); 8491 rtl8168_driver_start(tp);
8492 } 8492 }
8493 8493
8494 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
8495
8496 if (pci_dev_run_wake(pdev)) 8494 if (pci_dev_run_wake(pdev))
8497 pm_runtime_put_noidle(&pdev->dev); 8495 pm_runtime_put_noidle(&pdev->dev);
8498 8496
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
index a63ef82e7c72..dfae3c9d57c6 100644
--- a/drivers/net/ethernet/rocker/rocker_tlv.h
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
139int rocker_tlv_put(struct rocker_desc_info *desc_info, 139int rocker_tlv_put(struct rocker_desc_info *desc_info,
140 int attrtype, int attrlen, const void *data); 140 int attrtype, int attrlen, const void *data);
141 141
142static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, 142static inline int
143 int attrtype, u8 value) 143rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
144{ 144{
145 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); 145 u8 tmp = value; /* work around GCC PR81715 */
146
147 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
146} 148}
147 149
148static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, 150static inline int
149 int attrtype, u16 value) 151rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
150{ 152{
151 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); 153 u16 tmp = value;
154
155 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
152} 156}
153 157
154static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, 158static inline int
155 int attrtype, __be16 value) 159rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
156{ 160{
157 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); 161 __be16 tmp = value;
162
163 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
158} 164}
159 165
160static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, 166static inline int
161 int attrtype, u32 value) 167rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
162{ 168{
163 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); 169 u32 tmp = value;
170
171 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
164} 172}
165 173
166static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, 174static inline int
167 int attrtype, __be32 value) 175rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
168{ 176{
169 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); 177 __be32 tmp = value;
178
179 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
170} 180}
171 181
172static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, 182static inline int
173 int attrtype, u64 value) 183rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
174{ 184{
175 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); 185 u64 tmp = value;
186
187 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
176} 188}
177 189
178static inline struct rocker_tlv * 190static inline struct rocker_tlv *
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd6a2f9791cc..3256e5cbad27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
74 plat_dat->axi->axi_wr_osr_lmt--; 74 plat_dat->axi->axi_wr_osr_lmt--;
75 } 75 }
76 76
77 if (of_property_read_u32(np, "read,read-requests", 77 if (of_property_read_u32(np, "snps,read-requests",
78 &plat_dat->axi->axi_rd_osr_lmt)) { 78 &plat_dat->axi->axi_rd_osr_lmt)) {
79 /** 79 /**
80 * Since the register has a reset value of 1, if property 80 * Since the register has a reset value of 1, if property
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
511 .remove = dwc_eth_dwmac_remove, 511 .remove = dwc_eth_dwmac_remove,
512 .driver = { 512 .driver = {
513 .name = "dwc-eth-dwmac", 513 .name = "dwc-eth-dwmac",
514 .pm = &stmmac_pltfr_pm_ops,
514 .of_match_table = dwc_eth_dwmac_match, 515 .of_match_table = dwc_eth_dwmac_match,
515 }, 516 },
516}; 517};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 99823f54696a..13133b30b575 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -83,6 +83,117 @@ struct rk_priv_data {
83 (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ 83 (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
84 ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) 84 ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
85 85
86#define RK3128_GRF_MAC_CON0 0x0168
87#define RK3128_GRF_MAC_CON1 0x016c
88
89/* RK3128_GRF_MAC_CON0 */
90#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
91#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
92#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
93#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
94#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
95#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
96
97/* RK3128_GRF_MAC_CON1 */
98#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
99 (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
100#define RK3128_GMAC_PHY_INTF_SEL_RMII \
101 (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
102#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
103#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
104#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
105#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
106#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
107#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
108#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
109#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
110#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
111#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
112#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
113
114static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
115 int tx_delay, int rx_delay)
116{
117 struct device *dev = &bsp_priv->pdev->dev;
118
119 if (IS_ERR(bsp_priv->grf)) {
120 dev_err(dev, "Missing rockchip,grf property\n");
121 return;
122 }
123
124 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
125 RK3128_GMAC_PHY_INTF_SEL_RGMII |
126 RK3128_GMAC_RMII_MODE_CLR);
127 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
128 DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
129 RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
130 RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
131}
132
133static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
134{
135 struct device *dev = &bsp_priv->pdev->dev;
136
137 if (IS_ERR(bsp_priv->grf)) {
138 dev_err(dev, "Missing rockchip,grf property\n");
139 return;
140 }
141
142 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
143 RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
144}
145
146static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
147{
148 struct device *dev = &bsp_priv->pdev->dev;
149
150 if (IS_ERR(bsp_priv->grf)) {
151 dev_err(dev, "Missing rockchip,grf property\n");
152 return;
153 }
154
155 if (speed == 10)
156 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
157 RK3128_GMAC_CLK_2_5M);
158 else if (speed == 100)
159 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
160 RK3128_GMAC_CLK_25M);
161 else if (speed == 1000)
162 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
163 RK3128_GMAC_CLK_125M);
164 else
165 dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
166}
167
168static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
169{
170 struct device *dev = &bsp_priv->pdev->dev;
171
172 if (IS_ERR(bsp_priv->grf)) {
173 dev_err(dev, "Missing rockchip,grf property\n");
174 return;
175 }
176
177 if (speed == 10) {
178 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
179 RK3128_GMAC_RMII_CLK_2_5M |
180 RK3128_GMAC_SPEED_10M);
181 } else if (speed == 100) {
182 regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
183 RK3128_GMAC_RMII_CLK_25M |
184 RK3128_GMAC_SPEED_100M);
185 } else {
186 dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
187 }
188}
189
190static const struct rk_gmac_ops rk3128_ops = {
191 .set_to_rgmii = rk3128_set_to_rgmii,
192 .set_to_rmii = rk3128_set_to_rmii,
193 .set_rgmii_speed = rk3128_set_rgmii_speed,
194 .set_rmii_speed = rk3128_set_rmii_speed,
195};
196
86#define RK3228_GRF_MAC_CON0 0x0900 197#define RK3228_GRF_MAC_CON0 0x0900
87#define RK3228_GRF_MAC_CON1 0x0904 198#define RK3228_GRF_MAC_CON1 0x0904
88 199
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
1313static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); 1424static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
1314 1425
1315static const struct of_device_id rk_gmac_dwmac_match[] = { 1426static const struct of_device_id rk_gmac_dwmac_match[] = {
1427 { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
1316 { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, 1428 { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
1317 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, 1429 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
1318 { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, 1430 { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c4407e8e39a3..2f7d7ec59962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
296{ 296{
297 void __iomem *ioaddr = hw->pcsr; 297 void __iomem *ioaddr = hw->pcsr;
298 unsigned int pmt = 0; 298 unsigned int pmt = 0;
299 u32 config;
299 300
300 if (mode & WAKE_MAGIC) { 301 if (mode & WAKE_MAGIC) {
301 pr_debug("GMAC: WOL Magic frame\n"); 302 pr_debug("GMAC: WOL Magic frame\n");
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
306 pmt |= power_down | global_unicast | wake_up_frame_en; 307 pmt |= power_down | global_unicast | wake_up_frame_en;
307 } 308 }
308 309
310 if (pmt) {
311 /* The receiver must be enabled for WOL before powering down */
312 config = readl(ioaddr + GMAC_CONFIG);
313 config |= GMAC_CONFIG_RE;
314 writel(config, ioaddr + GMAC_CONFIG);
315 }
309 writel(pmt, ioaddr + GMAC_PMT); 316 writel(pmt, ioaddr + GMAC_PMT);
310} 317}
311 318
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
275 goto exit; 275 goto exit;
276 i++; 276 i++;
277 277
278 } while ((ret == 1) || (i < 10)); 278 } while ((ret == 1) && (i < 10));
279 279
280 if (i == 10) 280 if (i == 10)
281 ret = -EBUSY; 281 ret = -EBUSY;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
34 34
35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, 35 err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
36 !(value & DMA_BUS_MODE_SFT_RESET), 36 !(value & DMA_BUS_MODE_SFT_RESET),
37 100000, 10000); 37 10000, 100000);
38 if (err) 38 if (err)
39 return -EBUSY; 39 return -EBUSY;
40 40
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1763e48c84e2..16bd50929084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 struct dma_desc *np, struct sk_buff *skb) 473 struct dma_desc *np, struct sk_buff *skb)
474{ 474{
475 struct skb_shared_hwtstamps *shhwtstamp = NULL; 475 struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 struct dma_desc *desc = p;
476 u64 ns; 477 u64 ns;
477 478
478 if (!priv->hwts_rx_en) 479 if (!priv->hwts_rx_en)
479 return; 480 return;
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv->plat->has_gmac4)
483 desc = np;
480 484
481 /* Check if timestamp is available */ 485 /* Check if timestamp is available */
482 if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 486 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
483 /* For GMAC4, the valid timestamp is from CTX next desc. */ 487 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
484 if (priv->plat->has_gmac4)
485 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
486 else
487 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
488
489 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 488 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
490 shhwtstamp = skb_hwtstamps(skb); 489 shhwtstamp = skb_hwtstamps(skb);
491 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 490 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1800{ 1799{
1801 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1800 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1802 unsigned int bytes_compl = 0, pkts_compl = 0; 1801 unsigned int bytes_compl = 0, pkts_compl = 0;
1803 unsigned int entry = tx_q->dirty_tx; 1802 unsigned int entry;
1804 1803
1805 netif_tx_lock(priv->dev); 1804 netif_tx_lock(priv->dev);
1806 1805
1807 priv->xstats.tx_clean++; 1806 priv->xstats.tx_clean++;
1808 1807
1808 entry = tx_q->dirty_tx;
1809 while (entry != tx_q->cur_tx) { 1809 while (entry != tx_q->cur_tx) {
1810 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1810 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1811 struct dma_desc *p; 1811 struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3333 * them in stmmac_rx_refill() function so that 3333 * them in stmmac_rx_refill() function so that
3334 * device can reuse it. 3334 * device can reuse it.
3335 */ 3335 */
3336 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3336 rx_q->rx_skbuff[entry] = NULL; 3337 rx_q->rx_skbuff[entry] = NULL;
3337 dma_unmap_single(priv->device, 3338 dma_unmap_single(priv->device,
3338 rx_q->rx_skbuff_dma[entry], 3339 rx_q->rx_skbuff_dma[entry],
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8a280b48e3a9..6383695004a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
150 plat->rx_queues_to_use = 1; 150 plat->rx_queues_to_use = 1;
151 plat->tx_queues_to_use = 1; 151 plat->tx_queues_to_use = 1;
152 152
153 /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
154 * to always set this, otherwise Queue will be classified as AVB
155 * (because MTL_QUEUE_AVB = 0).
156 */
157 plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
158 plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
159
153 rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); 160 rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
154 if (!rx_node) 161 if (!rx_node)
155 return; 162 return;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index f6404074b7b0..ed51018a813e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
113 113
114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) 114static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
115{ 115{
116#ifdef __BIG_ENDIAN
117 return (vni[0] == tun_id[2]) &&
118 (vni[1] == tun_id[1]) &&
119 (vni[2] == tun_id[0]);
120#else
121 return !memcmp(vni, &tun_id[5], 3); 116 return !memcmp(vni, &tun_id[5], 3);
122#endif
123} 117}
124 118
125static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) 119static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 5dea2063dbc8..0bcc07f346c3 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
197{ 197{
198 int err; 198 int err;
199 199
200 err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap"); 200 err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
201 201 THIS_MODULE);
202 if (err) 202 if (err)
203 goto out1; 203 goto out1;
204 204
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 98e4deaa3a6a..5ab1b8849c30 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
742 sg_init_table(sg, ret); 742 sg_init_table(sg, ret);
743 ret = skb_to_sgvec(skb, sg, 0, skb->len); 743 ret = skb_to_sgvec(skb, sg, 0, skb->len);
744 if (unlikely(ret < 0)) { 744 if (unlikely(ret < 0)) {
745 aead_request_free(req);
745 macsec_txsa_put(tx_sa); 746 macsec_txsa_put(tx_sa);
746 kfree_skb(skb); 747 kfree_skb(skb);
747 return ERR_PTR(ret); 748 return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
954 sg_init_table(sg, ret); 955 sg_init_table(sg, ret);
955 ret = skb_to_sgvec(skb, sg, 0, skb->len); 956 ret = skb_to_sgvec(skb, sg, 0, skb->len);
956 if (unlikely(ret < 0)) { 957 if (unlikely(ret < 0)) {
958 aead_request_free(req);
957 kfree_skb(skb); 959 kfree_skb(skb);
958 return ERR_PTR(ret); 960 return ERR_PTR(ret);
959 } 961 }
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index c2d0ea2fb019..cba5cb3b849a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -204,8 +204,8 @@ static int macvtap_init(void)
204{ 204{
205 int err; 205 int err;
206 206
207 err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); 207 err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
208 208 THIS_MODULE);
209 if (err) 209 if (err)
210 goto out1; 210 goto out1;
211 211
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 15cbcdba618a..4d02b27df044 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -681,9 +681,11 @@ static int m88e1116r_config_init(struct phy_device *phydev)
681 if (err < 0) 681 if (err < 0)
682 return err; 682 return err;
683 683
684 err = m88e1121_config_aneg_rgmii_delays(phydev); 684 if (phy_interface_is_rgmii(phydev)) {
685 if (err < 0) 685 err = m88e1121_config_aneg_rgmii_delays(phydev);
686 return err; 686 if (err < 0)
687 return err;
688 }
687 689
688 err = genphy_soft_reset(phydev); 690 err = genphy_soft_reset(phydev);
689 if (err < 0) 691 if (err < 0)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a404552555d4..e365866600ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,7 +120,7 @@ struct ppp {
120 int n_channels; /* how many channels are attached 54 */ 120 int n_channels; /* how many channels are attached 54 */
121 spinlock_t rlock; /* lock for receive side 58 */ 121 spinlock_t rlock; /* lock for receive side 58 */
122 spinlock_t wlock; /* lock for transmit side 5c */ 122 spinlock_t wlock; /* lock for transmit side 5c */
123 int *xmit_recursion __percpu; /* xmit recursion detect */ 123 int __percpu *xmit_recursion; /* xmit recursion detect */
124 int mru; /* max receive unit 60 */ 124 int mru; /* max receive unit 60 */
125 unsigned int flags; /* control bits 64 */ 125 unsigned int flags; /* control bits 64 */
126 unsigned int xstate; /* transmit state bits 68 */ 126 unsigned int xstate; /* transmit state bits 68 */
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1339 1339
1340static int ppp_dev_init(struct net_device *dev) 1340static int ppp_dev_init(struct net_device *dev)
1341{ 1341{
1342 struct ppp *ppp;
1343
1342 netdev_lockdep_set_classes(dev); 1344 netdev_lockdep_set_classes(dev);
1345
1346 ppp = netdev_priv(dev);
1347 /* Let the netdevice take a reference on the ppp file. This ensures
1348 * that ppp_destroy_interface() won't run before the device gets
1349 * unregistered.
1350 */
1351 atomic_inc(&ppp->file.refcnt);
1352
1343 return 0; 1353 return 0;
1344} 1354}
1345 1355
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
1362 wake_up_interruptible(&ppp->file.rwait); 1372 wake_up_interruptible(&ppp->file.rwait);
1363} 1373}
1364 1374
1375static void ppp_dev_priv_destructor(struct net_device *dev)
1376{
1377 struct ppp *ppp;
1378
1379 ppp = netdev_priv(dev);
1380 if (atomic_dec_and_test(&ppp->file.refcnt))
1381 ppp_destroy_interface(ppp);
1382}
1383
1365static const struct net_device_ops ppp_netdev_ops = { 1384static const struct net_device_ops ppp_netdev_ops = {
1366 .ndo_init = ppp_dev_init, 1385 .ndo_init = ppp_dev_init,
1367 .ndo_uninit = ppp_dev_uninit, 1386 .ndo_uninit = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
1387 dev->tx_queue_len = 3; 1406 dev->tx_queue_len = 3;
1388 dev->type = ARPHRD_PPP; 1407 dev->type = ARPHRD_PPP;
1389 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1408 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1409 dev->priv_destructor = ppp_dev_priv_destructor;
1390 netif_keep_dst(dev); 1410 netif_keep_dst(dev);
1391} 1411}
1392 1412
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 21b71ae947fd..6c0c84c33e1f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
517 &tap_proto, 0); 517 &tap_proto, 0);
518 if (!q) 518 if (!q)
519 goto err; 519 goto err;
520 if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
521 sk_free(&q->sk);
522 goto err;
523 }
520 524
521 RCU_INIT_POINTER(q->sock.wq, &q->wq); 525 RCU_INIT_POINTER(q->sock.wq, &q->wq);
522 init_waitqueue_head(&q->wq.wait); 526 init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
540 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) 544 if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
541 sock_set_flag(&q->sk, SOCK_ZEROCOPY); 545 sock_set_flag(&q->sk, SOCK_ZEROCOPY);
542 546
543 err = -ENOMEM;
544 if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
545 goto err_array;
546
547 err = tap_set_queue(tap, file, q); 547 err = tap_set_queue(tap, file, q);
548 if (err) 548 if (err) {
549 goto err_queue; 549 /* tap_sock_destruct() will take care of freeing skb_array */
550 goto err_put;
551 }
550 552
551 dev_put(tap->dev); 553 dev_put(tap->dev);
552 554
553 rtnl_unlock(); 555 rtnl_unlock();
554 return err; 556 return err;
555 557
556err_queue: 558err_put:
557 skb_array_cleanup(&q->skb_array);
558err_array:
559 sock_put(&q->sk); 559 sock_put(&q->sk);
560err: 560err:
561 if (tap) 561 if (tap)
@@ -1032,6 +1032,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
1032 case TUNSETSNDBUF: 1032 case TUNSETSNDBUF:
1033 if (get_user(s, sp)) 1033 if (get_user(s, sp))
1034 return -EFAULT; 1034 return -EFAULT;
1035 if (s <= 0)
1036 return -EINVAL;
1035 1037
1036 q->sk.sk_sndbuf = s; 1038 q->sk.sk_sndbuf = s;
1037 return 0; 1039 return 0;
@@ -1249,8 +1251,8 @@ static int tap_list_add(dev_t major, const char *device_name)
1249 return 0; 1251 return 0;
1250} 1252}
1251 1253
1252int tap_create_cdev(struct cdev *tap_cdev, 1254int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
1253 dev_t *tap_major, const char *device_name) 1255 const char *device_name, struct module *module)
1254{ 1256{
1255 int err; 1257 int err;
1256 1258
@@ -1259,6 +1261,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
1259 goto out1; 1261 goto out1;
1260 1262
1261 cdev_init(tap_cdev, &tap_fops); 1263 cdev_init(tap_cdev, &tap_fops);
1264 tap_cdev->owner = module;
1262 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); 1265 err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
1263 if (err) 1266 if (err)
1264 goto out2; 1267 goto out2;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3c9985f29950..42bb820a56c9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1286 buflen += SKB_DATA_ALIGN(len + pad); 1286 buflen += SKB_DATA_ALIGN(len + pad);
1287 rcu_read_unlock(); 1287 rcu_read_unlock();
1288 1288
1289 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1289 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 1290 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1290 return ERR_PTR(-ENOMEM); 1291 return ERR_PTR(-ENOMEM);
1291 1292
@@ -1496,11 +1497,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1496 switch (tun->flags & TUN_TYPE_MASK) { 1497 switch (tun->flags & TUN_TYPE_MASK) {
1497 case IFF_TUN: 1498 case IFF_TUN:
1498 if (tun->flags & IFF_NO_PI) { 1499 if (tun->flags & IFF_NO_PI) {
1499 switch (skb->data[0] & 0xf0) { 1500 u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1500 case 0x40: 1501
1502 switch (ip_version) {
1503 case 4:
1501 pi.proto = htons(ETH_P_IP); 1504 pi.proto = htons(ETH_P_IP);
1502 break; 1505 break;
1503 case 0x60: 1506 case 6:
1504 pi.proto = htons(ETH_P_IPV6); 1507 pi.proto = htons(ETH_P_IPV6);
1505 break; 1508 break;
1506 default: 1509 default:
@@ -2025,6 +2028,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2025 2028
2026 if (!dev) 2029 if (!dev)
2027 return -ENOMEM; 2030 return -ENOMEM;
2031 err = dev_get_valid_name(net, dev, name);
2032 if (err < 0)
2033 goto err_free_dev;
2028 2034
2029 dev_net_set(dev, net); 2035 dev_net_set(dev, net);
2030 dev->rtnl_link_ops = &tun_link_ops; 2036 dev->rtnl_link_ops = &tun_link_ops;
@@ -2423,6 +2429,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
2423 ret = -EFAULT; 2429 ret = -EFAULT;
2424 break; 2430 break;
2425 } 2431 }
2432 if (sndbuf <= 0) {
2433 ret = -EINVAL;
2434 break;
2435 }
2426 2436
2427 tun->sndbuf = sndbuf; 2437 tun->sndbuf = sndbuf;
2428 tun_set_sndbuf(tun); 2438 tun_set_sndbuf(tun);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8ab281b478f2..3e7a3ac3a362 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
54 desc->bInterfaceProtocol == 3); 54 desc->bInterfaceProtocol == 3);
55} 55}
56 56
57static int is_novatel_rndis(struct usb_interface_descriptor *desc)
58{
59 return (desc->bInterfaceClass == USB_CLASS_MISC &&
60 desc->bInterfaceSubClass == 4 &&
61 desc->bInterfaceProtocol == 1);
62}
63
57#else 64#else
58 65
59#define is_rndis(desc) 0 66#define is_rndis(desc) 0
60#define is_activesync(desc) 0 67#define is_activesync(desc) 0
61#define is_wireless_rndis(desc) 0 68#define is_wireless_rndis(desc) 0
69#define is_novatel_rndis(desc) 0
62 70
63#endif 71#endif
64 72
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
150 */ 158 */
151 rndis = (is_rndis(&intf->cur_altsetting->desc) || 159 rndis = (is_rndis(&intf->cur_altsetting->desc) ||
152 is_activesync(&intf->cur_altsetting->desc) || 160 is_activesync(&intf->cur_altsetting->desc) ||
153 is_wireless_rndis(&intf->cur_altsetting->desc)); 161 is_wireless_rndis(&intf->cur_altsetting->desc) ||
162 is_novatel_rndis(&intf->cur_altsetting->desc));
154 163
155 memset(info, 0, sizeof(*info)); 164 memset(info, 0, sizeof(*info));
156 info->control = intf; 165 info->control = intf;
@@ -547,9 +556,12 @@ static const struct driver_info wwan_info = {
547#define REALTEK_VENDOR_ID 0x0bda 556#define REALTEK_VENDOR_ID 0x0bda
548#define SAMSUNG_VENDOR_ID 0x04e8 557#define SAMSUNG_VENDOR_ID 0x04e8
549#define LENOVO_VENDOR_ID 0x17ef 558#define LENOVO_VENDOR_ID 0x17ef
559#define LINKSYS_VENDOR_ID 0x13b1
550#define NVIDIA_VENDOR_ID 0x0955 560#define NVIDIA_VENDOR_ID 0x0955
551#define HP_VENDOR_ID 0x03f0 561#define HP_VENDOR_ID 0x03f0
552#define MICROSOFT_VENDOR_ID 0x045e 562#define MICROSOFT_VENDOR_ID 0x045e
563#define UBLOX_VENDOR_ID 0x1546
564#define TPLINK_VENDOR_ID 0x2357
553 565
554static const struct usb_device_id products[] = { 566static const struct usb_device_id products[] = {
555/* BLACKLIST !! 567/* BLACKLIST !!
@@ -737,6 +749,15 @@ static const struct usb_device_id products[] = {
737 .driver_info = 0, 749 .driver_info = 0,
738}, 750},
739 751
752#if IS_ENABLED(CONFIG_USB_RTL8152)
753/* Linksys USB3GIGV1 Ethernet Adapter */
754{
755 USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
756 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
757 .driver_info = 0,
758},
759#endif
760
740/* ThinkPad USB-C Dock (based on Realtek RTL8153) */ 761/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
741{ 762{
742 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, 763 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
@@ -793,6 +814,13 @@ static const struct usb_device_id products[] = {
793 .driver_info = 0, 814 .driver_info = 0,
794}, 815},
795 816
817 /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
818{
819 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
820 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
821 .driver_info = 0,
822},
823
796/* WHITELIST!!! 824/* WHITELIST!!!
797 * 825 *
798 * CDC Ether uses two interfaces, not necessarily consecutive. 826 * CDC Ether uses two interfaces, not necessarily consecutive.
@@ -844,12 +872,30 @@ static const struct usb_device_id products[] = {
844 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 872 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
845 .driver_info = (kernel_ulong_t)&wwan_info, 873 .driver_info = (kernel_ulong_t)&wwan_info,
846}, { 874}, {
875 /* Huawei ME906 and ME909 */
876 USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
877 USB_CDC_SUBCLASS_ETHERNET,
878 USB_CDC_PROTO_NONE),
879 .driver_info = (unsigned long)&wwan_info,
880}, {
847 /* ZTE modules */ 881 /* ZTE modules */
848 USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, 882 USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
849 USB_CDC_SUBCLASS_ETHERNET, 883 USB_CDC_SUBCLASS_ETHERNET,
850 USB_CDC_PROTO_NONE), 884 USB_CDC_PROTO_NONE),
851 .driver_info = (unsigned long)&zte_cdc_info, 885 .driver_info = (unsigned long)&zte_cdc_info,
852}, { 886}, {
887 /* U-blox TOBY-L2 */
888 USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
889 USB_CDC_SUBCLASS_ETHERNET,
890 USB_CDC_PROTO_NONE),
891 .driver_info = (unsigned long)&wwan_info,
892}, {
893 /* U-blox SARA-U2 */
894 USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
895 USB_CDC_SUBCLASS_ETHERNET,
896 USB_CDC_PROTO_NONE),
897 .driver_info = (unsigned long)&wwan_info,
898}, {
853 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, 899 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
854 USB_CDC_PROTO_NONE), 900 USB_CDC_PROTO_NONE),
855 .driver_info = (unsigned long) &cdc_info, 901 .driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ceb78e2ea4f0..d51d9abf7986 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -613,7 +613,9 @@ enum rtl8152_flags {
613#define VENDOR_ID_MICROSOFT 0x045e 613#define VENDOR_ID_MICROSOFT 0x045e
614#define VENDOR_ID_SAMSUNG 0x04e8 614#define VENDOR_ID_SAMSUNG 0x04e8
615#define VENDOR_ID_LENOVO 0x17ef 615#define VENDOR_ID_LENOVO 0x17ef
616#define VENDOR_ID_LINKSYS 0x13b1
616#define VENDOR_ID_NVIDIA 0x0955 617#define VENDOR_ID_NVIDIA 0x0955
618#define VENDOR_ID_TPLINK 0x2357
617 619
618#define MCU_TYPE_PLA 0x0100 620#define MCU_TYPE_PLA 0x0100
619#define MCU_TYPE_USB 0x0000 621#define MCU_TYPE_USB 0x0000
@@ -5316,7 +5318,9 @@ static const struct usb_device_id rtl8152_table[] = {
5316 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 5318 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
5317 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, 5319 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
5318 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, 5320 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
5321 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
5319 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, 5322 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
5323 {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
5320 {} 5324 {}
5321}; 5325};
5322 5326
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a151f267aebb..b807c91abe1d 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = {
632 /* RNDIS for tethering */ 632 /* RNDIS for tethering */
633 USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), 633 USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
634 .driver_info = (unsigned long) &rndis_info, 634 .driver_info = (unsigned long) &rndis_info,
635}, {
636 /* Novatel Verizon USB730L */
637 USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
638 .driver_info = (unsigned long) &rndis_info,
635}, 639},
636 { }, // END 640 { }, // END
637}; 641};
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 63f749078a1f..0e3f8ed84660 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -346,7 +346,6 @@ out:
346fail: 346fail:
347 dev_put(dev); 347 dev_put(dev);
348 free_netdev(ndev); 348 free_netdev(ndev);
349 kfree(lapbeth);
350 goto out; 349 goto out;
351} 350}
352 351
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
652 struct device *dev = i2400m_dev(i2400m); 652 struct device *dev = i2400m_dev(i2400m);
653 struct { 653 struct {
654 struct i2400m_bootrom_header cmd; 654 struct i2400m_bootrom_header cmd;
655 u8 cmd_payload[chunk_len]; 655 u8 cmd_payload[];
656 } __packed *buf; 656 } __packed *buf;
657 struct i2400m_bootrom_header ack; 657 struct i2400m_bootrom_header ack;
658 658
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a3f5dc78353f..0aeeb233af78 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -550,6 +550,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
550 return IEEE80211_TKIP_IV_LEN; 550 return IEEE80211_TKIP_IV_LEN;
551 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 551 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
552 return IEEE80211_CCMP_HDR_LEN; 552 return IEEE80211_CCMP_HDR_LEN;
553 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
554 return IEEE80211_CCMP_256_HDR_LEN;
555 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
556 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
557 return IEEE80211_GCMP_HDR_LEN;
553 case HTT_RX_MPDU_ENCRYPT_WEP128: 558 case HTT_RX_MPDU_ENCRYPT_WEP128:
554 case HTT_RX_MPDU_ENCRYPT_WAPI: 559 case HTT_RX_MPDU_ENCRYPT_WAPI:
555 break; 560 break;
@@ -575,6 +580,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
575 return IEEE80211_TKIP_ICV_LEN; 580 return IEEE80211_TKIP_ICV_LEN;
576 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: 581 case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
577 return IEEE80211_CCMP_MIC_LEN; 582 return IEEE80211_CCMP_MIC_LEN;
583 case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
584 return IEEE80211_CCMP_256_MIC_LEN;
585 case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
586 case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
587 return IEEE80211_GCMP_MIC_LEN;
578 case HTT_RX_MPDU_ENCRYPT_WEP128: 588 case HTT_RX_MPDU_ENCRYPT_WEP128:
579 case HTT_RX_MPDU_ENCRYPT_WAPI: 589 case HTT_RX_MPDU_ENCRYPT_WAPI:
580 break; 590 break;
@@ -1051,9 +1061,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1051 hdr = (void *)msdu->data; 1061 hdr = (void *)msdu->data;
1052 1062
1053 /* Tail */ 1063 /* Tail */
1054 if (status->flag & RX_FLAG_IV_STRIPPED) 1064 if (status->flag & RX_FLAG_IV_STRIPPED) {
1055 skb_trim(msdu, msdu->len - 1065 skb_trim(msdu, msdu->len -
1056 ath10k_htt_rx_crypto_tail_len(ar, enctype)); 1066 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1067 } else {
1068 /* MIC */
1069 if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
1070 enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1071 skb_trim(msdu, msdu->len - 8);
1072
1073 /* ICV */
1074 if (status->flag & RX_FLAG_ICV_STRIPPED &&
1075 enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
1076 skb_trim(msdu, msdu->len -
1077 ath10k_htt_rx_crypto_tail_len(ar, enctype));
1078 }
1057 1079
1058 /* MMIC */ 1080 /* MMIC */
1059 if ((status->flag & RX_FLAG_MMIC_STRIPPED) && 1081 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1075,7 +1097,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1075static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, 1097static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1076 struct sk_buff *msdu, 1098 struct sk_buff *msdu,
1077 struct ieee80211_rx_status *status, 1099 struct ieee80211_rx_status *status,
1078 const u8 first_hdr[64]) 1100 const u8 first_hdr[64],
1101 enum htt_rx_mpdu_encrypt_type enctype)
1079{ 1102{
1080 struct ieee80211_hdr *hdr; 1103 struct ieee80211_hdr *hdr;
1081 struct htt_rx_desc *rxd; 1104 struct htt_rx_desc *rxd;
@@ -1083,6 +1106,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1083 u8 da[ETH_ALEN]; 1106 u8 da[ETH_ALEN];
1084 u8 sa[ETH_ALEN]; 1107 u8 sa[ETH_ALEN];
1085 int l3_pad_bytes; 1108 int l3_pad_bytes;
1109 int bytes_aligned = ar->hw_params.decap_align_bytes;
1086 1110
1087 /* Delivered decapped frame: 1111 /* Delivered decapped frame:
1088 * [nwifi 802.11 header] <-- replaced with 802.11 hdr 1112 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1111,6 +1135,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1111 /* push original 802.11 header */ 1135 /* push original 802.11 header */
1112 hdr = (struct ieee80211_hdr *)first_hdr; 1136 hdr = (struct ieee80211_hdr *)first_hdr;
1113 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1137 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1138
1139 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1140 memcpy(skb_push(msdu,
1141 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1142 (void *)hdr + round_up(hdr_len, bytes_aligned),
1143 ath10k_htt_rx_crypto_param_len(ar, enctype));
1144 }
1145
1114 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1146 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1115 1147
1116 /* original 802.11 header has a different DA and in 1148 /* original 802.11 header has a different DA and in
@@ -1171,6 +1203,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1171 u8 sa[ETH_ALEN]; 1203 u8 sa[ETH_ALEN];
1172 int l3_pad_bytes; 1204 int l3_pad_bytes;
1173 struct htt_rx_desc *rxd; 1205 struct htt_rx_desc *rxd;
1206 int bytes_aligned = ar->hw_params.decap_align_bytes;
1174 1207
1175 /* Delivered decapped frame: 1208 /* Delivered decapped frame:
1176 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc 1209 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1199,6 +1232,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1199 /* push original 802.11 header */ 1232 /* push original 802.11 header */
1200 hdr = (struct ieee80211_hdr *)first_hdr; 1233 hdr = (struct ieee80211_hdr *)first_hdr;
1201 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1234 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1235
1236 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1237 memcpy(skb_push(msdu,
1238 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1239 (void *)hdr + round_up(hdr_len, bytes_aligned),
1240 ath10k_htt_rx_crypto_param_len(ar, enctype));
1241 }
1242
1202 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1243 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1203 1244
1204 /* original 802.11 header has a different DA and in 1245 /* original 802.11 header has a different DA and in
@@ -1212,12 +1253,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1212static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar, 1253static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1213 struct sk_buff *msdu, 1254 struct sk_buff *msdu,
1214 struct ieee80211_rx_status *status, 1255 struct ieee80211_rx_status *status,
1215 const u8 first_hdr[64]) 1256 const u8 first_hdr[64],
1257 enum htt_rx_mpdu_encrypt_type enctype)
1216{ 1258{
1217 struct ieee80211_hdr *hdr; 1259 struct ieee80211_hdr *hdr;
1218 size_t hdr_len; 1260 size_t hdr_len;
1219 int l3_pad_bytes; 1261 int l3_pad_bytes;
1220 struct htt_rx_desc *rxd; 1262 struct htt_rx_desc *rxd;
1263 int bytes_aligned = ar->hw_params.decap_align_bytes;
1221 1264
1222 /* Delivered decapped frame: 1265 /* Delivered decapped frame:
1223 * [amsdu header] <-- replaced with 802.11 hdr 1266 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1233,6 +1276,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1233 1276
1234 hdr = (struct ieee80211_hdr *)first_hdr; 1277 hdr = (struct ieee80211_hdr *)first_hdr;
1235 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1278 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1279
1280 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1281 memcpy(skb_push(msdu,
1282 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1283 (void *)hdr + round_up(hdr_len, bytes_aligned),
1284 ath10k_htt_rx_crypto_param_len(ar, enctype));
1285 }
1286
1236 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); 1287 memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1237} 1288}
1238 1289
@@ -1267,13 +1318,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1267 is_decrypted); 1318 is_decrypted);
1268 break; 1319 break;
1269 case RX_MSDU_DECAP_NATIVE_WIFI: 1320 case RX_MSDU_DECAP_NATIVE_WIFI:
1270 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr); 1321 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1322 enctype);
1271 break; 1323 break;
1272 case RX_MSDU_DECAP_ETHERNET2_DIX: 1324 case RX_MSDU_DECAP_ETHERNET2_DIX:
1273 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype); 1325 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1274 break; 1326 break;
1275 case RX_MSDU_DECAP_8023_SNAP_LLC: 1327 case RX_MSDU_DECAP_8023_SNAP_LLC:
1276 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr); 1328 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1329 enctype);
1277 break; 1330 break;
1278 } 1331 }
1279} 1332}
@@ -1316,7 +1369,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1316 1369
1317static void ath10k_htt_rx_h_mpdu(struct ath10k *ar, 1370static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1318 struct sk_buff_head *amsdu, 1371 struct sk_buff_head *amsdu,
1319 struct ieee80211_rx_status *status) 1372 struct ieee80211_rx_status *status,
1373 bool fill_crypt_header)
1320{ 1374{
1321 struct sk_buff *first; 1375 struct sk_buff *first;
1322 struct sk_buff *last; 1376 struct sk_buff *last;
@@ -1326,7 +1380,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1326 enum htt_rx_mpdu_encrypt_type enctype; 1380 enum htt_rx_mpdu_encrypt_type enctype;
1327 u8 first_hdr[64]; 1381 u8 first_hdr[64];
1328 u8 *qos; 1382 u8 *qos;
1329 size_t hdr_len;
1330 bool has_fcs_err; 1383 bool has_fcs_err;
1331 bool has_crypto_err; 1384 bool has_crypto_err;
1332 bool has_tkip_err; 1385 bool has_tkip_err;
@@ -1351,15 +1404,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1351 * decapped header. It'll be used for undecapping of each MSDU. 1404 * decapped header. It'll be used for undecapping of each MSDU.
1352 */ 1405 */
1353 hdr = (void *)rxd->rx_hdr_status; 1406 hdr = (void *)rxd->rx_hdr_status;
1354 hdr_len = ieee80211_hdrlen(hdr->frame_control); 1407 memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1355 memcpy(first_hdr, hdr, hdr_len);
1356 1408
1357 /* Each A-MSDU subframe will use the original header as the base and be 1409 /* Each A-MSDU subframe will use the original header as the base and be
1358 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl. 1410 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1359 */ 1411 */
1360 hdr = (void *)first_hdr; 1412 hdr = (void *)first_hdr;
1361 qos = ieee80211_get_qos_ctl(hdr); 1413
1362 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; 1414 if (ieee80211_is_data_qos(hdr->frame_control)) {
1415 qos = ieee80211_get_qos_ctl(hdr);
1416 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1417 }
1363 1418
1364 /* Some attention flags are valid only in the last MSDU. */ 1419 /* Some attention flags are valid only in the last MSDU. */
1365 last = skb_peek_tail(amsdu); 1420 last = skb_peek_tail(amsdu);
@@ -1406,9 +1461,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1406 status->flag |= RX_FLAG_DECRYPTED; 1461 status->flag |= RX_FLAG_DECRYPTED;
1407 1462
1408 if (likely(!is_mgmt)) 1463 if (likely(!is_mgmt))
1409 status->flag |= RX_FLAG_IV_STRIPPED | 1464 status->flag |= RX_FLAG_MMIC_STRIPPED;
1410 RX_FLAG_MMIC_STRIPPED; 1465
1411} 1466 if (fill_crypt_header)
1467 status->flag |= RX_FLAG_MIC_STRIPPED |
1468 RX_FLAG_ICV_STRIPPED;
1469 else
1470 status->flag |= RX_FLAG_IV_STRIPPED;
1471 }
1412 1472
1413 skb_queue_walk(amsdu, msdu) { 1473 skb_queue_walk(amsdu, msdu) {
1414 ath10k_htt_rx_h_csum_offload(msdu); 1474 ath10k_htt_rx_h_csum_offload(msdu);
@@ -1424,6 +1484,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1424 if (is_mgmt) 1484 if (is_mgmt)
1425 continue; 1485 continue;
1426 1486
1487 if (fill_crypt_header)
1488 continue;
1489
1427 hdr = (void *)msdu->data; 1490 hdr = (void *)msdu->data;
1428 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED); 1491 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1429 } 1492 }
@@ -1434,6 +1497,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1434 struct ieee80211_rx_status *status) 1497 struct ieee80211_rx_status *status)
1435{ 1498{
1436 struct sk_buff *msdu; 1499 struct sk_buff *msdu;
1500 struct sk_buff *first_subframe;
1501
1502 first_subframe = skb_peek(amsdu);
1437 1503
1438 while ((msdu = __skb_dequeue(amsdu))) { 1504 while ((msdu = __skb_dequeue(amsdu))) {
1439 /* Setup per-MSDU flags */ 1505 /* Setup per-MSDU flags */
@@ -1442,6 +1508,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
1442 else 1508 else
1443 status->flag |= RX_FLAG_AMSDU_MORE; 1509 status->flag |= RX_FLAG_AMSDU_MORE;
1444 1510
1511 if (msdu == first_subframe) {
1512 first_subframe = NULL;
1513 status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1514 } else {
1515 status->flag |= RX_FLAG_ALLOW_SAME_PN;
1516 }
1517
1445 ath10k_process_rx(ar, status, msdu); 1518 ath10k_process_rx(ar, status, msdu);
1446 } 1519 }
1447} 1520}
@@ -1584,7 +1657,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1584 ath10k_htt_rx_h_unchain(ar, &amsdu); 1657 ath10k_htt_rx_h_unchain(ar, &amsdu);
1585 1658
1586 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); 1659 ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
1587 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); 1660 ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
1588 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); 1661 ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
1589 1662
1590 return num_msdus; 1663 return num_msdus;
@@ -1745,8 +1818,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
1745} 1818}
1746 1819
1747static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, 1820static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1748 struct sk_buff_head *amsdu, 1821 struct sk_buff_head *amsdu)
1749 int budget_left)
1750{ 1822{
1751 struct sk_buff *msdu; 1823 struct sk_buff *msdu;
1752 struct htt_rx_desc *rxd; 1824 struct htt_rx_desc *rxd;
@@ -1757,9 +1829,8 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
1757 if (WARN_ON(!skb_queue_empty(amsdu))) 1829 if (WARN_ON(!skb_queue_empty(amsdu)))
1758 return -EINVAL; 1830 return -EINVAL;
1759 1831
1760 while ((msdu = __skb_dequeue(list)) && budget_left) { 1832 while ((msdu = __skb_dequeue(list))) {
1761 __skb_queue_tail(amsdu, msdu); 1833 __skb_queue_tail(amsdu, msdu);
1762 budget_left--;
1763 1834
1764 rxd = (void *)msdu->data - sizeof(*rxd); 1835 rxd = (void *)msdu->data - sizeof(*rxd);
1765 if (rxd->msdu_end.common.info0 & 1836 if (rxd->msdu_end.common.info0 &
@@ -1850,8 +1921,7 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
1850 return num_msdu; 1921 return num_msdu;
1851} 1922}
1852 1923
1853static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb, 1924static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
1854 int budget_left)
1855{ 1925{
1856 struct ath10k_htt *htt = &ar->htt; 1926 struct ath10k_htt *htt = &ar->htt;
1857 struct htt_resp *resp = (void *)skb->data; 1927 struct htt_resp *resp = (void *)skb->data;
@@ -1908,9 +1978,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
1908 if (offload) 1978 if (offload)
1909 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list); 1979 num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
1910 1980
1911 while (!skb_queue_empty(&list) && budget_left) { 1981 while (!skb_queue_empty(&list)) {
1912 __skb_queue_head_init(&amsdu); 1982 __skb_queue_head_init(&amsdu);
1913 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left); 1983 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
1914 switch (ret) { 1984 switch (ret) {
1915 case 0: 1985 case 0:
1916 /* Note: The in-order indication may report interleaved 1986 /* Note: The in-order indication may report interleaved
@@ -1920,10 +1990,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
1920 * should still give an idea about rx rate to the user. 1990 * should still give an idea about rx rate to the user.
1921 */ 1991 */
1922 num_msdus += skb_queue_len(&amsdu); 1992 num_msdus += skb_queue_len(&amsdu);
1923 budget_left -= skb_queue_len(&amsdu);
1924 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id); 1993 ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
1925 ath10k_htt_rx_h_filter(ar, &amsdu, status); 1994 ath10k_htt_rx_h_filter(ar, &amsdu, status);
1926 ath10k_htt_rx_h_mpdu(ar, &amsdu, status); 1995 ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
1927 ath10k_htt_rx_h_deliver(ar, &amsdu, status); 1996 ath10k_htt_rx_h_deliver(ar, &amsdu, status);
1928 break; 1997 break;
1929 case -EAGAIN: 1998 case -EAGAIN:
@@ -2563,8 +2632,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2563 } 2632 }
2564 2633
2565 spin_lock_bh(&htt->rx_ring.lock); 2634 spin_lock_bh(&htt->rx_ring.lock);
2566 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb, 2635 num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
2567 (budget - quota));
2568 spin_unlock_bh(&htt->rx_ring.lock); 2636 spin_unlock_bh(&htt->rx_ring.lock);
2569 if (num_rx_msdus < 0) { 2637 if (num_rx_msdus < 0) {
2570 resched_napi = true; 2638 resched_napi = true;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bc1633945a56..195dafb98131 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
3396 3396
3397MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3397MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3398 3398
3399#ifdef CONFIG_PM 3399static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
3400
3401static int ath10k_pci_pm_suspend(struct device *dev)
3402{ 3400{
3403 struct ath10k *ar = dev_get_drvdata(dev); 3401 struct ath10k *ar = dev_get_drvdata(dev);
3404 int ret; 3402 int ret;
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
3414 return ret; 3412 return ret;
3415} 3413}
3416 3414
3417static int ath10k_pci_pm_resume(struct device *dev) 3415static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
3418{ 3416{
3419 struct ath10k *ar = dev_get_drvdata(dev); 3417 struct ath10k *ar = dev_get_drvdata(dev);
3420 int ret; 3418 int ret;
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
3433static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, 3431static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
3434 ath10k_pci_pm_suspend, 3432 ath10k_pci_pm_suspend,
3435 ath10k_pci_pm_resume); 3433 ath10k_pci_pm_resume);
3436#endif
3437 3434
3438static struct pci_driver ath10k_pci_driver = { 3435static struct pci_driver ath10k_pci_driver = {
3439 .name = "ath10k_pci", 3436 .name = "ath10k_pci",
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index c1022a1cf855..28da14398951 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -239,6 +239,9 @@ enum htt_rx_mpdu_encrypt_type {
239 HTT_RX_MPDU_ENCRYPT_WAPI = 5, 239 HTT_RX_MPDU_ENCRYPT_WAPI = 5,
240 HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6, 240 HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
241 HTT_RX_MPDU_ENCRYPT_NONE = 7, 241 HTT_RX_MPDU_ENCRYPT_NONE = 7,
242 HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
243 HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
244 HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
242}; 245};
243 246
244#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff 247#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 35bd50bcbbd5..b83f01d6e3dd 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -812,7 +812,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
812 if (!sta) { 812 if (!sta) {
813 wcn36xx_err("sta %pM is not found\n", 813 wcn36xx_err("sta %pM is not found\n",
814 bss_conf->bssid); 814 bss_conf->bssid);
815 rcu_read_unlock();
816 goto out; 815 goto out;
817 } 816 }
818 sta_priv = wcn36xx_sta_to_priv(sta); 817 sta_priv = wcn36xx_sta_to_priv(sta);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index aaed4ab503ad..4157c90ad973 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
980 980
981 eth_broadcast_addr(params_le->bssid); 981 eth_broadcast_addr(params_le->bssid);
982 params_le->bss_type = DOT11_BSSTYPE_ANY; 982 params_le->bss_type = DOT11_BSSTYPE_ANY;
983 params_le->scan_type = 0; 983 params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
984 params_le->channel_num = 0; 984 params_le->channel_num = 0;
985 params_le->nprobes = cpu_to_le32(-1); 985 params_le->nprobes = cpu_to_le32(-1);
986 params_le->active_time = cpu_to_le32(-1); 986 params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
988 params_le->home_time = cpu_to_le32(-1); 988 params_le->home_time = cpu_to_le32(-1);
989 memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le)); 989 memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
990 990
991 /* if request is null exit so it will be all channel broadcast scan */
992 if (!request)
993 return;
994
995 n_ssids = request->n_ssids; 991 n_ssids = request->n_ssids;
996 n_channels = request->n_channels; 992 n_channels = request->n_channels;
993
997 /* Copy channel array if applicable */ 994 /* Copy channel array if applicable */
998 brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", 995 brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
999 n_channels); 996 n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
1030 ptr += sizeof(ssid_le); 1027 ptr += sizeof(ssid_le);
1031 } 1028 }
1032 } else { 1029 } else {
1033 brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids); 1030 brcmf_dbg(SCAN, "Performing passive scan\n");
1034 if ((request->ssids) && request->ssids->ssid_len) { 1031 params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
1035 brcmf_dbg(SCAN, "SSID %s len=%d\n",
1036 params_le->ssid_le.SSID,
1037 request->ssids->ssid_len);
1038 params_le->ssid_le.SSID_len =
1039 cpu_to_le32(request->ssids->ssid_len);
1040 memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
1041 request->ssids->ssid_len);
1042 }
1043 } 1032 }
1044 /* Adding mask to channel numbers */ 1033 /* Adding mask to channel numbers */
1045 params_le->channel_num = 1034 params_le->channel_num =
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
3162 struct brcmf_cfg80211_info *cfg = ifp->drvr->config; 3151 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
3163 s32 status; 3152 s32 status;
3164 struct brcmf_escan_result_le *escan_result_le; 3153 struct brcmf_escan_result_le *escan_result_le;
3154 u32 escan_buflen;
3165 struct brcmf_bss_info_le *bss_info_le; 3155 struct brcmf_bss_info_le *bss_info_le;
3166 struct brcmf_bss_info_le *bss = NULL; 3156 struct brcmf_bss_info_le *bss = NULL;
3167 u32 bi_length; 3157 u32 bi_length;
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
3181 3171
3182 if (status == BRCMF_E_STATUS_PARTIAL) { 3172 if (status == BRCMF_E_STATUS_PARTIAL) {
3183 brcmf_dbg(SCAN, "ESCAN Partial result\n"); 3173 brcmf_dbg(SCAN, "ESCAN Partial result\n");
3174 if (e->datalen < sizeof(*escan_result_le)) {
3175 brcmf_err("invalid event data length\n");
3176 goto exit;
3177 }
3184 escan_result_le = (struct brcmf_escan_result_le *) data; 3178 escan_result_le = (struct brcmf_escan_result_le *) data;
3185 if (!escan_result_le) { 3179 if (!escan_result_le) {
3186 brcmf_err("Invalid escan result (NULL pointer)\n"); 3180 brcmf_err("Invalid escan result (NULL pointer)\n");
3187 goto exit; 3181 goto exit;
3188 } 3182 }
3183 escan_buflen = le32_to_cpu(escan_result_le->buflen);
3184 if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
3185 escan_buflen > e->datalen ||
3186 escan_buflen < sizeof(*escan_result_le)) {
3187 brcmf_err("Invalid escan buffer length: %d\n",
3188 escan_buflen);
3189 goto exit;
3190 }
3189 if (le16_to_cpu(escan_result_le->bss_count) != 1) { 3191 if (le16_to_cpu(escan_result_le->bss_count) != 1) {
3190 brcmf_err("Invalid bss_count %d: ignoring\n", 3192 brcmf_err("Invalid bss_count %d: ignoring\n",
3191 escan_result_le->bss_count); 3193 escan_result_le->bss_count);
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
3202 } 3204 }
3203 3205
3204 bi_length = le32_to_cpu(bss_info_le->length); 3206 bi_length = le32_to_cpu(bss_info_le->length);
3205 if (bi_length != (le32_to_cpu(escan_result_le->buflen) - 3207 if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
3206 WL_ESCAN_RESULTS_FIXED_SIZE)) { 3208 brcmf_err("Ignoring invalid bss_info length: %d\n",
3207 brcmf_err("Invalid bss_info length %d: ignoring\n",
3208 bi_length); 3209 bi_length);
3209 goto exit; 3210 goto exit;
3210 } 3211 }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 4eb1e1ce9ace..ef72baf6dd96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
429 if (code != BRCMF_E_IF && !fweh->evt_handler[code]) 429 if (code != BRCMF_E_IF && !fweh->evt_handler[code])
430 return; 430 return;
431 431
432 if (datalen > BRCMF_DCMD_MAXLEN) 432 if (datalen > BRCMF_DCMD_MAXLEN ||
433 datalen + sizeof(*event_packet) > packet_len)
433 return; 434 return;
434 435
435 if (in_interrupt()) 436 if (in_interrupt())
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 8391989b1882..e0d22fedb2b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
45#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff 45#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
46#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 46#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
47 47
48/* scan type definitions */
49#define BRCMF_SCANTYPE_DEFAULT 0xFF
50#define BRCMF_SCANTYPE_ACTIVE 0
51#define BRCMF_SCANTYPE_PASSIVE 1
52
48#define BRCMF_WSEC_MAX_PSK_LEN 32 53#define BRCMF_WSEC_MAX_PSK_LEN 32
49#define BRCMF_WSEC_PASSPHRASE BIT(0) 54#define BRCMF_WSEC_PASSPHRASE BIT(0)
50 55
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2fe96eb..ef685465f80a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
14764} 14764}
14765 14765
14766static void 14766static void
14767wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys, 14767wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
14768 u8 len) 14768 const u8 *dlys, u8 len)
14769{ 14769{
14770 u32 t1_offset, t2_offset; 14770 u32 t1_offset, t2_offset;
14771 u8 ctr; 14771 u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
15240static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) 15240static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15241{ 15241{
15242 u16 currband; 15242 u16 currband;
15243 s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 }; 15243 static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
15244 s8 *lna1_gain_db = NULL; 15244 const s8 *lna1_gain_db = NULL;
15245 s8 *lna1_gain_db_2 = NULL; 15245 const s8 *lna1_gain_db_2 = NULL;
15246 s8 *lna2_gain_db = NULL; 15246 const s8 *lna2_gain_db = NULL;
15247 s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 }; 15247 static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
15248 s8 *tia_gain_db; 15248 const s8 *tia_gain_db;
15249 s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 }; 15249 static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
15250 s8 *tia_gainbits; 15250 const s8 *tia_gainbits;
15251 u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f }; 15251 static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
15252 u16 *rfseq_init_gain; 15252 const u16 *rfseq_init_gain;
15253 u16 init_gaincode; 15253 u16 init_gaincode;
15254 u16 clip1hi_gaincode; 15254 u16 clip1hi_gaincode;
15255 u16 clip1md_gaincode = 0; 15255 u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15310 15310
15311 if ((freq <= 5080) || (freq == 5825)) { 15311 if ((freq <= 5080) || (freq == 5825)) {
15312 15312
15313 s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 }; 15313 static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
15314 s8 lna1A_gain_db_2_rev7[] = { 15314 static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
15315 11, 17, 22, 25}; 15315 static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15316 s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15317 15316
15318 crsminu_th = 0x3e; 15317 crsminu_th = 0x3e;
15319 lna1_gain_db = lna1A_gain_db_rev7; 15318 lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15321 lna2_gain_db = lna2A_gain_db_rev7; 15320 lna2_gain_db = lna2A_gain_db_rev7;
15322 } else if ((freq >= 5500) && (freq <= 5700)) { 15321 } else if ((freq >= 5500) && (freq <= 5700)) {
15323 15322
15324 s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 }; 15323 static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
15325 s8 lna1A_gain_db_2_rev7[] = { 15324 static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
15326 12, 18, 22, 26}; 15325 static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
15327 s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
15328 15326
15329 crsminu_th = 0x45; 15327 crsminu_th = 0x45;
15330 clip1md_gaincode_B = 0x14; 15328 clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
15335 lna2_gain_db = lna2A_gain_db_rev7; 15333 lna2_gain_db = lna2A_gain_db_rev7;
15336 } else { 15334 } else {
15337 15335
15338 s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 }; 15336 static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
15339 s8 lna1A_gain_db_2_rev7[] = { 15337 static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
15340 12, 18, 22, 26}; 15338 static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15341 s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
15342 15339
15343 crsminu_th = 0x41; 15340 crsminu_th = 0x41;
15344 lna1_gain_db = lna1A_gain_db_rev7; 15341 lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
15450 NPHY_RFSEQ_CMD_CLR_HIQ_DIS, 15447 NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
15451 NPHY_RFSEQ_CMD_SET_HPF_BW 15448 NPHY_RFSEQ_CMD_SET_HPF_BW
15452 }; 15449 };
15453 u8 rfseq_updategainu_dlys[] = { 10, 30, 1 }; 15450 static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
15454 s8 lna1G_gain_db[] = { 7, 11, 16, 23 }; 15451 static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
15455 s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 }; 15452 static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
15456 s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 }; 15453 static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
15457 s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 }; 15454 static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
15458 s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 }; 15455 static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
15459 s8 lna1A_gain_db[] = { 7, 11, 17, 23 }; 15456 static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
15460 s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 }; 15457 static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
15461 s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 }; 15458 static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
15462 s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 }; 15459 static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
15463 s8 *lna1_gain_db = NULL; 15460 const s8 *lna1_gain_db = NULL;
15464 s8 lna2G_gain_db[] = { -5, 6, 10, 14 }; 15461 static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
15465 s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 }; 15462 static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
15466 s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 }; 15463 static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
15467 s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 }; 15464 static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
15468 s8 lna2A_gain_db[] = { -6, 2, 6, 10 }; 15465 static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
15469 s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 }; 15466 static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
15470 s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 }; 15467 static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
15471 s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 }; 15468 static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
15472 s8 *lna2_gain_db = NULL; 15469 const s8 *lna2_gain_db = NULL;
15473 s8 tiaG_gain_db[] = { 15470 static const s8 tiaG_gain_db[] = {
15474 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A }; 15471 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
15475 s8 tiaA_gain_db[] = { 15472 static const s8 tiaA_gain_db[] = {
15476 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }; 15473 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
15477 s8 tiaA_gain_db_rev4[] = { 15474 static const s8 tiaA_gain_db_rev4[] = {
15478 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15475 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15479 s8 tiaA_gain_db_rev5[] = { 15476 static const s8 tiaA_gain_db_rev5[] = {
15480 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15477 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15481 s8 tiaA_gain_db_rev6[] = { 15478 static const s8 tiaA_gain_db_rev6[] = {
15482 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; 15479 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
15483 s8 *tia_gain_db; 15480 const s8 *tia_gain_db;
15484 s8 tiaG_gainbits[] = { 15481 static const s8 tiaG_gainbits[] = {
15485 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; 15482 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
15486 s8 tiaA_gainbits[] = { 15483 static const s8 tiaA_gainbits[] = {
15487 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }; 15484 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
15488 s8 tiaA_gainbits_rev4[] = { 15485 static const s8 tiaA_gainbits_rev4[] = {
15489 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15486 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15490 s8 tiaA_gainbits_rev5[] = { 15487 static const s8 tiaA_gainbits_rev5[] = {
15491 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15488 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15492 s8 tiaA_gainbits_rev6[] = { 15489 static const s8 tiaA_gainbits_rev6[] = {
15493 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; 15490 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
15494 s8 *tia_gainbits; 15491 const s8 *tia_gainbits;
15495 s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 }; 15492 static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
15496 s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 }; 15493 static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
15497 u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f }; 15494 static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
15498 u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f }; 15495 static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
15499 u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f }; 15496 static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
15500 u16 rfseqG_init_gain_rev5_elna[] = { 15497 static const u16 rfseqG_init_gain_rev5_elna[] = {
15501 0x013f, 0x013f, 0x013f, 0x013f }; 15498 0x013f, 0x013f, 0x013f, 0x013f };
15502 u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f }; 15499 static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
15503 u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f }; 15500 static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
15504 u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f }; 15501 static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
15505 u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f }; 15502 static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
15506 u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f }; 15503 static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
15507 u16 rfseqA_init_gain_rev4_elna[] = { 15504 static const u16 rfseqA_init_gain_rev4_elna[] = {
15508 0x314f, 0x314f, 0x314f, 0x314f }; 15505 0x314f, 0x314f, 0x314f, 0x314f };
15509 u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f }; 15506 static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
15510 u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f }; 15507 static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
15511 u16 *rfseq_init_gain; 15508 const u16 *rfseq_init_gain;
15512 u16 initG_gaincode = 0x627e; 15509 u16 initG_gaincode = 0x627e;
15513 u16 initG_gaincode_rev4 = 0x527e; 15510 u16 initG_gaincode_rev4 = 0x527e;
15514 u16 initG_gaincode_rev5 = 0x427e; 15511 u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
15538 u16 clip1mdA_gaincode_rev6 = 0x2084; 15535 u16 clip1mdA_gaincode_rev6 = 0x2084;
15539 u16 clip1md_gaincode = 0; 15536 u16 clip1md_gaincode = 0;
15540 u16 clip1loG_gaincode = 0x0074; 15537 u16 clip1loG_gaincode = 0x0074;
15541 u16 clip1loG_gaincode_rev5[] = { 15538 static const u16 clip1loG_gaincode_rev5[] = {
15542 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c 15539 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
15543 }; 15540 };
15544 u16 clip1loG_gaincode_rev6[] = { 15541 static const u16 clip1loG_gaincode_rev6[] = {
15545 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e 15542 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
15546 }; 15543 };
15547 u16 clip1loG_gaincode_rev6_224B0 = 0x1074; 15544 u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
16066 16063
16067static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) 16064static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16068{ 16065{
16069 u8 rfseq_rx2tx_events[] = { 16066 static const u8 rfseq_rx2tx_events[] = {
16070 NPHY_RFSEQ_CMD_NOP, 16067 NPHY_RFSEQ_CMD_NOP,
16071 NPHY_RFSEQ_CMD_RXG_FBW, 16068 NPHY_RFSEQ_CMD_RXG_FBW,
16072 NPHY_RFSEQ_CMD_TR_SWITCH, 16069 NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16076 NPHY_RFSEQ_CMD_EXT_PA 16073 NPHY_RFSEQ_CMD_EXT_PA
16077 }; 16074 };
16078 u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 }; 16075 u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
16079 u8 rfseq_tx2rx_events[] = { 16076 static const u8 rfseq_tx2rx_events[] = {
16080 NPHY_RFSEQ_CMD_NOP, 16077 NPHY_RFSEQ_CMD_NOP,
16081 NPHY_RFSEQ_CMD_EXT_PA, 16078 NPHY_RFSEQ_CMD_EXT_PA,
16082 NPHY_RFSEQ_CMD_TX_GAIN, 16079 NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16085 NPHY_RFSEQ_CMD_RXG_FBW, 16082 NPHY_RFSEQ_CMD_RXG_FBW,
16086 NPHY_RFSEQ_CMD_CLR_HIQ_DIS 16083 NPHY_RFSEQ_CMD_CLR_HIQ_DIS
16087 }; 16084 };
16088 u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 }; 16085 static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
16089 u8 rfseq_tx2rx_events_rev3[] = { 16086 static const u8 rfseq_tx2rx_events_rev3[] = {
16090 NPHY_REV3_RFSEQ_CMD_EXT_PA, 16087 NPHY_REV3_RFSEQ_CMD_EXT_PA,
16091 NPHY_REV3_RFSEQ_CMD_INT_PA_PU, 16088 NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
16092 NPHY_REV3_RFSEQ_CMD_TX_GAIN, 16089 NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16096 NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS, 16093 NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
16097 NPHY_REV3_RFSEQ_CMD_END 16094 NPHY_REV3_RFSEQ_CMD_END
16098 }; 16095 };
16099 u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 }; 16096 static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
16100 u8 rfseq_rx2tx_events_rev3[] = { 16097 u8 rfseq_rx2tx_events_rev3[] = {
16101 NPHY_REV3_RFSEQ_CMD_NOP, 16098 NPHY_REV3_RFSEQ_CMD_NOP,
16102 NPHY_REV3_RFSEQ_CMD_RXG_FBW, 16099 NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16110 }; 16107 };
16111 u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; 16108 u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
16112 16109
16113 u8 rfseq_rx2tx_events_rev3_ipa[] = { 16110 static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
16114 NPHY_REV3_RFSEQ_CMD_NOP, 16111 NPHY_REV3_RFSEQ_CMD_NOP,
16115 NPHY_REV3_RFSEQ_CMD_RXG_FBW, 16112 NPHY_REV3_RFSEQ_CMD_RXG_FBW,
16116 NPHY_REV3_RFSEQ_CMD_TR_SWITCH, 16113 NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16121 NPHY_REV3_RFSEQ_CMD_INT_PA_PU, 16118 NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
16122 NPHY_REV3_RFSEQ_CMD_END 16119 NPHY_REV3_RFSEQ_CMD_END
16123 }; 16120 };
16124 u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; 16121 static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
16125 u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f }; 16122 static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
16126 16123
16127 s16 alpha0, alpha1, alpha2; 16124 s16 alpha0, alpha1, alpha2;
16128 s16 beta0, beta1, beta2; 16125 s16 beta0, beta1, beta2;
16129 u32 leg_data_weights, ht_data_weights, nss1_data_weights, 16126 u32 leg_data_weights, ht_data_weights, nss1_data_weights,
16130 stbc_data_weights; 16127 stbc_data_weights;
16131 u8 chan_freq_range = 0; 16128 u8 chan_freq_range = 0;
16132 u16 dac_control = 0x0002; 16129 static const u16 dac_control = 0x0002;
16133 u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 }; 16130 u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
16134 u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 }; 16131 u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
16135 u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 }; 16132 u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16139 u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 }; 16136 u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
16140 u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 }; 16137 u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
16141 u16 *aux_adc_gain; 16138 u16 *aux_adc_gain;
16142 u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 }; 16139 static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
16143 u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 }; 16140 static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
16144 s32 min_nvar_val = 0x18d; 16141 s32 min_nvar_val = 0x18d;
16145 s32 min_nvar_offset_6mbps = 20; 16142 s32 min_nvar_offset_6mbps = 20;
16146 u8 pdetrange; 16143 u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16151 u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77; 16148 u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
16152 u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77; 16149 u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
16153 u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77; 16150 u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
16154 u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 }; 16151 static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
16155 u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; 16152 static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
16156 u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; 16153 static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
16157 u16 ipalvlshift_3p3_war_en = 0; 16154 u16 ipalvlshift_3p3_war_en = 0;
16158 u16 rccal_bcap_val, rccal_scap_val; 16155 u16 rccal_bcap_val, rccal_scap_val;
16159 u16 rccal_tx20_11b_bcap = 0; 16156 u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
24291 u16 bbmult; 24288 u16 bbmult;
24292 u16 tblentry; 24289 u16 tblentry;
24293 24290
24294 struct nphy_txiqcal_ladder ladder_lo[] = { 24291 static const struct nphy_txiqcal_ladder ladder_lo[] = {
24295 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, 24292 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
24296 {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5}, 24293 {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
24297 {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7} 24294 {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
24298 }; 24295 };
24299 24296
24300 struct nphy_txiqcal_ladder ladder_iq[] = { 24297 static const struct nphy_txiqcal_ladder ladder_iq[] = {
24301 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, 24298 {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
24302 {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1}, 24299 {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
24303 {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7} 24300 {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
25773 u16 cal_gain[2]; 25770 u16 cal_gain[2];
25774 struct nphy_iqcal_params cal_params[2]; 25771 struct nphy_iqcal_params cal_params[2];
25775 u32 tbl_len; 25772 u32 tbl_len;
25776 void *tbl_ptr; 25773 const void *tbl_ptr;
25777 bool ladder_updated[2]; 25774 bool ladder_updated[2];
25778 u8 mphase_cal_lastphase = 0; 25775 u8 mphase_cal_lastphase = 0;
25779 int bcmerror = 0; 25776 int bcmerror = 0;
25780 bool phyhang_avoid_state = false; 25777 bool phyhang_avoid_state = false;
25781 25778
25782 u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { 25779 static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
25783 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901, 25780 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
25784 0x1902, 25781 0x1902,
25785 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607, 25782 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
25786 0x6407 25783 0x6407
25787 }; 25784 };
25788 25785
25789 u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { 25786 static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
25790 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400, 25787 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
25791 0x3200, 25788 0x3200,
25792 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406, 25789 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
25793 0x6407 25790 0x6407
25794 }; 25791 };
25795 25792
25796 u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { 25793 static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
25797 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201, 25794 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
25798 0x1202, 25795 0x1202,
25799 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207, 25796 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
25800 0x4707 25797 0x4707
25801 }; 25798 };
25802 25799
25803 u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { 25800 static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
25804 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900, 25801 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
25805 0x2300, 25802 0x2300,
25806 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706, 25803 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
25807 0x4707 25804 0x4707
25808 }; 25805 };
25809 25806
25810 u16 tbl_tx_iqlo_cal_startcoefs[] = { 25807 static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
25811 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25808 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25812 0x0000 25809 0x0000
25813 }; 25810 };
25814 25811
25815 u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { 25812 static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
25816 0x8123, 0x8264, 0x8086, 0x8245, 0x8056, 25813 0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
25817 0x9123, 0x9264, 0x9086, 0x9245, 0x9056 25814 0x9123, 0x9264, 0x9086, 0x9245, 0x9056
25818 }; 25815 };
25819 25816
25820 u16 tbl_tx_iqlo_cal_cmds_recal[] = { 25817 static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
25821 0x8101, 0x8253, 0x8053, 0x8234, 0x8034, 25818 0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
25822 0x9101, 0x9253, 0x9053, 0x9234, 0x9034 25819 0x9101, 0x9253, 0x9053, 0x9234, 0x9034
25823 }; 25820 };
25824 25821
25825 u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = { 25822 static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
25826 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25823 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25827 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 25824 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
25828 0x0000 25825 0x0000
25829 }; 25826 };
25830 25827
25831 u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { 25828 static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
25832 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234, 25829 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
25833 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234 25830 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
25834 }; 25831 };
25835 25832
25836 u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { 25833 static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
25837 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223, 25834 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
25838 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223 25835 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
25839 }; 25836 };
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 45e2efc70d19..ce741beec1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
309 .nvm_calib_ver = IWL3168_TX_POWER_VERSION, 309 .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
310 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, 310 .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
311 .dccm_len = IWL7265_DCCM_LEN, 311 .dccm_len = IWL7265_DCCM_LEN,
312 .nvm_type = IWL_NVM_SDP,
312}; 313};
313 314
314const struct iwl_cfg iwl7265_2ac_cfg = { 315const struct iwl_cfg iwl7265_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 2e6c52664cee..c2a5936ccede 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
164 .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ 164 .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
165 .thermal_params = &iwl8000_tt_params, \ 165 .thermal_params = &iwl8000_tt_params, \
166 .apmg_not_supported = true, \ 166 .apmg_not_supported = true, \
167 .ext_nvm = true, \ 167 .nvm_type = IWL_NVM_EXT, \
168 .dbgc_supported = true 168 .dbgc_supported = true
169 169
170#define IWL_DEVICE_8000 \ 170#define IWL_DEVICE_8000 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 2babe0a1f18b..e8b5ff42f5a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
148 .vht_mu_mimo_supported = true, \ 148 .vht_mu_mimo_supported = true, \
149 .mac_addr_from_csr = true, \ 149 .mac_addr_from_csr = true, \
150 .rf_id = true, \ 150 .rf_id = true, \
151 .ext_nvm = true, \ 151 .nvm_type = IWL_NVM_EXT, \
152 .dbgc_supported = true 152 .dbgc_supported = true
153 153
154const struct iwl_cfg iwl9160_2ac_cfg = { 154const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index 76ba1f8bc72f..a440140ed8dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
133 .use_tfh = true, \ 133 .use_tfh = true, \
134 .rf_id = true, \ 134 .rf_id = true, \
135 .gen2 = true, \ 135 .gen2 = true, \
136 .ext_nvm = true, \ 136 .nvm_type = IWL_NVM_EXT, \
137 .dbgc_supported = true 137 .dbgc_supported = true
138 138
139const struct iwl_cfg iwla000_2ac_cfg_hr = { 139const struct iwl_cfg iwla000_2ac_cfg_hr = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 00bc7a25dece..3fd07bc80f54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
108 * @NVM_SECTION_TYPE_REGULATORY: regulatory section 108 * @NVM_SECTION_TYPE_REGULATORY: regulatory section
109 * @NVM_SECTION_TYPE_CALIBRATION: calibration section 109 * @NVM_SECTION_TYPE_CALIBRATION: calibration section
110 * @NVM_SECTION_TYPE_PRODUCTION: production section 110 * @NVM_SECTION_TYPE_PRODUCTION: production section
111 * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
111 * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section 112 * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
112 * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section 113 * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
113 * @NVM_MAX_NUM_SECTIONS: number of sections 114 * @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
117 NVM_SECTION_TYPE_REGULATORY = 3, 118 NVM_SECTION_TYPE_REGULATORY = 3,
118 NVM_SECTION_TYPE_CALIBRATION = 4, 119 NVM_SECTION_TYPE_CALIBRATION = 4,
119 NVM_SECTION_TYPE_PRODUCTION = 5, 120 NVM_SECTION_TYPE_PRODUCTION = 5,
121 NVM_SECTION_TYPE_REGULATORY_SDP = 8,
120 NVM_SECTION_TYPE_MAC_OVERRIDE = 11, 122 NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
121 NVM_SECTION_TYPE_PHY_SKU = 12, 123 NVM_SECTION_TYPE_PHY_SKU = 12,
122 NVM_MAX_NUM_SECTIONS = 13, 124 NVM_MAX_NUM_SECTIONS = 13,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6afc7a799892..f5dd7d83cd0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1086 1086
1087 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1087 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1088 /* stop recording */ 1088 /* stop recording */
1089 iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1089 iwl_fw_dbg_stop_recording(fwrt);
1090 1090
1091 iwl_fw_error_dump(fwrt); 1091 iwl_fw_error_dump(fwrt);
1092 1092
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1104 u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); 1104 u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
1105 u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); 1105 u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
1106 1106
1107 /* stop recording */ 1107 iwl_fw_dbg_stop_recording(fwrt);
1108 iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
1109 udelay(100);
1110 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
1111 /* wait before we collect the data till the DBGC stop */ 1108 /* wait before we collect the data till the DBGC stop */
1112 udelay(500); 1109 udelay(500);
1113 1110
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 0f810ea89d31..9c889a32fe24 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -68,6 +68,8 @@
68#include <linux/workqueue.h> 68#include <linux/workqueue.h>
69#include <net/cfg80211.h> 69#include <net/cfg80211.h>
70#include "runtime.h" 70#include "runtime.h"
71#include "iwl-prph.h"
72#include "iwl-io.h"
71#include "file.h" 73#include "file.h"
72#include "error-dump.h" 74#include "error-dump.h"
73 75
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
194 iwl_fw_dbg_get_trigger((fwrt)->fw,\ 196 iwl_fw_dbg_get_trigger((fwrt)->fw,\
195 (trig))) 197 (trig)))
196 198
199static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
200{
201 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
202 iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
203 } else {
204 iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
205 udelay(100);
206 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
207 }
208}
209
197static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) 210static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
198{ 211{
212 iwl_fw_dbg_stop_recording(fwrt);
213
199 fwrt->dump.conf = FW_DBG_INVALID; 214 fwrt->dump.conf = FW_DBG_INVALID;
200} 215}
201 216
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e057b539d5b..71cb1ecde0f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -108,6 +108,18 @@ enum iwl_led_mode {
108 IWL_LED_DISABLE, 108 IWL_LED_DISABLE,
109}; 109};
110 110
111/**
112 * enum iwl_nvm_type - nvm formats
113 * @IWL_NVM: the regular format
114 * @IWL_NVM_EXT: extended NVM format
115 * @IWL_NVM_SDP: NVM format used by 3168 series
116 */
117enum iwl_nvm_type {
118 IWL_NVM,
119 IWL_NVM_EXT,
120 IWL_NVM_SDP,
121};
122
111/* 123/*
112 * This is the threshold value of plcp error rate per 100mSecs. It is 124 * This is the threshold value of plcp error rate per 100mSecs. It is
113 * used to set and check for the validity of plcp_delta. 125 * used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
320 * @integrated: discrete or integrated 332 * @integrated: discrete or integrated
321 * @gen2: a000 and on transport operation 333 * @gen2: a000 and on transport operation
322 * @cdb: CDB support 334 * @cdb: CDB support
323 * @ext_nvm: extended NVM format 335 * @nvm_type: see &enum iwl_nvm_type
324 * 336 *
325 * We enable the driver to be backward compatible wrt. hardware features. 337 * We enable the driver to be backward compatible wrt. hardware features.
326 * API differences in uCode shouldn't be handled here but through TLVs 338 * API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
342 const struct iwl_tt_params *thermal_params; 354 const struct iwl_tt_params *thermal_params;
343 enum iwl_device_family device_family; 355 enum iwl_device_family device_family;
344 enum iwl_led_mode led_mode; 356 enum iwl_led_mode led_mode;
357 enum iwl_nvm_type nvm_type;
345 u32 max_data_size; 358 u32 max_data_size;
346 u32 max_inst_size; 359 u32 max_inst_size;
347 netdev_features_t features; 360 netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
369 use_tfh:1, 382 use_tfh:1,
370 gen2:1, 383 gen2:1,
371 cdb:1, 384 cdb:1,
372 ext_nvm:1,
373 dbgc_supported:1; 385 dbgc_supported:1;
374 u8 valid_tx_ant; 386 u8 valid_tx_ant;
375 u8 valid_rx_ant; 387 u8 valid_rx_ant;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3014beef4873..c3a5d8ccc95e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -77,7 +77,7 @@
77#include "iwl-csr.h" 77#include "iwl-csr.h"
78 78
79/* NVM offsets (in words) definitions */ 79/* NVM offsets (in words) definitions */
80enum wkp_nvm_offsets { 80enum nvm_offsets {
81 /* NVM HW-Section offset (in words) definitions */ 81 /* NVM HW-Section offset (in words) definitions */
82 SUBSYSTEM_ID = 0x0A, 82 SUBSYSTEM_ID = 0x0A,
83 HW_ADDR = 0x15, 83 HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
92 92
93 /* NVM calibration section offset (in words) definitions */ 93 /* NVM calibration section offset (in words) definitions */
94 NVM_CALIB_SECTION = 0x2B8, 94 NVM_CALIB_SECTION = 0x2B8,
95 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION 95 XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
96
97 /* NVM REGULATORY -Section offset (in words) definitions */
98 NVM_CHANNELS_SDP = 0,
96}; 99};
97 100
98enum ext_nvm_offsets { 101enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
206 NVM_CHANNEL_DC_HIGH = BIT(12), 209 NVM_CHANNEL_DC_HIGH = BIT(12),
207}; 210};
208 211
212static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
213 int chan, u16 flags)
214{
209#define CHECK_AND_PRINT_I(x) \ 215#define CHECK_AND_PRINT_I(x) \
210 ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "") 216 ((flags & NVM_CHANNEL_##x) ? " " #x : "")
217
218 if (!(flags & NVM_CHANNEL_VALID)) {
219 IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
220 chan, flags);
221 return;
222 }
223
224 /* Note: already can print up to 101 characters, 110 is the limit! */
225 IWL_DEBUG_DEV(dev, level,
226 "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
227 chan, flags,
228 CHECK_AND_PRINT_I(VALID),
229 CHECK_AND_PRINT_I(IBSS),
230 CHECK_AND_PRINT_I(ACTIVE),
231 CHECK_AND_PRINT_I(RADAR),
232 CHECK_AND_PRINT_I(INDOOR_ONLY),
233 CHECK_AND_PRINT_I(GO_CONCURRENT),
234 CHECK_AND_PRINT_I(UNIFORM),
235 CHECK_AND_PRINT_I(20MHZ),
236 CHECK_AND_PRINT_I(40MHZ),
237 CHECK_AND_PRINT_I(80MHZ),
238 CHECK_AND_PRINT_I(160MHZ),
239 CHECK_AND_PRINT_I(DC_HIGH));
240#undef CHECK_AND_PRINT_I
241}
211 242
212static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, 243static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
213 u16 nvm_flags, const struct iwl_cfg *cfg) 244 u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
215 u32 flags = IEEE80211_CHAN_NO_HT40; 246 u32 flags = IEEE80211_CHAN_NO_HT40;
216 u32 last_5ghz_ht = LAST_5GHZ_HT; 247 u32 last_5ghz_ht = LAST_5GHZ_HT;
217 248
218 if (cfg->ext_nvm) 249 if (cfg->nvm_type == IWL_NVM_EXT)
219 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 250 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
220 251
221 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { 252 if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
268 int num_of_ch, num_2ghz_channels; 299 int num_of_ch, num_2ghz_channels;
269 const u8 *nvm_chan; 300 const u8 *nvm_chan;
270 301
271 if (!cfg->ext_nvm) { 302 if (cfg->nvm_type != IWL_NVM_EXT) {
272 num_of_ch = IWL_NUM_CHANNELS; 303 num_of_ch = IWL_NUM_CHANNELS;
273 nvm_chan = &iwl_nvm_channels[0]; 304 nvm_chan = &iwl_nvm_channels[0];
274 num_2ghz_channels = NUM_2GHZ_CHANNELS; 305 num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
302 * supported, hence we still want to add them to 333 * supported, hence we still want to add them to
303 * the list of supported channels to cfg80211. 334 * the list of supported channels to cfg80211.
304 */ 335 */
305 IWL_DEBUG_EEPROM(dev, 336 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
306 "Ch. %d Flags %x [%sGHz] - No traffic\n", 337 nvm_chan[ch_idx], ch_flags);
307 nvm_chan[ch_idx],
308 ch_flags,
309 (ch_idx >= num_2ghz_channels) ?
310 "5.2" : "2.4");
311 continue; 338 continue;
312 } 339 }
313 340
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
337 else 364 else
338 channel->flags = 0; 365 channel->flags = 0;
339 366
340 IWL_DEBUG_EEPROM(dev, 367 iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
341 "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", 368 channel->hw_value, ch_flags);
342 channel->hw_value, 369 IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
343 is_5ghz ? "5.2" : "2.4", 370 channel->hw_value, channel->max_power);
344 ch_flags,
345 CHECK_AND_PRINT_I(VALID),
346 CHECK_AND_PRINT_I(IBSS),
347 CHECK_AND_PRINT_I(ACTIVE),
348 CHECK_AND_PRINT_I(RADAR),
349 CHECK_AND_PRINT_I(INDOOR_ONLY),
350 CHECK_AND_PRINT_I(GO_CONCURRENT),
351 CHECK_AND_PRINT_I(UNIFORM),
352 CHECK_AND_PRINT_I(20MHZ),
353 CHECK_AND_PRINT_I(40MHZ),
354 CHECK_AND_PRINT_I(80MHZ),
355 CHECK_AND_PRINT_I(160MHZ),
356 CHECK_AND_PRINT_I(DC_HIGH),
357 channel->max_power,
358 ((ch_flags & NVM_CHANNEL_IBSS) &&
359 !(ch_flags & NVM_CHANNEL_RADAR))
360 ? "" : "not ");
361 } 371 }
362 372
363 return n_channels; 373 return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
484static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, 494static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
485 const __le16 *phy_sku) 495 const __le16 *phy_sku)
486{ 496{
487 if (!cfg->ext_nvm) 497 if (cfg->nvm_type != IWL_NVM_EXT)
488 return le16_to_cpup(nvm_sw + SKU); 498 return le16_to_cpup(nvm_sw + SKU);
489 499
490 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); 500 return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
492 502
493static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) 503static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
494{ 504{
495 if (!cfg->ext_nvm) 505 if (cfg->nvm_type != IWL_NVM_EXT)
496 return le16_to_cpup(nvm_sw + NVM_VERSION); 506 return le16_to_cpup(nvm_sw + NVM_VERSION);
497 else 507 else
498 return le32_to_cpup((__le32 *)(nvm_sw + 508 return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
502static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, 512static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
503 const __le16 *phy_sku) 513 const __le16 *phy_sku)
504{ 514{
505 if (!cfg->ext_nvm) 515 if (cfg->nvm_type != IWL_NVM_EXT)
506 return le16_to_cpup(nvm_sw + RADIO_CFG); 516 return le16_to_cpup(nvm_sw + RADIO_CFG);
507 517
508 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); 518 return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
513{ 523{
514 int n_hw_addr; 524 int n_hw_addr;
515 525
516 if (!cfg->ext_nvm) 526 if (cfg->nvm_type != IWL_NVM_EXT)
517 return le16_to_cpup(nvm_sw + N_HW_ADDRS); 527 return le16_to_cpup(nvm_sw + N_HW_ADDRS);
518 528
519 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); 529 n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
525 struct iwl_nvm_data *data, 535 struct iwl_nvm_data *data,
526 u32 radio_cfg) 536 u32 radio_cfg)
527{ 537{
528 if (!cfg->ext_nvm) { 538 if (cfg->nvm_type != IWL_NVM_EXT) {
529 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); 539 data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
530 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); 540 data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
531 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); 541 data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
634{ 644{
635 if (cfg->mac_addr_from_csr) { 645 if (cfg->mac_addr_from_csr) {
636 iwl_set_hw_address_from_csr(trans, data); 646 iwl_set_hw_address_from_csr(trans, data);
637 } else if (!cfg->ext_nvm) { 647 } else if (cfg->nvm_type != IWL_NVM_EXT) {
638 const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); 648 const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
639 649
640 /* The byte order is little endian 16 bit, meaning 214365 */ 650 /* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
706 u16 lar_config; 716 u16 lar_config;
707 const __le16 *ch_section; 717 const __le16 *ch_section;
708 718
709 if (!cfg->ext_nvm) 719 if (cfg->nvm_type != IWL_NVM_EXT)
710 data = kzalloc(sizeof(*data) + 720 data = kzalloc(sizeof(*data) +
711 sizeof(struct ieee80211_channel) * 721 sizeof(struct ieee80211_channel) *
712 IWL_NUM_CHANNELS, 722 IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
740 750
741 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 751 data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
742 752
743 if (!cfg->ext_nvm) { 753 if (cfg->nvm_type != IWL_NVM_EXT) {
744 /* Checking for required sections */ 754 /* Checking for required sections */
745 if (!nvm_calib) { 755 if (!nvm_calib) {
746 IWL_ERR(trans, 756 IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
748 kfree(data); 758 kfree(data);
749 return NULL; 759 return NULL;
750 } 760 }
761
762 ch_section = cfg->nvm_type == IWL_NVM_SDP ?
763 &regulatory[NVM_CHANNELS_SDP] :
764 &nvm_sw[NVM_CHANNELS];
765
751 /* in family 8000 Xtal calibration values moved to OTP */ 766 /* in family 8000 Xtal calibration values moved to OTP */
752 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); 767 data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
753 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); 768 data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
754 lar_enabled = true; 769 lar_enabled = true;
755 ch_section = &nvm_sw[NVM_CHANNELS];
756 } else { 770 } else {
757 u16 lar_offset = data->nvm_version < 0xE39 ? 771 u16 lar_offset = data->nvm_version < 0xE39 ?
758 NVM_LAR_OFFSET_OLD : 772 NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
786 u32 flags = NL80211_RRF_NO_HT40; 800 u32 flags = NL80211_RRF_NO_HT40;
787 u32 last_5ghz_ht = LAST_5GHZ_HT; 801 u32 last_5ghz_ht = LAST_5GHZ_HT;
788 802
789 if (cfg->ext_nvm) 803 if (cfg->nvm_type == IWL_NVM_EXT)
790 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 804 last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
791 805
792 if (ch_idx < NUM_2GHZ_CHANNELS && 806 if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
834 int ch_idx; 848 int ch_idx;
835 u16 ch_flags; 849 u16 ch_flags;
836 u32 reg_rule_flags, prev_reg_rule_flags = 0; 850 u32 reg_rule_flags, prev_reg_rule_flags = 0;
837 const u8 *nvm_chan = cfg->ext_nvm ? 851 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
838 iwl_ext_nvm_channels : iwl_nvm_channels; 852 iwl_ext_nvm_channels : iwl_nvm_channels;
839 struct ieee80211_regdomain *regd; 853 struct ieee80211_regdomain *regd;
840 int size_of_regd; 854 int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
843 int center_freq, prev_center_freq = 0; 857 int center_freq, prev_center_freq = 0;
844 int valid_rules = 0; 858 int valid_rules = 0;
845 bool new_rule; 859 bool new_rule;
846 int max_num_ch = cfg->ext_nvm ? 860 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
847 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; 861 IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
848 862
849 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) 863 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
873 new_rule = false; 887 new_rule = false;
874 888
875 if (!(ch_flags & NVM_CHANNEL_VALID)) { 889 if (!(ch_flags & NVM_CHANNEL_VALID)) {
876 IWL_DEBUG_DEV(dev, IWL_DL_LAR, 890 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
877 "Ch. %d Flags %x [%sGHz] - No traffic\n", 891 nvm_chan[ch_idx], ch_flags);
878 nvm_chan[ch_idx],
879 ch_flags,
880 (ch_idx >= NUM_2GHZ_CHANNELS) ?
881 "5.2" : "2.4");
882 continue; 892 continue;
883 } 893 }
884 894
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
914 prev_center_freq = center_freq; 924 prev_center_freq = center_freq;
915 prev_reg_rule_flags = reg_rule_flags; 925 prev_reg_rule_flags = reg_rule_flags;
916 926
917 IWL_DEBUG_DEV(dev, IWL_DL_LAR, 927 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
918 "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n", 928 nvm_chan[ch_idx], ch_flags);
919 center_freq,
920 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
921 CHECK_AND_PRINT_I(VALID),
922 CHECK_AND_PRINT_I(IBSS),
923 CHECK_AND_PRINT_I(ACTIVE),
924 CHECK_AND_PRINT_I(RADAR),
925 CHECK_AND_PRINT_I(INDOOR_ONLY),
926 CHECK_AND_PRINT_I(GO_CONCURRENT),
927 CHECK_AND_PRINT_I(UNIFORM),
928 CHECK_AND_PRINT_I(20MHZ),
929 CHECK_AND_PRINT_I(40MHZ),
930 CHECK_AND_PRINT_I(80MHZ),
931 CHECK_AND_PRINT_I(160MHZ),
932 CHECK_AND_PRINT_I(DC_HIGH),
933 ch_flags);
934 IWL_DEBUG_DEV(dev, IWL_DL_LAR,
935 "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
936 center_freq,
937 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
938 reg_rule_flags,
939 ((ch_flags & NVM_CHANNEL_ACTIVE) &&
940 !(ch_flags & NVM_CHANNEL_RADAR))
941 ? "Ad-Hoc" : "");
942 } 929 }
943 930
944 regd->n_reg_rules = valid_rules; 931 regd->n_reg_rules = valid_rules;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 5de19ea10575..b205a7bfb828 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2167,7 +2167,7 @@ out:
2167 * 1. We are not using a unified image 2167 * 1. We are not using a unified image
2168 * 2. We are using a unified image but had an error while exiting D3 2168 * 2. We are using a unified image but had an error while exiting D3
2169 */ 2169 */
2170 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 2170 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
2171 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); 2171 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
2172 /* 2172 /*
2173 * When switching images we return 1, which causes mac80211 2173 * When switching images we return 1, which causes mac80211
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 15f2d826bb4b..a9ac872226fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1077 mvm->vif_count = 0; 1077 mvm->vif_count = 0;
1078 mvm->rx_ba_sessions = 0; 1078 mvm->rx_ba_sessions = 0;
1079 mvm->fwrt.dump.conf = FW_DBG_INVALID; 1079 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1080 mvm->monitor_on = false;
1080 1081
1081 /* keep statistics ticking */ 1082 /* keep statistics ticking */
1082 iwl_mvm_accu_radio_stats(mvm); 1083 iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1437 mvm->p2p_device_vif = vif; 1438 mvm->p2p_device_vif = vif;
1438 } 1439 }
1439 1440
1441 if (vif->type == NL80211_IFTYPE_MONITOR)
1442 mvm->monitor_on = true;
1443
1440 iwl_mvm_vif_dbgfs_register(mvm, vif); 1444 iwl_mvm_vif_dbgfs_register(mvm, vif);
1441 goto out_unlock; 1445 goto out_unlock;
1442 1446
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1526 iwl_mvm_power_update_mac(mvm); 1530 iwl_mvm_power_update_mac(mvm);
1527 iwl_mvm_mac_ctxt_remove(mvm, vif); 1531 iwl_mvm_mac_ctxt_remove(mvm, vif);
1528 1532
1533 if (vif->type == NL80211_IFTYPE_MONITOR)
1534 mvm->monitor_on = false;
1535
1529out_release: 1536out_release:
1530 mutex_unlock(&mvm->mutex); 1537 mutex_unlock(&mvm->mutex);
1531} 1538}
@@ -1546,6 +1553,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1546 struct iwl_mvm_mc_iter_data *data = _data; 1553 struct iwl_mvm_mc_iter_data *data = _data;
1547 struct iwl_mvm *mvm = data->mvm; 1554 struct iwl_mvm *mvm = data->mvm;
1548 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1555 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1556 struct iwl_host_cmd hcmd = {
1557 .id = MCAST_FILTER_CMD,
1558 .flags = CMD_ASYNC,
1559 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1560 };
1549 int ret, len; 1561 int ret, len;
1550 1562
1551 /* if we don't have free ports, mcast frames will be dropped */ 1563 /* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1572,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1560 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1572 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1561 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1573 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1562 1574
1563 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); 1575 hcmd.len[0] = len;
1576 hcmd.data[0] = cmd;
1577
1578 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1564 if (ret) 1579 if (ret)
1565 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); 1580 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1566} 1581}
@@ -1635,6 +1650,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1635 if (!cmd) 1650 if (!cmd)
1636 goto out; 1651 goto out;
1637 1652
1653 if (changed_flags & FIF_ALLMULTI)
1654 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
1655
1656 if (cmd->pass_all)
1657 cmd->count = 0;
1658
1638 iwl_mvm_recalc_multicast(mvm); 1659 iwl_mvm_recalc_multicast(mvm);
1639out: 1660out:
1640 mutex_unlock(&mvm->mutex); 1661 mutex_unlock(&mvm->mutex);
@@ -2563,7 +2584,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2563 * queues, so we should never get a second deferred 2584 * queues, so we should never get a second deferred
2564 * frame for the RA/TID. 2585 * frame for the RA/TID.
2565 */ 2586 */
2566 iwl_mvm_start_mac_queues(mvm, info->hw_queue); 2587 iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
2567 ieee80211_free_txskb(mvm->hw, skb); 2588 ieee80211_free_txskb(mvm->hw, skb);
2568 } 2589 }
2569 } 2590 }
@@ -3975,6 +3996,43 @@ out_unlock:
3975 return ret; 3996 return ret;
3976} 3997}
3977 3998
3999static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
4000{
4001 if (drop) {
4002 if (iwl_mvm_has_new_tx_api(mvm))
4003 /* TODO new tx api */
4004 WARN_ONCE(1,
4005 "Need to implement flush TX queue\n");
4006 else
4007 iwl_mvm_flush_tx_path(mvm,
4008 iwl_mvm_flushable_queues(mvm) & queues,
4009 0);
4010 } else {
4011 if (iwl_mvm_has_new_tx_api(mvm)) {
4012 struct ieee80211_sta *sta;
4013 int i;
4014
4015 mutex_lock(&mvm->mutex);
4016
4017 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
4018 sta = rcu_dereference_protected(
4019 mvm->fw_id_to_mac_id[i],
4020 lockdep_is_held(&mvm->mutex));
4021 if (IS_ERR_OR_NULL(sta))
4022 continue;
4023
4024 iwl_mvm_wait_sta_queues_empty(mvm,
4025 iwl_mvm_sta_from_mac80211(sta));
4026 }
4027
4028 mutex_unlock(&mvm->mutex);
4029 } else {
4030 iwl_trans_wait_tx_queues_empty(mvm->trans,
4031 queues);
4032 }
4033 }
4034}
4035
3978static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 4036static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3979 struct ieee80211_vif *vif, u32 queues, bool drop) 4037 struct ieee80211_vif *vif, u32 queues, bool drop)
3980{ 4038{
@@ -3985,7 +4043,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3985 int i; 4043 int i;
3986 u32 msk = 0; 4044 u32 msk = 0;
3987 4045
3988 if (!vif || vif->type != NL80211_IFTYPE_STATION) 4046 if (!vif) {
4047 iwl_mvm_flush_no_vif(mvm, queues, drop);
4048 return;
4049 }
4050
4051 if (vif->type != NL80211_IFTYPE_STATION)
3989 return; 4052 return;
3990 4053
3991 /* Make sure we're done with the deferred traffic before flushing */ 4054 /* Make sure we're done with the deferred traffic before flushing */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 83303bac0e4b..949e63418299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
1015 bool drop_bcn_ap_mode; 1015 bool drop_bcn_ap_mode;
1016 1016
1017 struct delayed_work cs_tx_unblock_dwork; 1017 struct delayed_work cs_tx_unblock_dwork;
1018
1019 /* does a monitor vif exist (only one can exist hence bool) */
1020 bool monitor_on;
1018#ifdef CONFIG_ACPI 1021#ifdef CONFIG_ACPI
1019 struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; 1022 struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
1020 struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES]; 1023 struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
1159 * Enable LAR only if it is supported by the FW (TLV) && 1162 * Enable LAR only if it is supported by the FW (TLV) &&
1160 * enabled in the NVM 1163 * enabled in the NVM
1161 */ 1164 */
1162 if (mvm->cfg->ext_nvm) 1165 if (mvm->cfg->nvm_type == IWL_NVM_EXT)
1163 return nvm_lar && tlv_lar; 1166 return nvm_lar && tlv_lar;
1164 else 1167 else
1165 return tlv_lar; 1168 return tlv_lar;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 422aa6be9932..fb25b6f29323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
295 const __be16 *hw; 295 const __be16 *hw;
296 const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; 296 const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
297 bool lar_enabled; 297 bool lar_enabled;
298 int regulatory_type;
298 299
299 /* Checking for required sections */ 300 /* Checking for required sections */
300 if (!mvm->trans->cfg->ext_nvm) { 301 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
301 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 302 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
302 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { 303 !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
303 IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); 304 IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
304 return NULL; 305 return NULL;
305 } 306 }
306 } else { 307 } else {
308 if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
309 regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
310 else
311 regulatory_type = NVM_SECTION_TYPE_REGULATORY;
312
307 /* SW and REGULATORY sections are mandatory */ 313 /* SW and REGULATORY sections are mandatory */
308 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || 314 if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
309 !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { 315 !mvm->nvm_sections[regulatory_type].data) {
310 IWL_ERR(mvm, 316 IWL_ERR(mvm,
311 "Can't parse empty family 8000 OTP/NVM sections\n"); 317 "Can't parse empty family 8000 OTP/NVM sections\n");
312 return NULL; 318 return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
330 hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; 336 hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
331 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; 337 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
332 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; 338 calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
333 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
334 mac_override = 339 mac_override =
335 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; 340 (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
336 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; 341 phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
337 342
343 regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
344 (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
345 (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
346
338 lar_enabled = !iwlwifi_mod_params.lar_disable && 347 lar_enabled = !iwlwifi_mod_params.lar_disable &&
339 fw_has_capa(&mvm->fw->ucode_capa, 348 fw_has_capa(&mvm->fw->ucode_capa,
340 IWL_UCODE_TLV_CAPA_LAR_SUPPORT); 349 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
394 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); 403 IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
395 404
396 /* Maximal size depends on NVM version */ 405 /* Maximal size depends on NVM version */
397 if (!mvm->trans->cfg->ext_nvm) 406 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
398 max_section_size = IWL_MAX_NVM_SECTION_SIZE; 407 max_section_size = IWL_MAX_NVM_SECTION_SIZE;
399 else 408 else
400 max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; 409 max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
465 break; 474 break;
466 } 475 }
467 476
468 if (!mvm->trans->cfg->ext_nvm) { 477 if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
469 section_size = 478 section_size =
470 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); 479 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
471 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); 480 section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
740 struct ieee80211_regdomain *regd; 749 struct ieee80211_regdomain *regd;
741 char mcc[3]; 750 char mcc[3];
742 751
743 if (mvm->cfg->ext_nvm) { 752 if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
744 tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, 753 tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
745 IWL_UCODE_TLV_CAPA_LAR_SUPPORT); 754 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
746 nvm_lar = mvm->nvm_data->lar_enabled; 755 nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ba7bd049d3d4..0fe723ca844e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
661 (lq_sta->tx_agg_tid_en & BIT(tid)) && 661 (lq_sta->tx_agg_tid_en & BIT(tid)) &&
662 (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { 662 (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
663 IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); 663 IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
664 rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta); 664 if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
665 tid_data->state = IWL_AGG_QUEUED;
665 } 666 }
666} 667}
667 668
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 184c749766f2..2d14a58cbdd7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
244 return 0; 244 return 0;
245 245
246 default: 246 default:
247 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); 247 /* Expected in monitor (not having the keys) */
248 if (!mvm->monitor_on)
249 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
248 } 250 }
249 251
250 return 0; 252 return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 67ffd9774712..248699c2c4bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
277 stats->flag |= RX_FLAG_DECRYPTED; 277 stats->flag |= RX_FLAG_DECRYPTED;
278 return 0; 278 return 0;
279 default: 279 default:
280 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); 280 /* Expected in monitor (not having the keys) */
281 if (!mvm->monitor_on)
282 IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
281 } 283 }
282 284
283 return 0; 285 return 0;
@@ -672,11 +674,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
672 * If there was a significant jump in the nssn - adjust. 674 * If there was a significant jump in the nssn - adjust.
673 * If the SN is smaller than the NSSN it might need to first go into 675 * If the SN is smaller than the NSSN it might need to first go into
674 * the reorder buffer, in which case we just release up to it and the 676 * the reorder buffer, in which case we just release up to it and the
675 * rest of the function will take of storing it and releasing up to the 677 * rest of the function will take care of storing it and releasing up to
676 * nssn 678 * the nssn
677 */ 679 */
678 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, 680 if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
679 buffer->buf_size)) { 681 buffer->buf_size) ||
682 !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
680 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; 683 u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
681 684
682 iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); 685 iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 50983615dce6..774122fed454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
555 struct iwl_host_cmd cmd = { 555 struct iwl_host_cmd cmd = {
556 .id = SCAN_OFFLOAD_ABORT_CMD, 556 .id = SCAN_OFFLOAD_ABORT_CMD,
557 }; 557 };
558 u32 status; 558 u32 status = CAN_ABORT_STATUS;
559 559
560 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); 560 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
561 if (ret) 561 if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 411a2055dc45..c4a343534c5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1285{ 1285{
1286 struct iwl_mvm_add_sta_cmd cmd; 1286 struct iwl_mvm_add_sta_cmd cmd;
1287 int ret; 1287 int ret;
1288 u32 status; 1288 u32 status = ADD_STA_SUCCESS;
1289 1289
1290 lockdep_assert_held(&mvm->mutex); 1290 lockdep_assert_held(&mvm->mutex);
1291 1291
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2385 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2385 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2386 return -EINVAL; 2386 return -EINVAL;
2387 2387
2388 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2388 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2389 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", 2389 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2390 IWL_ERR(mvm,
2391 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2390 mvmsta->tid_data[tid].state); 2392 mvmsta->tid_data[tid].state);
2391 return -ENXIO; 2393 return -ENXIO;
2392 } 2394 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d13893806513..aedabe101cf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
281 * These states relate to a specific RA / TID. 281 * These states relate to a specific RA / TID.
282 * 282 *
283 * @IWL_AGG_OFF: aggregation is not used 283 * @IWL_AGG_OFF: aggregation is not used
284 * @IWL_AGG_QUEUED: aggregation start work has been queued
284 * @IWL_AGG_STARTING: aggregation are starting (between start and oper) 285 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
285 * @IWL_AGG_ON: aggregation session is up 286 * @IWL_AGG_ON: aggregation session is up
286 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the 287 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
290 */ 291 */
291enum iwl_mvm_agg_state { 292enum iwl_mvm_agg_state {
292 IWL_AGG_OFF = 0, 293 IWL_AGG_OFF = 0,
294 IWL_AGG_QUEUED,
293 IWL_AGG_STARTING, 295 IWL_AGG_STARTING,
294 IWL_AGG_ON, 296 IWL_AGG_ON,
295 IWL_EMPTYING_HW_QUEUE_ADDBA, 297 IWL_EMPTYING_HW_QUEUE_ADDBA,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 8876c2abc440..1232f63278eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
529 529
530 lockdep_assert_held(&mvm->mutex); 530 lockdep_assert_held(&mvm->mutex);
531 531
532 status = 0;
532 ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, 533 ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
533 CTDP_CONFIG_CMD), 534 CTDP_CONFIG_CMD),
534 sizeof(cmd), &cmd, &status); 535 sizeof(cmd), &cmd, &status);
@@ -630,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
630 631
631 if (!iwl_mvm_firmware_running(mvm) || 632 if (!iwl_mvm_firmware_running(mvm) ||
632 mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { 633 mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
633 ret = -EIO; 634 ret = -ENODATA;
634 goto out; 635 goto out;
635 } 636 }
636 637
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 172b5e63d3fb..6f2e2af23219 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
564 case NL80211_IFTYPE_AP: 564 case NL80211_IFTYPE_AP:
565 case NL80211_IFTYPE_ADHOC: 565 case NL80211_IFTYPE_ADHOC:
566 /* 566 /*
567 * Handle legacy hostapd as well, where station will be added 567 * Non-bufferable frames use the broadcast station, thus they
568 * only just before sending the association response. 568 * use the probe queue.
569 * Also take care of the case where we send a deauth to a 569 * Also take care of the case where we send a deauth to a
570 * station that we don't have, or similarly an association 570 * station that we don't have, or similarly an association
571 * response (with non-success status) for a station we can't 571 * response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
573 * Also, disassociate frames might happen, particular with 573 * Also, disassociate frames might happen, particular with
574 * reason 7 ("Class 3 frame received from nonassociated STA"). 574 * reason 7 ("Class 3 frame received from nonassociated STA").
575 */ 575 */
576 if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || 576 if (ieee80211_is_mgmt(fc) &&
577 ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) || 577 (!ieee80211_is_bufferable_mmpdu(fc) ||
578 ieee80211_is_disassoc(fc)) 578 ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
579 return mvm->probe_queue; 579 return mvm->probe_queue;
580 if (info->hw_queue == info->control.vif->cab_queue) 580 if (info->hw_queue == info->control.vif->cab_queue)
581 return mvmvif->cab_queue; 581 return mvmvif->cab_queue;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 856fa6e8327e..a450bc6bc774 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
115 115
116 vif = qtnf_netdev_get_priv(wdev->netdev); 116 vif = qtnf_netdev_get_priv(wdev->netdev);
117 117
118 qtnf_scan_done(vif->mac, true);
119
118 if (qtnf_cmd_send_del_intf(vif)) 120 if (qtnf_cmd_send_del_intf(vif))
119 pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, 121 pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
120 vif->vifid); 122 vif->vifid);
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
335 struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); 337 struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
336 int ret; 338 int ret;
337 339
340 qtnf_scan_done(vif->mac, true);
341
338 ret = qtnf_cmd_send_stop_ap(vif); 342 ret = qtnf_cmd_send_stop_ap(vif);
339 if (ret) { 343 if (ret) {
340 pr_err("VIF%u.%u: failed to stop AP operation in FW\n", 344 pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
570 !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) 574 !qtnf_sta_list_lookup(&vif->sta_list, params->mac))
571 return 0; 575 return 0;
572 576
573 qtnf_scan_done(vif->mac, true);
574
575 ret = qtnf_cmd_send_del_sta(vif, params); 577 ret = qtnf_cmd_send_del_sta(vif, params);
576 if (ret) 578 if (ret)
577 pr_err("VIF%u.%u: failed to delete STA %pM\n", 579 pr_err("VIF%u.%u: failed to delete STA %pM\n",
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
1134 } 1136 }
1135 1137
1136 vif->sta_state = QTNF_STA_DISCONNECTED; 1138 vif->sta_state = QTNF_STA_DISCONNECTED;
1137 qtnf_scan_done(mac, true);
1138 } 1139 }
1140
1141 qtnf_scan_done(mac, true);
1139} 1142}
1140 1143
1141void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) 1144void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 6a4af52522b8..66db26613b1f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
34 .aborted = aborted, 34 .aborted = aborted,
35 }; 35 };
36 36
37 if (timer_pending(&mac->scan_timeout))
38 del_timer_sync(&mac->scan_timeout);
39
37 mutex_lock(&mac->mac_lock); 40 mutex_lock(&mac->mac_lock);
38 41
39 if (mac->scan_req) { 42 if (mac->scan_req) {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 0fc2814eafad..43d2e7fd6e02 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
345 return -EINVAL; 345 return -EINVAL;
346 } 346 }
347 347
348 if (timer_pending(&mac->scan_timeout))
349 del_timer_sync(&mac->scan_timeout);
350 qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); 348 qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
351 349
352 return 0; 350 return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 502e72b7cdcc..69131965a298 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
661 struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); 661 struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
662 dma_addr_t txbd_paddr, skb_paddr; 662 dma_addr_t txbd_paddr, skb_paddr;
663 struct qtnf_tx_bd *txbd; 663 struct qtnf_tx_bd *txbd;
664 unsigned long flags;
664 int len, i; 665 int len, i;
665 u32 info; 666 u32 info;
666 int ret = 0; 667 int ret = 0;
667 668
669 spin_lock_irqsave(&priv->tx0_lock, flags);
670
668 if (!qtnf_tx_queue_ready(priv)) { 671 if (!qtnf_tx_queue_ready(priv)) {
669 if (skb->dev) 672 if (skb->dev)
670 netif_stop_queue(skb->dev); 673 netif_stop_queue(skb->dev);
671 674
675 spin_unlock_irqrestore(&priv->tx0_lock, flags);
672 return NETDEV_TX_BUSY; 676 return NETDEV_TX_BUSY;
673 } 677 }
674 678
@@ -717,8 +721,10 @@ tx_done:
717 dev_kfree_skb_any(skb); 721 dev_kfree_skb_any(skb);
718 } 722 }
719 723
720 qtnf_pcie_data_tx_reclaim(priv);
721 priv->tx_done_count++; 724 priv->tx_done_count++;
725 spin_unlock_irqrestore(&priv->tx0_lock, flags);
726
727 qtnf_pcie_data_tx_reclaim(priv);
722 728
723 return NETDEV_TX_OK; 729 return NETDEV_TX_OK;
724} 730}
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1247 strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); 1253 strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
1248 init_completion(&bus->request_firmware_complete); 1254 init_completion(&bus->request_firmware_complete);
1249 mutex_init(&bus->bus_lock); 1255 mutex_init(&bus->bus_lock);
1256 spin_lock_init(&pcie_priv->tx0_lock);
1250 spin_lock_init(&pcie_priv->irq_lock); 1257 spin_lock_init(&pcie_priv->irq_lock);
1251 spin_lock_init(&pcie_priv->tx_reclaim_lock); 1258 spin_lock_init(&pcie_priv->tx_reclaim_lock);
1252 1259
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
index e76a23716ee0..86ac1ccedb52 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {
34 34
35 /* lock for tx reclaim operations */ 35 /* lock for tx reclaim operations */
36 spinlock_t tx_reclaim_lock; 36 spinlock_t tx_reclaim_lock;
37 /* lock for tx0 operations */
38 spinlock_t tx0_lock;
37 u8 msi_enabled; 39 u8 msi_enabled;
38 int mps; 40 int mps;
39 41
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 4f73012978e9..1d431d4bf6d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
1122 } 1122 }
1123 if (0 == tmp) { 1123 if (0 == tmp) {
1124 read_addr = REG_DBI_RDATA + addr % 4; 1124 read_addr = REG_DBI_RDATA + addr % 4;
1125 ret = rtl_read_byte(rtlpriv, read_addr); 1125 ret = rtl_read_word(rtlpriv, read_addr);
1126 } 1126 }
1127 return ret; 1127 return ret;
1128} 1128}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ee8ed9da00ad..4491ca5aee90 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
486 486
487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH; 487 dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
488 488
489 dev->min_mtu = 0; 489 dev->min_mtu = ETH_MIN_MTU;
490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; 490 dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
491 491
492 /* 492 /*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1316 netdev->features |= netdev->hw_features; 1316 netdev->features |= netdev->hw_features;
1317 1317
1318 netdev->ethtool_ops = &xennet_ethtool_ops; 1318 netdev->ethtool_ops = &xennet_ethtool_ops;
1319 netdev->min_mtu = 0; 1319 netdev->min_mtu = ETH_MIN_MTU;
1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 1320 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1321 SET_NETDEV_DEV(netdev, &dev->dev); 1321 SET_NETDEV_DEV(netdev, &dev->dev);
1322 1322
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index acc816b67582..37f9039bb9ca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req)
134 return false; 134 return false;
135 if (nvme_req(req)->status & NVME_SC_DNR) 135 if (nvme_req(req)->status & NVME_SC_DNR)
136 return false; 136 return false;
137 if (jiffies - req->start_time >= req->timeout)
138 return false;
139 if (nvme_req(req)->retries >= nvme_max_retries) 137 if (nvme_req(req)->retries >= nvme_max_retries)
140 return false; 138 return false;
141 return true; 139 return true;
@@ -1251,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
1251 goto out; 1249 goto out;
1252 } 1250 }
1253 1251
1252 __nvme_revalidate_disk(disk, id);
1254 nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid); 1253 nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
1255 if (!uuid_equal(&ns->uuid, &uuid) || 1254 if (!uuid_equal(&ns->uuid, &uuid) ||
1256 memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) || 1255 memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
@@ -2138,7 +2137,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
2138 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); 2137 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
2139 2138
2140 if (a == &dev_attr_uuid.attr) { 2139 if (a == &dev_attr_uuid.attr) {
2141 if (uuid_is_null(&ns->uuid) || 2140 if (uuid_is_null(&ns->uuid) &&
2142 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) 2141 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
2143 return 0; 2142 return 0;
2144 } 2143 }
@@ -2590,7 +2589,7 @@ static void nvme_async_event_work(struct work_struct *work)
2590 container_of(work, struct nvme_ctrl, async_event_work); 2589 container_of(work, struct nvme_ctrl, async_event_work);
2591 2590
2592 spin_lock_irq(&ctrl->lock); 2591 spin_lock_irq(&ctrl->lock);
2593 while (ctrl->event_limit > 0) { 2592 while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
2594 int aer_idx = --ctrl->event_limit; 2593 int aer_idx = --ctrl->event_limit;
2595 2594
2596 spin_unlock_irq(&ctrl->lock); 2595 spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2676,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
2677 /*FALLTHRU*/ 2676 /*FALLTHRU*/
2678 case NVME_SC_ABORT_REQ: 2677 case NVME_SC_ABORT_REQ:
2679 ++ctrl->event_limit; 2678 ++ctrl->event_limit;
2680 queue_work(nvme_wq, &ctrl->async_event_work); 2679 if (ctrl->state == NVME_CTRL_LIVE)
2680 queue_work(nvme_wq, &ctrl->async_event_work);
2681 break; 2681 break;
2682 default: 2682 default:
2683 break; 2683 break;
@@ -2692,7 +2692,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
2692 nvme_queue_scan(ctrl); 2692 nvme_queue_scan(ctrl);
2693 break; 2693 break;
2694 case NVME_AER_NOTICE_FW_ACT_STARTING: 2694 case NVME_AER_NOTICE_FW_ACT_STARTING:
2695 schedule_work(&ctrl->fw_act_work); 2695 queue_work(nvme_wq, &ctrl->fw_act_work);
2696 break; 2696 break;
2697 default: 2697 default:
2698 dev_warn(ctrl->device, "async event result %08x\n", result); 2698 dev_warn(ctrl->device, "async event result %08x\n", result);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 47307752dc65..555c976cc2ee 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
565 opts->queue_size = NVMF_DEF_QUEUE_SIZE; 565 opts->queue_size = NVMF_DEF_QUEUE_SIZE;
566 opts->nr_io_queues = num_online_cpus(); 566 opts->nr_io_queues = num_online_cpus();
567 opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; 567 opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
568 opts->kato = NVME_DEFAULT_KATO;
568 569
569 options = o = kstrdup(buf, GFP_KERNEL); 570 options = o = kstrdup(buf, GFP_KERNEL);
570 if (!options) 571 if (!options)
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
655 goto out; 656 goto out;
656 } 657 }
657 658
658 if (opts->discovery_nqn) {
659 pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
660 ret = -EINVAL;
661 goto out;
662 }
663
664 if (token < 0) { 659 if (token < 0) {
665 pr_err("Invalid keep_alive_tmo %d\n", token); 660 pr_err("Invalid keep_alive_tmo %d\n", token);
666 ret = -EINVAL; 661 ret = -EINVAL;
667 goto out; 662 goto out;
668 } else if (token == 0) { 663 } else if (token == 0 && !opts->discovery_nqn) {
669 /* Allowed for debug */ 664 /* Allowed for debug */
670 pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); 665 pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
671 } 666 }
672 opts->kato = token; 667 opts->kato = token;
668
669 if (opts->discovery_nqn && opts->kato) {
670 pr_err("Discovery controllers cannot accept KATO != 0\n");
671 ret = -EINVAL;
672 goto out;
673 }
674
673 break; 675 break;
674 case NVMF_OPT_CTRL_LOSS_TMO: 676 case NVMF_OPT_CTRL_LOSS_TMO:
675 if (match_int(args, &token)) { 677 if (match_int(args, &token)) {
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
762 uuid_copy(&opts->host->id, &hostid); 764 uuid_copy(&opts->host->id, &hostid);
763 765
764out: 766out:
765 if (!opts->discovery_nqn && !opts->kato)
766 opts->kato = NVME_DEFAULT_KATO;
767 kfree(options); 767 kfree(options);
768 return ret; 768 return ret;
769} 769}
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d2e882c0f496..be49d0f79381 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1376 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) 1376 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1377 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); 1377 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1378 else if (freq->status) 1378 else if (freq->status)
1379 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1379 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1380 1380
1381 /* 1381 /*
1382 * For the linux implementation, if we have an unsuccesful 1382 * For the linux implementation, if we have an unsuccesful
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1404 */ 1404 */
1405 if (freq->transferred_length != 1405 if (freq->transferred_length !=
1406 be32_to_cpu(op->cmd_iu.data_len)) { 1406 be32_to_cpu(op->cmd_iu.data_len)) {
1407 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1407 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1408 goto done; 1408 goto done;
1409 } 1409 }
1410 result.u64 = 0; 1410 result.u64 = 0;
@@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1421 freq->transferred_length || 1421 freq->transferred_length ||
1422 op->rsp_iu.status_code || 1422 op->rsp_iu.status_code ||
1423 sqe->common.command_id != cqe->command_id)) { 1423 sqe->common.command_id != cqe->command_id)) {
1424 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1424 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1425 goto done; 1425 goto done;
1426 } 1426 }
1427 result = cqe->result; 1427 result = cqe->result;
@@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1429 break; 1429 break;
1430 1430
1431 default: 1431 default:
1432 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1432 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1433 goto done; 1433 goto done;
1434 } 1434 }
1435 1435
@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1989 * as well as those by FC-NVME spec. 1989 * as well as those by FC-NVME spec.
1990 */ 1990 */
1991 WARN_ON_ONCE(sqe->common.metadata); 1991 WARN_ON_ONCE(sqe->common.metadata);
1992 WARN_ON_ONCE(sqe->common.dptr.prp1);
1993 WARN_ON_ONCE(sqe->common.dptr.prp2);
1994 sqe->common.flags |= NVME_CMD_SGL_METABUF; 1992 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1995 1993
1996 /* 1994 /*
1997 * format SQE DPTR field per FC-NVME rules 1995 * format SQE DPTR field per FC-NVME rules:
1998 * type=data block descr; subtype=offset; 1996 * type=0x5 Transport SGL Data Block Descriptor
1999 * offset is currently 0. 1997 * subtype=0xA Transport-specific value
1998 * address=0
1999 * length=length of the data series
2000 */ 2000 */
2001 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; 2001 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2002 NVME_SGL_FMT_TRANSPORT_A;
2002 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2003 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2003 sqe->rw.dptr.sgl.addr = 0; 2004 sqe->rw.dptr.sgl.addr = 0;
2004 2005
@@ -2544,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
2544 nvme_fc_abort_aen_ops(ctrl); 2545 nvme_fc_abort_aen_ops(ctrl);
2545 2546
2546 /* wait for all io that had to be aborted */ 2547 /* wait for all io that had to be aborted */
2547 spin_lock_irqsave(&ctrl->lock, flags); 2548 spin_lock_irq(&ctrl->lock);
2548 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 2549 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
2549 ctrl->flags &= ~FCCTRL_TERMIO; 2550 ctrl->flags &= ~FCCTRL_TERMIO;
2550 spin_unlock_irqrestore(&ctrl->lock, flags); 2551 spin_unlock_irq(&ctrl->lock);
2551 2552
2552 nvme_fc_term_aen_ops(ctrl); 2553 nvme_fc_term_aen_ops(ctrl);
2553 2554
@@ -2733,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2733{ 2734{
2734 struct nvme_fc_ctrl *ctrl; 2735 struct nvme_fc_ctrl *ctrl;
2735 unsigned long flags; 2736 unsigned long flags;
2736 int ret, idx; 2737 int ret, idx, retry;
2737 2738
2738 if (!(rport->remoteport.port_role & 2739 if (!(rport->remoteport.port_role &
2739 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 2740 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2759,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2759 ctrl->rport = rport; 2760 ctrl->rport = rport;
2760 ctrl->dev = lport->dev; 2761 ctrl->dev = lport->dev;
2761 ctrl->cnum = idx; 2762 ctrl->cnum = idx;
2763 init_waitqueue_head(&ctrl->ioabort_wait);
2762 2764
2763 get_device(ctrl->dev); 2765 get_device(ctrl->dev);
2764 kref_init(&ctrl->ref); 2766 kref_init(&ctrl->ref);
@@ -2824,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2824 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 2826 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2825 spin_unlock_irqrestore(&rport->lock, flags); 2827 spin_unlock_irqrestore(&rport->lock, flags);
2826 2828
2827 ret = nvme_fc_create_association(ctrl); 2829 /*
2830 * It's possible that transactions used to create the association
2831 * may fail. Examples: CreateAssociation LS or CreateIOConnection
2832 * LS gets dropped/corrupted/fails; or a frame gets dropped or a
2833 * command times out for one of the actions to init the controller
2834 * (Connect, Get/Set_Property, Set_Features, etc). Many of these
2835 * transport errors (frame drop, LS failure) inherently must kill
2836 * the association. The transport is coded so that any command used
2837 * to create the association (prior to a LIVE state transition
2838 * while NEW or RECONNECTING) will fail if it completes in error or
2839 * times out.
2840 *
2841 * As such: as the connect request was mostly likely due to a
2842 * udev event that discovered the remote port, meaning there is
2843 * not an admin or script there to restart if the connect
2844 * request fails, retry the initial connection creation up to
2845 * three times before giving up and declaring failure.
2846 */
2847 for (retry = 0; retry < 3; retry++) {
2848 ret = nvme_fc_create_association(ctrl);
2849 if (!ret)
2850 break;
2851 }
2852
2828 if (ret) { 2853 if (ret) {
2854 /* couldn't schedule retry - fail out */
2855 dev_err(ctrl->ctrl.device,
2856 "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
2857
2829 ctrl->ctrl.opts = NULL; 2858 ctrl->ctrl.opts = NULL;
2859
2830 /* initiate nvme ctrl ref counting teardown */ 2860 /* initiate nvme ctrl ref counting teardown */
2831 nvme_uninit_ctrl(&ctrl->ctrl); 2861 nvme_uninit_ctrl(&ctrl->ctrl);
2832 nvme_put_ctrl(&ctrl->ctrl); 2862 nvme_put_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4a2121335f48..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -24,6 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/once.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
28#include <linux/poison.h> 29#include <linux/poison.h>
29#include <linux/t10-pi.h> 30#include <linux/t10-pi.h>
@@ -93,7 +94,7 @@ struct nvme_dev {
93 struct mutex shutdown_lock; 94 struct mutex shutdown_lock;
94 bool subsystem; 95 bool subsystem;
95 void __iomem *cmb; 96 void __iomem *cmb;
96 dma_addr_t cmb_dma_addr; 97 pci_bus_addr_t cmb_bus_addr;
97 u64 cmb_size; 98 u64 cmb_size;
98 u32 cmbsz; 99 u32 cmbsz;
99 u32 cmbloc; 100 u32 cmbloc;
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
540} 541}
541#endif 542#endif
542 543
544static void nvme_print_sgl(struct scatterlist *sgl, int nents)
545{
546 int i;
547 struct scatterlist *sg;
548
549 for_each_sg(sgl, sg, nents, i) {
550 dma_addr_t phys = sg_phys(sg);
551 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
552 "dma_address:%pad dma_length:%d\n",
553 i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
554 sg_dma_len(sg));
555 }
556}
557
543static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) 558static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
544{ 559{
545 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 560 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
622 return BLK_STS_OK; 637 return BLK_STS_OK;
623 638
624 bad_sgl: 639 bad_sgl:
625 if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", 640 WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
626 blk_rq_payload_bytes(req), iod->nents)) { 641 "Invalid SGL for payload:%d nents:%d\n",
627 for_each_sg(iod->sg, sg, iod->nents, i) { 642 blk_rq_payload_bytes(req), iod->nents);
628 dma_addr_t phys = sg_phys(sg);
629 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
630 "dma_address:%pad dma_length:%d\n", i, &phys,
631 sg->offset, sg->length,
632 &sg_dma_address(sg),
633 sg_dma_len(sg));
634 }
635 }
636 return BLK_STS_IOERR; 643 return BLK_STS_IOERR;
637
638} 644}
639 645
640static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 646static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1220 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 1226 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1221 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 1227 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
1222 dev->ctrl.page_size); 1228 dev->ctrl.page_size);
1223 nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 1229 nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1224 nvmeq->sq_cmds_io = dev->cmb + offset; 1230 nvmeq->sq_cmds_io = dev->cmb + offset;
1225 } else { 1231 } else {
1226 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 1232 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1313 if (result < 0) 1319 if (result < 0)
1314 goto release_cq; 1320 goto release_cq;
1315 1321
1322 nvme_init_queue(nvmeq, qid);
1316 result = queue_request_irq(nvmeq); 1323 result = queue_request_irq(nvmeq);
1317 if (result < 0) 1324 if (result < 0)
1318 goto release_sq; 1325 goto release_sq;
1319 1326
1320 nvme_init_queue(nvmeq, qid);
1321 return result; 1327 return result;
1322 1328
1323 release_sq: 1329 release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
1464 return result; 1470 return result;
1465 1471
1466 nvmeq->cq_vector = 0; 1472 nvmeq->cq_vector = 0;
1473 nvme_init_queue(nvmeq, 0);
1467 result = queue_request_irq(nvmeq); 1474 result = queue_request_irq(nvmeq);
1468 if (result) { 1475 if (result) {
1469 nvmeq->cq_vector = -1; 1476 nvmeq->cq_vector = -1;
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1520 resource_size_t bar_size; 1527 resource_size_t bar_size;
1521 struct pci_dev *pdev = to_pci_dev(dev->dev); 1528 struct pci_dev *pdev = to_pci_dev(dev->dev);
1522 void __iomem *cmb; 1529 void __iomem *cmb;
1523 dma_addr_t dma_addr; 1530 int bar;
1524 1531
1525 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1532 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1526 if (!(NVME_CMB_SZ(dev->cmbsz))) 1533 if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1533 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 1540 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
1534 size = szu * NVME_CMB_SZ(dev->cmbsz); 1541 size = szu * NVME_CMB_SZ(dev->cmbsz);
1535 offset = szu * NVME_CMB_OFST(dev->cmbloc); 1542 offset = szu * NVME_CMB_OFST(dev->cmbloc);
1536 bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 1543 bar = NVME_CMB_BIR(dev->cmbloc);
1544 bar_size = pci_resource_len(pdev, bar);
1537 1545
1538 if (offset > bar_size) 1546 if (offset > bar_size)
1539 return NULL; 1547 return NULL;
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1546 if (size > bar_size - offset) 1554 if (size > bar_size - offset)
1547 size = bar_size - offset; 1555 size = bar_size - offset;
1548 1556
1549 dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 1557 cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
1550 cmb = ioremap_wc(dma_addr, size);
1551 if (!cmb) 1558 if (!cmb)
1552 return NULL; 1559 return NULL;
1553 1560
1554 dev->cmb_dma_addr = dma_addr; 1561 dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
1555 dev->cmb_size = size; 1562 dev->cmb_size = size;
1556 return cmb; 1563 return cmb;
1557} 1564}
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
2156 if (result) 2163 if (result)
2157 goto out; 2164 goto out;
2158 2165
2159 nvme_init_queue(dev->queues[0], 0);
2160 result = nvme_alloc_admin_tags(dev); 2166 result = nvme_alloc_admin_tags(dev);
2161 if (result) 2167 if (result)
2162 goto out; 2168 goto out;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 58983000964b..0ebb539f3bd3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
571 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) 571 if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
572 return; 572 return;
573 573
574 if (nvme_rdma_queue_idx(queue) == 0) {
575 nvme_rdma_free_qe(queue->device->dev,
576 &queue->ctrl->async_event_sqe,
577 sizeof(struct nvme_command), DMA_TO_DEVICE);
578 }
579
574 nvme_rdma_destroy_queue_ib(queue); 580 nvme_rdma_destroy_queue_ib(queue);
575 rdma_destroy_id(queue->cm_id); 581 rdma_destroy_id(queue->cm_id);
576} 582}
@@ -739,8 +745,6 @@ out:
739static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, 745static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
740 bool remove) 746 bool remove)
741{ 747{
742 nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
743 sizeof(struct nvme_command), DMA_TO_DEVICE);
744 nvme_rdma_stop_queue(&ctrl->queues[0]); 748 nvme_rdma_stop_queue(&ctrl->queues[0]);
745 if (remove) { 749 if (remove) {
746 blk_cleanup_queue(ctrl->ctrl.admin_q); 750 blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
765 769
766 if (new) { 770 if (new) {
767 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); 771 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
768 if (IS_ERR(ctrl->ctrl.admin_tagset)) 772 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
773 error = PTR_ERR(ctrl->ctrl.admin_tagset);
769 goto out_free_queue; 774 goto out_free_queue;
775 }
770 776
771 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 777 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
772 if (IS_ERR(ctrl->ctrl.admin_q)) { 778 if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
846 852
847 if (new) { 853 if (new) {
848 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); 854 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
849 if (IS_ERR(ctrl->ctrl.tagset)) 855 if (IS_ERR(ctrl->ctrl.tagset)) {
856 ret = PTR_ERR(ctrl->ctrl.tagset);
850 goto out_free_io_queues; 857 goto out_free_io_queues;
858 }
851 859
852 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 860 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
853 if (IS_ERR(ctrl->ctrl.connect_q)) { 861 if (IS_ERR(ctrl->ctrl.connect_q)) {
@@ -942,7 +950,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
942 } 950 }
943 951
944 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 952 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
945 WARN_ON_ONCE(!changed); 953 if (!changed) {
954 /* state change failure is ok if we're in DELETING state */
955 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
956 return;
957 }
958
946 ctrl->ctrl.nr_reconnects = 0; 959 ctrl->ctrl.nr_reconnects = 0;
947 960
948 nvme_start_ctrl(&ctrl->ctrl); 961 nvme_start_ctrl(&ctrl->ctrl);
@@ -962,7 +975,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
962 struct nvme_rdma_ctrl *ctrl = container_of(work, 975 struct nvme_rdma_ctrl *ctrl = container_of(work,
963 struct nvme_rdma_ctrl, err_work); 976 struct nvme_rdma_ctrl, err_work);
964 977
965 nvme_stop_ctrl(&ctrl->ctrl); 978 nvme_stop_keep_alive(&ctrl->ctrl);
966 979
967 if (ctrl->ctrl.queue_count > 1) { 980 if (ctrl->ctrl.queue_count > 1) {
968 nvme_stop_queues(&ctrl->ctrl); 981 nvme_stop_queues(&ctrl->ctrl);
@@ -1601,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
1601 /* 1614 /*
1602 * reconnecting state means transport disruption, which 1615 * reconnecting state means transport disruption, which
1603 * can take a long time and even might fail permanently, 1616 * can take a long time and even might fail permanently,
1604 * so we can't let incoming I/O be requeued forever. 1617 * fail fast to give upper layers a chance to failover.
1605 * fail it fast to allow upper layers a chance to 1618 * deleting state means that the ctrl will never accept
1606 * failover. 1619 * commands again, fail it permanently.
1607 */ 1620 */
1608 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) 1621 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
1622 queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
1623 nvme_req(rq)->status = NVME_SC_ABORT_REQ;
1609 return BLK_STS_IOERR; 1624 return BLK_STS_IOERR;
1625 }
1610 return BLK_STS_RESOURCE; /* try again later */ 1626 return BLK_STS_RESOURCE; /* try again later */
1611 } 1627 }
1612 } 1628 }
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7c23eaf8e563..645ba7eee35d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -387,13 +387,22 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
387 387
388static void __nvmet_req_complete(struct nvmet_req *req, u16 status) 388static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
389{ 389{
390 u32 old_sqhd, new_sqhd;
391 u16 sqhd;
392
390 if (status) 393 if (status)
391 nvmet_set_status(req, status); 394 nvmet_set_status(req, status);
392 395
393 /* XXX: need to fill in something useful for sq_head */ 396 if (req->sq->size) {
394 req->rsp->sq_head = 0; 397 do {
395 if (likely(req->sq)) /* may happen during early failure */ 398 old_sqhd = req->sq->sqhd;
396 req->rsp->sq_id = cpu_to_le16(req->sq->qid); 399 new_sqhd = (old_sqhd + 1) % req->sq->size;
400 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
401 old_sqhd);
402 }
403 sqhd = req->sq->sqhd & 0x0000FFFF;
404 req->rsp->sq_head = cpu_to_le16(sqhd);
405 req->rsp->sq_id = cpu_to_le16(req->sq->qid);
397 req->rsp->command_id = req->cmd->common.command_id; 406 req->rsp->command_id = req->cmd->common.command_id;
398 407
399 if (req->ns) 408 if (req->ns)
@@ -420,6 +429,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
420void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, 429void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
421 u16 qid, u16 size) 430 u16 qid, u16 size)
422{ 431{
432 sq->sqhd = 0;
423 sq->qid = qid; 433 sq->qid = qid;
424 sq->size = size; 434 sq->size = size;
425 435
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 859a66725291..db3bf6b8bf9e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
109 pr_warn("queue already connected!\n"); 109 pr_warn("queue already connected!\n");
110 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; 110 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
111 } 111 }
112 if (!sqsize) {
113 pr_warn("queue size zero!\n");
114 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
115 }
112 116
113 nvmet_cq_setup(ctrl, req->cq, qid, sqsize); 117 /* note: convert queue size from 0's-based value to 1's-based value */
114 nvmet_sq_setup(ctrl, req->sq, qid, sqsize); 118 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
119 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
115 return 0; 120 return 0;
116} 121}
117 122
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 421e43bf1dd7..58e010bdda3e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc {
148 u32 a_id; 148 u32 a_id;
149 struct nvmet_fc_tgtport *tgtport; 149 struct nvmet_fc_tgtport *tgtport;
150 struct list_head a_list; 150 struct list_head a_list;
151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; 151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
152 struct kref ref; 152 struct kref ref;
153}; 153};
154 154
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
608 unsigned long flags; 608 unsigned long flags;
609 int ret; 609 int ret;
610 610
611 if (qid >= NVMET_NR_QUEUES) 611 if (qid > NVMET_NR_QUEUES)
612 return NULL; 612 return NULL;
613 613
614 queue = kzalloc((sizeof(*queue) + 614 queue = kzalloc((sizeof(*queue) +
@@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
783 u16 qid = nvmet_fc_getqueueid(connection_id); 783 u16 qid = nvmet_fc_getqueueid(connection_id);
784 unsigned long flags; 784 unsigned long flags;
785 785
786 if (qid > NVMET_NR_QUEUES)
787 return NULL;
788
786 spin_lock_irqsave(&tgtport->lock, flags); 789 spin_lock_irqsave(&tgtport->lock, flags);
787 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 790 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
788 if (association_id == assoc->association_id) { 791 if (association_id == assoc->association_id) {
@@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
888 int i; 891 int i;
889 892
890 spin_lock_irqsave(&tgtport->lock, flags); 893 spin_lock_irqsave(&tgtport->lock, flags);
891 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { 894 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
892 queue = assoc->queues[i]; 895 queue = assoc->queues[i];
893 if (queue) { 896 if (queue) {
894 if (!nvmet_fc_tgt_q_get(queue)) 897 if (!nvmet_fc_tgt_q_get(queue))
@@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1910 spin_lock_irqsave(&fod->flock, flags); 1913 spin_lock_irqsave(&fod->flock, flags);
1911 fod->writedataactive = false; 1914 fod->writedataactive = false;
1912 spin_unlock_irqrestore(&fod->flock, flags); 1915 spin_unlock_irqrestore(&fod->flock, flags);
1913 nvmet_req_complete(&fod->req, 1916 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1914 NVME_SC_FC_TRANSPORT_ERROR);
1915 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 1917 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1916 fcpreq->fcp_error = ret; 1918 fcpreq->fcp_error = ret;
1917 fcpreq->transferred_length = 0; 1919 fcpreq->transferred_length = 0;
@@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1929 /* if in the middle of an io and we need to tear down */ 1931 /* if in the middle of an io and we need to tear down */
1930 if (abort) { 1932 if (abort) {
1931 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 1933 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1932 nvmet_req_complete(&fod->req, 1934 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1933 NVME_SC_FC_TRANSPORT_ERROR);
1934 return true; 1935 return true;
1935 } 1936 }
1936 1937
@@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1968 fod->abort = true; 1969 fod->abort = true;
1969 spin_unlock(&fod->flock); 1970 spin_unlock(&fod->flock);
1970 1971
1971 nvmet_req_complete(&fod->req, 1972 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1972 NVME_SC_FC_TRANSPORT_ERROR);
1973 return; 1973 return;
1974 } 1974 }
1975 1975
@@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
2533{ 2533{
2534 struct nvmet_fc_tgtport *tgtport = port->priv; 2534 struct nvmet_fc_tgtport *tgtport = port->priv;
2535 unsigned long flags; 2535 unsigned long flags;
2536 bool matched = false;
2536 2537
2537 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2538 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2538 if (tgtport->port == port) { 2539 if (tgtport->port == port) {
2539 nvmet_fc_tgtport_put(tgtport); 2540 matched = true;
2540 tgtport->port = NULL; 2541 tgtport->port = NULL;
2541 } 2542 }
2542 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2543 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2544
2545 if (matched)
2546 nvmet_fc_tgtport_put(tgtport);
2543} 2547}
2544 2548
2545static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2549static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1cb9847ec261..7b75d9de55ab 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -224,8 +224,6 @@ struct fcloop_nport {
224 struct fcloop_lport *lport; 224 struct fcloop_lport *lport;
225 struct list_head nport_list; 225 struct list_head nport_list;
226 struct kref ref; 226 struct kref ref;
227 struct completion rport_unreg_done;
228 struct completion tport_unreg_done;
229 u64 node_name; 227 u64 node_name;
230 u64 port_name; 228 u64 port_name;
231 u32 port_role; 229 u32 port_role;
@@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
576 tfcp_req->aborted = true; 574 tfcp_req->aborted = true;
577 spin_unlock(&tfcp_req->reqlock); 575 spin_unlock(&tfcp_req->reqlock);
578 576
579 tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; 577 tfcp_req->status = NVME_SC_INTERNAL;
580 578
581 /* 579 /*
582 * nothing more to do. If io wasn't active, the transport should 580 * nothing more to do. If io wasn't active, the transport should
@@ -631,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
631} 629}
632 630
633static void 631static void
632fcloop_nport_free(struct kref *ref)
633{
634 struct fcloop_nport *nport =
635 container_of(ref, struct fcloop_nport, ref);
636 unsigned long flags;
637
638 spin_lock_irqsave(&fcloop_lock, flags);
639 list_del(&nport->nport_list);
640 spin_unlock_irqrestore(&fcloop_lock, flags);
641
642 kfree(nport);
643}
644
645static void
646fcloop_nport_put(struct fcloop_nport *nport)
647{
648 kref_put(&nport->ref, fcloop_nport_free);
649}
650
651static int
652fcloop_nport_get(struct fcloop_nport *nport)
653{
654 return kref_get_unless_zero(&nport->ref);
655}
656
657static void
634fcloop_localport_delete(struct nvme_fc_local_port *localport) 658fcloop_localport_delete(struct nvme_fc_local_port *localport)
635{ 659{
636 struct fcloop_lport *lport = localport->private; 660 struct fcloop_lport *lport = localport->private;
@@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
644{ 668{
645 struct fcloop_rport *rport = remoteport->private; 669 struct fcloop_rport *rport = remoteport->private;
646 670
647 /* release any threads waiting for the unreg to complete */ 671 fcloop_nport_put(rport->nport);
648 complete(&rport->nport->rport_unreg_done);
649} 672}
650 673
651static void 674static void
@@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
653{ 676{
654 struct fcloop_tport *tport = targetport->private; 677 struct fcloop_tport *tport = targetport->private;
655 678
656 /* release any threads waiting for the unreg to complete */ 679 fcloop_nport_put(tport->nport);
657 complete(&tport->nport->tport_unreg_done);
658} 680}
659 681
660#define FCLOOP_HW_QUEUES 4 682#define FCLOOP_HW_QUEUES 4
@@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
722 goto out_free_opts; 744 goto out_free_opts;
723 } 745 }
724 746
747 memset(&pinfo, 0, sizeof(pinfo));
725 pinfo.node_name = opts->wwnn; 748 pinfo.node_name = opts->wwnn;
726 pinfo.port_name = opts->wwpn; 749 pinfo.port_name = opts->wwpn;
727 pinfo.port_role = opts->roles; 750 pinfo.port_role = opts->roles;
@@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
804 return ret ? ret : count; 827 return ret ? ret : count;
805} 828}
806 829
807static void
808fcloop_nport_free(struct kref *ref)
809{
810 struct fcloop_nport *nport =
811 container_of(ref, struct fcloop_nport, ref);
812 unsigned long flags;
813
814 spin_lock_irqsave(&fcloop_lock, flags);
815 list_del(&nport->nport_list);
816 spin_unlock_irqrestore(&fcloop_lock, flags);
817
818 kfree(nport);
819}
820
821static void
822fcloop_nport_put(struct fcloop_nport *nport)
823{
824 kref_put(&nport->ref, fcloop_nport_free);
825}
826
827static int
828fcloop_nport_get(struct fcloop_nport *nport)
829{
830 return kref_get_unless_zero(&nport->ref);
831}
832
833static struct fcloop_nport * 830static struct fcloop_nport *
834fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) 831fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
835{ 832{
@@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
938 if (!nport) 935 if (!nport)
939 return -EIO; 936 return -EIO;
940 937
938 memset(&pinfo, 0, sizeof(pinfo));
941 pinfo.node_name = nport->node_name; 939 pinfo.node_name = nport->node_name;
942 pinfo.port_name = nport->port_name; 940 pinfo.port_name = nport->port_name;
943 pinfo.port_role = nport->port_role; 941 pinfo.port_role = nport->port_role;
@@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport)
979} 977}
980 978
981static int 979static int
982__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) 980__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
983{ 981{
984 int ret;
985
986 if (!rport) 982 if (!rport)
987 return -EALREADY; 983 return -EALREADY;
988 984
989 init_completion(&nport->rport_unreg_done); 985 return nvme_fc_unregister_remoteport(rport->remoteport);
990
991 ret = nvme_fc_unregister_remoteport(rport->remoteport);
992 if (ret)
993 return ret;
994
995 wait_for_completion(&nport->rport_unreg_done);
996
997 fcloop_nport_put(nport);
998
999 return ret;
1000} 986}
1001 987
1002static ssize_t 988static ssize_t
@@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1029 if (!nport) 1015 if (!nport)
1030 return -ENOENT; 1016 return -ENOENT;
1031 1017
1032 ret = __wait_remoteport_unreg(nport, rport); 1018 ret = __remoteport_unreg(nport, rport);
1033 1019
1034 return ret ? ret : count; 1020 return ret ? ret : count;
1035} 1021}
@@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport)
1086} 1072}
1087 1073
1088static int 1074static int
1089__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) 1075__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1090{ 1076{
1091 int ret;
1092
1093 if (!tport) 1077 if (!tport)
1094 return -EALREADY; 1078 return -EALREADY;
1095 1079
1096 init_completion(&nport->tport_unreg_done); 1080 return nvmet_fc_unregister_targetport(tport->targetport);
1097
1098 ret = nvmet_fc_unregister_targetport(tport->targetport);
1099 if (ret)
1100 return ret;
1101
1102 wait_for_completion(&nport->tport_unreg_done);
1103
1104 fcloop_nport_put(nport);
1105
1106 return ret;
1107} 1081}
1108 1082
1109static ssize_t 1083static ssize_t
@@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1136 if (!nport) 1110 if (!nport)
1137 return -ENOENT; 1111 return -ENOENT;
1138 1112
1139 ret = __wait_targetport_unreg(nport, tport); 1113 ret = __targetport_unreg(nport, tport);
1140 1114
1141 return ret ? ret : count; 1115 return ret ? ret : count;
1142} 1116}
@@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void)
1223 1197
1224 spin_unlock_irqrestore(&fcloop_lock, flags); 1198 spin_unlock_irqrestore(&fcloop_lock, flags);
1225 1199
1226 ret = __wait_targetport_unreg(nport, tport); 1200 ret = __targetport_unreg(nport, tport);
1227 if (ret) 1201 if (ret)
1228 pr_warn("%s: Failed deleting target port\n", __func__); 1202 pr_warn("%s: Failed deleting target port\n", __func__);
1229 1203
1230 ret = __wait_remoteport_unreg(nport, rport); 1204 ret = __remoteport_unreg(nport, rport);
1231 if (ret) 1205 if (ret)
1232 pr_warn("%s: Failed deleting remote port\n", __func__); 1206 pr_warn("%s: Failed deleting remote port\n", __func__);
1233 1207
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7d261ab894f4..87e429bfcd8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,6 +74,7 @@ struct nvmet_sq {
74 struct percpu_ref ref; 74 struct percpu_ref ref;
75 u16 qid; 75 u16 qid;
76 u16 size; 76 u16 size;
77 u32 sqhd;
77 struct completion free_done; 78 struct completion free_done;
78 struct completion confirm_done; 79 struct completion confirm_done;
79}; 80};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index de54c7f5048a..d12e5de78e70 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
135 135
136 /* Stop the user from writing */ 136 /* Stop the user from writing */
137 if (pos >= nvmem->size) 137 if (pos >= nvmem->size)
138 return 0; 138 return -EFBIG;
139 139
140 if (count < nvmem->word_size) 140 if (count < nvmem->word_size)
141 return -EINVAL; 141 return -EINVAL;
@@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
789 return ERR_PTR(-EINVAL); 789 return ERR_PTR(-EINVAL);
790 790
791 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); 791 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
792 of_node_put(nvmem_np);
792 if (IS_ERR(nvmem)) 793 if (IS_ERR(nvmem))
793 return ERR_CAST(nvmem); 794 return ERR_CAST(nvmem);
794 795
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 260d33c0f26c..63897531cd75 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index)
1781{ 1781{
1782 if (!dn || dn != of_stdout || console_set_on_cmdline) 1782 if (!dn || dn != of_stdout || console_set_on_cmdline)
1783 return false; 1783 return false;
1784 return !add_preferred_console(name, index, 1784
1785 kstrdup(of_stdout_options, GFP_KERNEL)); 1785 /*
1786 * XXX: cast `options' to char pointer to suppress complication
1787 * warnings: printk, UART and console drivers expect char pointer.
1788 */
1789 return !add_preferred_console(name, index, (char *)of_stdout_options);
1786} 1790}
1787EXPORT_SYMBOL_GPL(of_console_check); 1791EXPORT_SYMBOL_GPL(of_console_check);
1788 1792
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
44 return -EINVAL; 44 return -EINVAL;
45} 45}
46 46
47static void of_mdiobus_register_phy(struct mii_bus *mdio, 47static int of_mdiobus_register_phy(struct mii_bus *mdio,
48 struct device_node *child, u32 addr) 48 struct device_node *child, u32 addr)
49{ 49{
50 struct phy_device *phy; 50 struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
60 else 60 else
61 phy = get_phy_device(mdio, addr, is_c45); 61 phy = get_phy_device(mdio, addr, is_c45);
62 if (IS_ERR(phy)) 62 if (IS_ERR(phy))
63 return; 63 return PTR_ERR(phy);
64 64
65 rc = irq_of_parse_and_map(child, 0); 65 rc = of_irq_get(child, 0);
66 if (rc == -EPROBE_DEFER) {
67 phy_device_free(phy);
68 return rc;
69 }
66 if (rc > 0) { 70 if (rc > 0) {
67 phy->irq = rc; 71 phy->irq = rc;
68 mdio->irq[addr] = rc; 72 mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
84 if (rc) { 88 if (rc) {
85 phy_device_free(phy); 89 phy_device_free(phy);
86 of_node_put(child); 90 of_node_put(child);
87 return; 91 return rc;
88 } 92 }
89 93
90 dev_dbg(&mdio->dev, "registered phy %s at address %i\n", 94 dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
91 child->name, addr); 95 child->name, addr);
96 return 0;
92} 97}
93 98
94static void of_mdiobus_register_device(struct mii_bus *mdio, 99static int of_mdiobus_register_device(struct mii_bus *mdio,
95 struct device_node *child, u32 addr) 100 struct device_node *child, u32 addr)
96{ 101{
97 struct mdio_device *mdiodev; 102 struct mdio_device *mdiodev;
98 int rc; 103 int rc;
99 104
100 mdiodev = mdio_device_create(mdio, addr); 105 mdiodev = mdio_device_create(mdio, addr);
101 if (IS_ERR(mdiodev)) 106 if (IS_ERR(mdiodev))
102 return; 107 return PTR_ERR(mdiodev);
103 108
104 /* Associate the OF node with the device structure so it 109 /* Associate the OF node with the device structure so it
105 * can be looked up later. 110 * can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
112 if (rc) { 117 if (rc) {
113 mdio_device_free(mdiodev); 118 mdio_device_free(mdiodev);
114 of_node_put(child); 119 of_node_put(child);
115 return; 120 return rc;
116 } 121 }
117 122
118 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", 123 dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
119 child->name, addr); 124 child->name, addr);
125 return 0;
120} 126}
121 127
122/* The following is a list of PHY compatible strings which appear in 128/* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
219 } 225 }
220 226
221 if (of_mdiobus_child_is_phy(child)) 227 if (of_mdiobus_child_is_phy(child))
222 of_mdiobus_register_phy(mdio, child, addr); 228 rc = of_mdiobus_register_phy(mdio, child, addr);
223 else 229 else
224 of_mdiobus_register_device(mdio, child, addr); 230 rc = of_mdiobus_register_device(mdio, child, addr);
231 if (rc)
232 goto unregister;
225 } 233 }
226 234
227 if (!scanphys) 235 if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
242 dev_info(&mdio->dev, "scan phy %s at address %i\n", 250 dev_info(&mdio->dev, "scan phy %s at address %i\n",
243 child->name, addr); 251 child->name, addr);
244 252
245 if (of_mdiobus_child_is_phy(child)) 253 if (of_mdiobus_child_is_phy(child)) {
246 of_mdiobus_register_phy(mdio, child, addr); 254 rc = of_mdiobus_register_phy(mdio, child, addr);
255 if (rc)
256 goto unregister;
257 }
247 } 258 }
248 } 259 }
249 260
250 return 0; 261 return 0;
262
263unregister:
264 mdiobus_unregister(mdio);
265 return rc;
251} 266}
252EXPORT_SYMBOL(of_mdiobus_register); 267EXPORT_SYMBOL(of_mdiobus_register);
253 268
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index d507c3569a88..32771c2ced7b 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -25,7 +25,7 @@
25#include <linux/sort.h> 25#include <linux/sort.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#define MAX_RESERVED_REGIONS 16 28#define MAX_RESERVED_REGIONS 32
29static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; 29static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
30static int reserved_mem_count; 30static int reserved_mem_count;
31 31
diff --git a/drivers/of/property.c b/drivers/of/property.c
index fbb72116e9d4..264c355ba1ff 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
954 struct device_node *np; 954 struct device_node *np;
955 955
956 /* Get the parent of the port */ 956 /* Get the parent of the port */
957 np = of_get_next_parent(to_of_node(fwnode)); 957 np = of_get_parent(to_of_node(fwnode));
958 if (!np) 958 if (!np)
959 return NULL; 959 return NULL;
960 960
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 89f4e3d072d7..26ed0c08f209 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
935 bridge->sysdata = pcie; 935 bridge->sysdata = pcie;
936 bridge->busnr = 0; 936 bridge->busnr = 0;
937 bridge->ops = &advk_pcie_ops; 937 bridge->ops = &advk_pcie_ops;
938 bridge->map_irq = of_irq_parse_and_map_pci;
939 bridge->swizzle_irq = pci_common_swizzle;
938 940
939 ret = pci_scan_root_bus_bridge(bridge); 941 ret = pci_scan_root_bus_bridge(bridge);
940 if (ret < 0) { 942 if (ret < 0) {
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 9c40da54f88a..1987fec1f126 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,6 +233,7 @@ struct tegra_msi {
233 struct msi_controller chip; 233 struct msi_controller chip;
234 DECLARE_BITMAP(used, INT_PCI_MSI_NR); 234 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
235 struct irq_domain *domain; 235 struct irq_domain *domain;
236 unsigned long pages;
236 struct mutex lock; 237 struct mutex lock;
237 u64 phys; 238 u64 phys;
238 int irq; 239 int irq;
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1529 goto err; 1530 goto err;
1530 } 1531 }
1531 1532
1532 /* 1533 /* setup AFI/FPCI range */
1533 * The PCI host bridge on Tegra contains some logic that intercepts 1534 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1534 * MSI writes, which means that the MSI target address doesn't have 1535 msi->phys = virt_to_phys((void *)msi->pages);
1535 * to point to actual physical memory. Rather than allocating one 4
1536 * KiB page of system memory that's never used, we can simply pick
1537 * an arbitrary address within an area reserved for system memory
1538 * in the FPCI address map.
1539 *
1540 * However, in order to avoid confusion, we pick an address that
1541 * doesn't map to physical memory. The FPCI address map reserves a
1542 * 1012 GiB region for system memory and memory-mapped I/O. Since
1543 * none of the Tegra SoCs that contain this PCI host bridge can
1544 * address more than 16 GiB of system memory, the last 4 KiB of
1545 * these 1012 GiB is a good candidate.
1546 */
1547 msi->phys = 0xfcfffff000;
1548 1536
1549 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); 1537 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1550 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); 1538 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1596 afi_writel(pcie, 0, AFI_MSI_EN_VEC6); 1584 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1597 afi_writel(pcie, 0, AFI_MSI_EN_VEC7); 1585 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1598 1586
1587 free_pages(msi->pages, 0);
1588
1599 if (msi->irq > 0) 1589 if (msi->irq > 0)
1600 free_irq(msi->irq, pcie); 1590 free_irq(msi->irq, pcie);
1601 1591
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 1eecfa301f7f..8e075ea2743e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
686 const char *buf, size_t count) 686 const char *buf, size_t count)
687{ 687{
688 struct pci_dev *pdev = to_pci_dev(dev); 688 struct pci_dev *pdev = to_pci_dev(dev);
689 char *driver_override, *old = pdev->driver_override, *cp; 689 char *driver_override, *old, *cp;
690 690
691 /* We need to keep extra room for a newline */ 691 /* We need to keep extra room for a newline */
692 if (count >= (PAGE_SIZE - 1)) 692 if (count >= (PAGE_SIZE - 1))
@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
700 if (cp) 700 if (cp)
701 *cp = '\0'; 701 *cp = '\0';
702 702
703 device_lock(dev);
704 old = pdev->driver_override;
703 if (strlen(driver_override)) { 705 if (strlen(driver_override)) {
704 pdev->driver_override = driver_override; 706 pdev->driver_override = driver_override;
705 } else { 707 } else {
706 kfree(driver_override); 708 kfree(driver_override);
707 pdev->driver_override = NULL; 709 pdev->driver_override = NULL;
708 } 710 }
711 device_unlock(dev);
709 712
710 kfree(old); 713 kfree(old);
711 714
@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
716 struct device_attribute *attr, char *buf) 719 struct device_attribute *attr, char *buf)
717{ 720{
718 struct pci_dev *pdev = to_pci_dev(dev); 721 struct pci_dev *pdev = to_pci_dev(dev);
722 ssize_t len;
719 723
720 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); 724 device_lock(dev);
725 len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
726 device_unlock(dev);
727 return len;
721} 728}
722static DEVICE_ATTR_RW(driver_override); 729static DEVICE_ATTR_RW(driver_override);
723 730
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 73ebad6634a7..89c887ea5557 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -111,6 +111,8 @@
111#define MVEBU_COMPHY_CONF6_40B BIT(18) 111#define MVEBU_COMPHY_CONF6_40B BIT(18)
112#define MVEBU_COMPHY_SELECTOR 0x1140 112#define MVEBU_COMPHY_SELECTOR 0x1140
113#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4) 113#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
114#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
115#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
114 116
115#define MVEBU_COMPHY_LANES 6 117#define MVEBU_COMPHY_LANES 6
116#define MVEBU_COMPHY_PORTS 3 118#define MVEBU_COMPHY_PORTS 3
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy)
468{ 470{
469 struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); 471 struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
470 struct mvebu_comphy_priv *priv = lane->priv; 472 struct mvebu_comphy_priv *priv = lane->priv;
471 int ret; 473 int ret, mux;
472 u32 mux, val; 474 u32 val;
473 475
474 mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode); 476 mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode);
475 if (mux < 0) 477 if (mux < 0)
476 return -ENOTSUPP; 478 return -ENOTSUPP;
477 479
480 regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
481 val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
482 regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
483
478 regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val); 484 regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val);
479 val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); 485 val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
480 val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id); 486 val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id);
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy)
526 val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); 532 val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
527 regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val); 533 regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val);
528 534
535 regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
536 val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
537 regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
538
529 return 0; 539 return 0;
530} 540}
531 541
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
576 return PTR_ERR(priv->regmap); 586 return PTR_ERR(priv->regmap);
577 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 587 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
578 priv->base = devm_ioremap_resource(&pdev->dev, res); 588 priv->base = devm_ioremap_resource(&pdev->dev, res);
579 if (!priv->base) 589 if (IS_ERR(priv->base))
580 return -ENOMEM; 590 return PTR_ERR(priv->base);
581 591
582 for_each_available_child_of_node(pdev->dev.of_node, child) { 592 for_each_available_child_of_node(pdev->dev.of_node, child) {
583 struct mvebu_comphy_lane *lane; 593 struct mvebu_comphy_lane *lane;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index e3baad78521f..721a2a1c97ef 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -27,6 +27,7 @@
27/* banks shared by multiple phys */ 27/* banks shared by multiple phys */
28#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */ 28#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
29#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */ 29#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
30#define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */
30/* u2 phy bank */ 31/* u2 phy bank */
31#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000 32#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
32/* u3/pcie/sata phy banks */ 33/* u3/pcie/sata phy banks */
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy,
762 case PHY_TYPE_USB3: 763 case PHY_TYPE_USB3:
763 case PHY_TYPE_PCIE: 764 case PHY_TYPE_PCIE:
764 u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC; 765 u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
765 u3_banks->chip = NULL; 766 u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
766 u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD; 767 u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
767 u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA; 768 u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
768 break; 769 break;
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 4d2c57f21d76..a958c9bced01 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy,
443 return regmap_write(tcphy->grf_regs, reg->offset, val | mask); 443 return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
444} 444}
445 445
446static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
447{
448 u16 tx_ana_ctrl_reg_1;
449
450 /*
451 * Select the polarity of the xcvr:
452 * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
453 * down aux_m)
454 * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down
455 * aux_p)
456 */
457 tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
458 if (!tcphy->flip)
459 tx_ana_ctrl_reg_1 |= BIT(12);
460 else
461 tx_ana_ctrl_reg_1 &= ~BIT(12);
462 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
463}
464
446static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) 465static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
447{ 466{
467 u16 tx_ana_ctrl_reg_1;
448 u16 rdata, rdata2, val; 468 u16 rdata, rdata2, val;
449 469
450 /* disable txda_cal_latch_en for rewrite the calibration values */ 470 /* disable txda_cal_latch_en for rewrite the calibration values */
451 rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1); 471 tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
452 val = rdata & 0xdfff; 472 tx_ana_ctrl_reg_1 &= ~BIT(13);
453 writel(val, tcphy->base + TX_ANA_CTRL_REG_1); 473 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
454 474
455 /* 475 /*
456 * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and 476 * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
472 * Activate this signal for 1 clock cycle to sample new calibration 492 * Activate this signal for 1 clock cycle to sample new calibration
473 * values. 493 * values.
474 */ 494 */
475 rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1); 495 tx_ana_ctrl_reg_1 |= BIT(13);
476 val = rdata | 0x2000; 496 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
477 writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
478 usleep_range(150, 200); 497 usleep_range(150, 200);
479 498
480 /* set TX Voltage Level and TX Deemphasis to 0 */ 499 /* set TX Voltage Level and TX Deemphasis to 0 */
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
482 /* re-enable decap */ 501 /* re-enable decap */
483 writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2); 502 writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
484 writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2); 503 writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
485 writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1); 504 tx_ana_ctrl_reg_1 |= BIT(3);
486 writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1); 505 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
506 tx_ana_ctrl_reg_1 |= BIT(4);
507 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
487 508
488 writel(0, tcphy->base + TX_ANA_CTRL_REG_5); 509 writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
489 510
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
494 writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4); 515 writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
495 516
496 /* re-enables Bandgap reference for LDO */ 517 /* re-enables Bandgap reference for LDO */
497 writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1); 518 tx_ana_ctrl_reg_1 |= BIT(7);
498 writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1); 519 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
520 tx_ana_ctrl_reg_1 |= BIT(8);
521 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
499 522
500 /* 523 /*
501 * re-enables the transmitter pre-driver, driver data selection MUX, 524 * re-enables the transmitter pre-driver, driver data selection MUX,
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
505 writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2); 528 writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
506 529
507 /* 530 /*
508 * BIT 12: Controls auxda_polarity, which selects the polarity of the 531 * Do some magic undocumented stuff, some of which appears to
509 * xcvr: 532 * undo the "re-enables Bandgap reference for LDO" above.
510 * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
511 * down aux_m)
512 * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down
513 * aux_p)
514 */ 533 */
515 val = 0xa078; 534 tx_ana_ctrl_reg_1 |= BIT(15);
516 if (!tcphy->flip) 535 tx_ana_ctrl_reg_1 &= ~BIT(8);
517 val |= BIT(12); 536 tx_ana_ctrl_reg_1 &= ~BIT(7);
518 writel(val, tcphy->base + TX_ANA_CTRL_REG_1); 537 tx_ana_ctrl_reg_1 |= BIT(6);
538 tx_ana_ctrl_reg_1 |= BIT(5);
539 writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
519 540
520 writel(0, tcphy->base + TX_ANA_CTRL_REG_3); 541 writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
521 writel(0, tcphy->base + TX_ANA_CTRL_REG_4); 542 writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
522 writel(0, tcphy->base + TX_ANA_CTRL_REG_5); 543 writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
523 544
524 /* 545 /*
525 * Controls low_power_swing_en, set the voltage swing of the driver 546 * Controls low_power_swing_en, don't set the voltage swing of the
526 * to 400mv. The values below are peak to peak (differential) values. 547 * driver to 400mv. The values below are peak to peak (differential)
548 * values.
527 */ 549 */
528 writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL); 550 writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
529 writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA); 551 writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
530 552
531 /* Controls tx_high_z_tm_en */ 553 /* Controls tx_high_z_tm_en */
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
555 reset_control_deassert(tcphy->tcphy_rst); 577 reset_control_deassert(tcphy->tcphy_rst);
556 578
557 property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip); 579 property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
580 tcphy_dp_aux_set_flip(tcphy);
558 581
559 tcphy_cfg_24m(tcphy); 582 tcphy_cfg_24m(tcphy);
560 583
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
685 if (tcphy->mode == new_mode) 708 if (tcphy->mode == new_mode)
686 goto unlock_ret; 709 goto unlock_ret;
687 710
688 if (tcphy->mode == MODE_DISCONNECT) 711 if (tcphy->mode == MODE_DISCONNECT) {
689 tcphy_phy_init(tcphy, new_mode); 712 ret = tcphy_phy_init(tcphy, new_mode);
713 if (ret)
714 goto unlock_ret;
715 }
690 716
691 /* wait TCPHY for pipe ready */ 717 /* wait TCPHY for pipe ready */
692 for (timeout = 0; timeout < 100; timeout++) { 718 for (timeout = 0; timeout < 100; timeout++) {
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
760 */ 786 */
761 if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) { 787 if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
762 tcphy_phy_deinit(tcphy); 788 tcphy_phy_deinit(tcphy);
763 tcphy_phy_init(tcphy, new_mode); 789 ret = tcphy_phy_init(tcphy, new_mode);
764 } else if (tcphy->mode == MODE_DISCONNECT) { 790 } else if (tcphy->mode == MODE_DISCONNECT) {
765 tcphy_phy_init(tcphy, new_mode); 791 ret = tcphy_phy_init(tcphy, new_mode);
766 } 792 }
793 if (ret)
794 goto unlock_ret;
767 795
768 ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL, 796 ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
769 val, val & DP_MODE_A2, 1000, 797 val, val & DP_MODE_A2, 1000,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 3cbcb2537657..4307bf0013e1 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
454 char *name; 454 char *name;
455 455
456 name = kasprintf(GFP_KERNEL, "%s-%u", type, index); 456 name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
457 if (!name)
458 return ERR_PTR(-ENOMEM);
457 np = of_find_node_by_name(np, name); 459 np = of_find_node_by_name(np, name);
458 kfree(name); 460 kfree(name);
459 } 461 }
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 1778cf4f81c7..82cd8b08d71f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -100,6 +100,7 @@ config PINCTRL_AMD
100 tristate "AMD GPIO pin control" 100 tristate "AMD GPIO pin control"
101 depends on GPIOLIB 101 depends on GPIOLIB
102 select GPIOLIB_IRQCHIP 102 select GPIOLIB_IRQCHIP
103 select PINMUX
103 select PINCONF 104 select PINCONF
104 select GENERIC_PINCONF 105 select GENERIC_PINCONF
105 help 106 help
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0944310225db..ff782445dfb7 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
373 unsigned long events; 373 unsigned long events;
374 unsigned offset; 374 unsigned offset;
375 unsigned gpio; 375 unsigned gpio;
376 unsigned int type;
377 376
378 events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); 377 events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4);
379 events &= mask; 378 events &= mask;
380 events &= pc->enabled_irq_map[bank]; 379 events &= pc->enabled_irq_map[bank];
381 for_each_set_bit(offset, &events, 32) { 380 for_each_set_bit(offset, &events, 32) {
382 gpio = (32 * bank) + offset; 381 gpio = (32 * bank) + offset;
383 /* FIXME: no clue why the code looks up the type here */
384 type = pc->irq_type[gpio];
385
386 generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, 382 generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain,
387 gpio)); 383 gpio));
388 } 384 }
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 04e929fd0ffe..fadbca907c7c 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1577 struct gpio_chip *chip = &pctrl->chip; 1577 struct gpio_chip *chip = &pctrl->chip;
1578 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); 1578 bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
1579 int ret, i, offset; 1579 int ret, i, offset;
1580 int irq_base;
1580 1581
1581 *chip = chv_gpio_chip; 1582 *chip = chv_gpio_chip;
1582 1583
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
1622 /* Clear all interrupts */ 1623 /* Clear all interrupts */
1623 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); 1624 chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
1624 1625
1625 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, 1626 if (!need_valid_mask) {
1627 irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
1628 chip->ngpio, NUMA_NO_NODE);
1629 if (irq_base < 0) {
1630 dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
1631 return irq_base;
1632 }
1633 } else {
1634 irq_base = 0;
1635 }
1636
1637 ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
1626 handle_bad_irq, IRQ_TYPE_NONE); 1638 handle_bad_irq, IRQ_TYPE_NONE);
1627 if (ret) { 1639 if (ret) {
1628 dev_err(pctrl->dev, "failed to add IRQ chip\n"); 1640 dev_err(pctrl->dev, "failed to add IRQ chip\n");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3f6b34febbf1..433af328d981 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
534 continue; 534 continue;
535 irq = irq_find_mapping(gc->irqdomain, irqnr + i); 535 irq = irq_find_mapping(gc->irqdomain, irqnr + i);
536 generic_handle_irq(irq); 536 generic_handle_irq(irq);
537 /* Clear interrupt */ 537
538 /* Clear interrupt.
539 * We must read the pin register again, in case the
540 * value was changed while executing
541 * generic_handle_irq() above.
542 */
543 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
544 regval = readl(regs + i);
538 writel(regval, regs + i); 545 writel(regval, regs + i);
546 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
539 ret = IRQ_HANDLED; 547 ret = IRQ_HANDLED;
540 } 548 }
541 } 549 }
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3e40d4245512..9c950bbf07ba 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
407 ret = mcp_read(mcp, MCP_GPIO, &status); 407 ret = mcp_read(mcp, MCP_GPIO, &status);
408 if (ret < 0) 408 if (ret < 0)
409 status = 0; 409 status = 0;
410 else 410 else {
411 mcp->cached_gpio = status;
411 status = !!(status & (1 << offset)); 412 status = !!(status & (1 << offset));
412 413 }
413 mcp->cached_gpio = status;
414 414
415 mutex_unlock(&mcp->lock); 415 mutex_unlock(&mcp->lock);
416 return status; 416 return status;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 85de30f93a9c..56a8195096a2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
254{ 254{
255 struct acpi_device *device = bl_get_data(b); 255 struct acpi_device *device = bl_get_data(b);
256 256
257 if (b->props.power == FB_BLANK_POWERDOWN) 257 if (fext) {
258 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); 258 if (b->props.power == FB_BLANK_POWERDOWN)
259 else 259 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
260 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); 260 else
261 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
262 }
261 263
262 return set_lcd_level(device, b->props.brightness); 264 return set_lcd_level(device, b->props.brightness);
263} 265}
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index bb792a52248b..e03fa31446ca 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,6 +33,7 @@
33#include <linux/suspend.h> 33#include <linux/suspend.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35#include <linux/io-64-nonatomic-lo-hi.h> 35#include <linux/io-64-nonatomic-lo-hi.h>
36#include <linux/spinlock.h>
36 37
37#include <asm/intel_pmc_ipc.h> 38#include <asm/intel_pmc_ipc.h>
38 39
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
131 /* gcr */ 132 /* gcr */
132 void __iomem *gcr_mem_base; 133 void __iomem *gcr_mem_base;
133 bool has_gcr_regs; 134 bool has_gcr_regs;
135 spinlock_t gcr_lock;
134 136
135 /* punit */ 137 /* punit */
136 struct platform_device *punit_dev; 138 struct platform_device *punit_dev;
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
225{ 227{
226 int ret; 228 int ret;
227 229
228 mutex_lock(&ipclock); 230 spin_lock(&ipcdev.gcr_lock);
229 231
230 ret = is_gcr_valid(offset); 232 ret = is_gcr_valid(offset);
231 if (ret < 0) { 233 if (ret < 0) {
232 mutex_unlock(&ipclock); 234 spin_unlock(&ipcdev.gcr_lock);
233 return ret; 235 return ret;
234 } 236 }
235 237
236 *data = readl(ipcdev.gcr_mem_base + offset); 238 *data = readl(ipcdev.gcr_mem_base + offset);
237 239
238 mutex_unlock(&ipclock); 240 spin_unlock(&ipcdev.gcr_lock);
239 241
240 return 0; 242 return 0;
241} 243}
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
255{ 257{
256 int ret; 258 int ret;
257 259
258 mutex_lock(&ipclock); 260 spin_lock(&ipcdev.gcr_lock);
259 261
260 ret = is_gcr_valid(offset); 262 ret = is_gcr_valid(offset);
261 if (ret < 0) { 263 if (ret < 0) {
262 mutex_unlock(&ipclock); 264 spin_unlock(&ipcdev.gcr_lock);
263 return ret; 265 return ret;
264 } 266 }
265 267
266 writel(data, ipcdev.gcr_mem_base + offset); 268 writel(data, ipcdev.gcr_mem_base + offset);
267 269
268 mutex_unlock(&ipclock); 270 spin_unlock(&ipcdev.gcr_lock);
269 271
270 return 0; 272 return 0;
271} 273}
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
287 u32 new_val; 289 u32 new_val;
288 int ret = 0; 290 int ret = 0;
289 291
290 mutex_lock(&ipclock); 292 spin_lock(&ipcdev.gcr_lock);
291 293
292 ret = is_gcr_valid(offset); 294 ret = is_gcr_valid(offset);
293 if (ret < 0) 295 if (ret < 0)
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
309 } 311 }
310 312
311gcr_ipc_unlock: 313gcr_ipc_unlock:
312 mutex_unlock(&ipclock); 314 spin_unlock(&ipcdev.gcr_lock);
313 return ret; 315 return ret;
314} 316}
315EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); 317EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
480 482
481static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 483static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
482{ 484{
483 resource_size_t pci_resource; 485 struct intel_pmc_ipc_dev *pmc = &ipcdev;
484 int ret; 486 int ret;
485 int len;
486 487
487 ipcdev.dev = &pci_dev_get(pdev)->dev; 488 /* Only one PMC is supported */
488 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; 489 if (pmc->dev)
490 return -EBUSY;
489 491
490 ret = pci_enable_device(pdev); 492 pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
493
494 spin_lock_init(&ipcdev.gcr_lock);
495
496 ret = pcim_enable_device(pdev);
491 if (ret) 497 if (ret)
492 return ret; 498 return ret;
493 499
494 ret = pci_request_regions(pdev, "intel_pmc_ipc"); 500 ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
495 if (ret) 501 if (ret)
496 return ret; 502 return ret;
497 503
498 pci_resource = pci_resource_start(pdev, 0); 504 init_completion(&pmc->cmd_complete);
499 len = pci_resource_len(pdev, 0);
500 if (!pci_resource || !len) {
501 dev_err(&pdev->dev, "Failed to get resource\n");
502 return -ENOMEM;
503 }
504 505
505 init_completion(&ipcdev.cmd_complete); 506 pmc->ipc_base = pcim_iomap_table(pdev)[0];
506 507
507 if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) { 508 ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
509 pmc);
510 if (ret) {
508 dev_err(&pdev->dev, "Failed to request irq\n"); 511 dev_err(&pdev->dev, "Failed to request irq\n");
509 return -EBUSY; 512 return ret;
510 } 513 }
511 514
512 ipcdev.ipc_base = ioremap_nocache(pci_resource, len); 515 pmc->dev = &pdev->dev;
513 if (!ipcdev.ipc_base) {
514 dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
515 free_irq(pdev->irq, &ipcdev);
516 ret = -ENOMEM;
517 }
518 516
519 return ret; 517 pci_set_drvdata(pdev, pmc);
520}
521 518
522static void ipc_pci_remove(struct pci_dev *pdev) 519 return 0;
523{
524 free_irq(pdev->irq, &ipcdev);
525 pci_release_regions(pdev);
526 pci_dev_put(pdev);
527 iounmap(ipcdev.ipc_base);
528 ipcdev.dev = NULL;
529} 520}
530 521
531static const struct pci_device_id ipc_pci_ids[] = { 522static const struct pci_device_id ipc_pci_ids[] = {
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
540 .name = "intel_pmc_ipc", 531 .name = "intel_pmc_ipc",
541 .id_table = ipc_pci_ids, 532 .id_table = ipc_pci_ids,
542 .probe = ipc_pci_probe, 533 .probe = ipc_pci_probe,
543 .remove = ipc_pci_remove,
544}; 534};
545 535
546static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, 536static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
850 return -ENXIO; 840 return -ENXIO;
851 } 841 }
852 size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; 842 size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
843 res->end = res->start + size - 1;
844
845 addr = devm_ioremap_resource(&pdev->dev, res);
846 if (IS_ERR(addr))
847 return PTR_ERR(addr);
853 848
854 if (!request_mem_region(res->start, size, pdev->name)) {
855 dev_err(&pdev->dev, "Failed to request ipc resource\n");
856 return -EBUSY;
857 }
858 addr = ioremap_nocache(res->start, size);
859 if (!addr) {
860 dev_err(&pdev->dev, "I/O memory remapping failed\n");
861 release_mem_region(res->start, size);
862 return -ENOMEM;
863 }
864 ipcdev.ipc_base = addr; 849 ipcdev.ipc_base = addr;
865 850
866 ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET; 851 ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
917 902
918static int ipc_plat_probe(struct platform_device *pdev) 903static int ipc_plat_probe(struct platform_device *pdev)
919{ 904{
920 struct resource *res;
921 int ret; 905 int ret;
922 906
923 ipcdev.dev = &pdev->dev; 907 ipcdev.dev = &pdev->dev;
924 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; 908 ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
925 init_completion(&ipcdev.cmd_complete); 909 init_completion(&ipcdev.cmd_complete);
910 spin_lock_init(&ipcdev.gcr_lock);
926 911
927 ipcdev.irq = platform_get_irq(pdev, 0); 912 ipcdev.irq = platform_get_irq(pdev, 0);
928 if (ipcdev.irq < 0) { 913 if (ipcdev.irq < 0) {
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
939 ret = ipc_create_pmc_devices(); 924 ret = ipc_create_pmc_devices();
940 if (ret) { 925 if (ret) {
941 dev_err(&pdev->dev, "Failed to create pmc devices\n"); 926 dev_err(&pdev->dev, "Failed to create pmc devices\n");
942 goto err_device; 927 return ret;
943 } 928 }
944 929
945 if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND, 930 if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
946 "intel_pmc_ipc", &ipcdev)) { 931 "intel_pmc_ipc", &ipcdev)) {
947 dev_err(&pdev->dev, "Failed to request irq\n"); 932 dev_err(&pdev->dev, "Failed to request irq\n");
948 ret = -EBUSY; 933 ret = -EBUSY;
949 goto err_irq; 934 goto err_irq;
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
960 945
961 return 0; 946 return 0;
962err_sys: 947err_sys:
963 free_irq(ipcdev.irq, &ipcdev); 948 devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
964err_irq: 949err_irq:
965 platform_device_unregister(ipcdev.tco_dev); 950 platform_device_unregister(ipcdev.tco_dev);
966 platform_device_unregister(ipcdev.punit_dev); 951 platform_device_unregister(ipcdev.punit_dev);
967 platform_device_unregister(ipcdev.telemetry_dev); 952 platform_device_unregister(ipcdev.telemetry_dev);
968err_device: 953
969 iounmap(ipcdev.ipc_base);
970 res = platform_get_resource(pdev, IORESOURCE_MEM,
971 PLAT_RESOURCE_IPC_INDEX);
972 if (res) {
973 release_mem_region(res->start,
974 PLAT_RESOURCE_IPC_SIZE +
975 PLAT_RESOURCE_GCR_SIZE);
976 }
977 return ret; 954 return ret;
978} 955}
979 956
980static int ipc_plat_remove(struct platform_device *pdev) 957static int ipc_plat_remove(struct platform_device *pdev)
981{ 958{
982 struct resource *res;
983
984 sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); 959 sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
985 free_irq(ipcdev.irq, &ipcdev); 960 devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
986 platform_device_unregister(ipcdev.tco_dev); 961 platform_device_unregister(ipcdev.tco_dev);
987 platform_device_unregister(ipcdev.punit_dev); 962 platform_device_unregister(ipcdev.punit_dev);
988 platform_device_unregister(ipcdev.telemetry_dev); 963 platform_device_unregister(ipcdev.telemetry_dev);
989 iounmap(ipcdev.ipc_base);
990 res = platform_get_resource(pdev, IORESOURCE_MEM,
991 PLAT_RESOURCE_IPC_INDEX);
992 if (res) {
993 release_mem_region(res->start,
994 PLAT_RESOURCE_IPC_SIZE +
995 PLAT_RESOURCE_GCR_SIZE);
996 }
997 ipcdev.dev = NULL; 964 ipcdev.dev = NULL;
998 return 0; 965 return 0;
999} 966}
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 315a4be8dc1e..9a68914100ad 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO);
51MODULE_PARM_DESC(mbox_sel, 51MODULE_PARM_DESC(mbox_sel,
52 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); 52 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
53 53
54static DEFINE_SPINLOCK(tsi721_maint_lock);
55
54static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); 56static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
55static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); 57static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
56 58
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
124 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); 126 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
125 struct tsi721_dma_desc *bd_ptr; 127 struct tsi721_dma_desc *bd_ptr;
126 u32 rd_count, swr_ptr, ch_stat; 128 u32 rd_count, swr_ptr, ch_stat;
129 unsigned long flags;
127 int i, err = 0; 130 int i, err = 0;
128 u32 op = do_wr ? MAINT_WR : MAINT_RD; 131 u32 op = do_wr ? MAINT_WR : MAINT_RD;
129 132
130 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) 133 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
131 return -EINVAL; 134 return -EINVAL;
132 135
136 spin_lock_irqsave(&tsi721_maint_lock, flags);
137
133 bd_ptr = priv->mdma.bd_base; 138 bd_ptr = priv->mdma.bd_base;
134 139
135 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); 140 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
197 */ 202 */
198 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); 203 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
199 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); 204 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
205
200err_out: 206err_out:
207 spin_unlock_irqrestore(&tsi721_maint_lock, flags);
201 208
202 return err; 209 return err;
203} 210}
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index a3824baca2e5..3ee9af83b638 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -14,16 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16/* 16/*
17 * These interrupt-safe spinlocks protect all accesses to RIO
18 * configuration space and doorbell access.
19 */
20static DEFINE_SPINLOCK(rio_config_lock);
21static DEFINE_SPINLOCK(rio_doorbell_lock);
22
23/*
24 * Wrappers for all RIO configuration access functions. They just check 17 * Wrappers for all RIO configuration access functions. They just check
25 * alignment, do locking and call the low-level functions pointed to 18 * alignment and call the low-level functions pointed to by rio_mport->ops.
26 * by rio_mport->ops.
27 */ 19 */
28 20
29#define RIO_8_BAD 0 21#define RIO_8_BAD 0
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \
44 (struct rio_mport *mport, u32 offset, type *value) \ 36 (struct rio_mport *mport, u32 offset, type *value) \
45{ \ 37{ \
46 int res; \ 38 int res; \
47 unsigned long flags; \
48 u32 data = 0; \ 39 u32 data = 0; \
49 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 40 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
50 spin_lock_irqsave(&rio_config_lock, flags); \
51 res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ 41 res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
52 *value = (type)data; \ 42 *value = (type)data; \
53 spin_unlock_irqrestore(&rio_config_lock, flags); \
54 return res; \ 43 return res; \
55} 44}
56 45
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \
67int __rio_local_write_config_##size \ 56int __rio_local_write_config_##size \
68 (struct rio_mport *mport, u32 offset, type value) \ 57 (struct rio_mport *mport, u32 offset, type value) \
69{ \ 58{ \
70 int res; \
71 unsigned long flags; \
72 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 59 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
73 spin_lock_irqsave(&rio_config_lock, flags); \ 60 return mport->ops->lcwrite(mport, mport->id, offset, len, value);\
74 res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
75 spin_unlock_irqrestore(&rio_config_lock, flags); \
76 return res; \
77} 61}
78 62
79RIO_LOP_READ(8, u8, 1) 63RIO_LOP_READ(8, u8, 1)
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \
104 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ 88 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
105{ \ 89{ \
106 int res; \ 90 int res; \
107 unsigned long flags; \
108 u32 data = 0; \ 91 u32 data = 0; \
109 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 92 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
110 spin_lock_irqsave(&rio_config_lock, flags); \
111 res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ 93 res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
112 *value = (type)data; \ 94 *value = (type)data; \
113 spin_unlock_irqrestore(&rio_config_lock, flags); \
114 return res; \ 95 return res; \
115} 96}
116 97
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \
127int rio_mport_write_config_##size \ 108int rio_mport_write_config_##size \
128 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ 109 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \
129{ \ 110{ \
130 int res; \
131 unsigned long flags; \
132 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 111 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
133 spin_lock_irqsave(&rio_config_lock, flags); \ 112 return mport->ops->cwrite(mport, mport->id, destid, hopcount, \
134 res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ 113 offset, len, value); \
135 spin_unlock_irqrestore(&rio_config_lock, flags); \
136 return res; \
137} 114}
138 115
139RIO_OP_READ(8, u8, 1) 116RIO_OP_READ(8, u8, 1)
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
162 */ 139 */
163int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) 140int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
164{ 141{
165 int res; 142 return mport->ops->dsend(mport, mport->id, destid, data);
166 unsigned long flags;
167
168 spin_lock_irqsave(&rio_doorbell_lock, flags);
169 res = mport->ops->dsend(mport, mport->id, destid, data);
170 spin_unlock_irqrestore(&rio_doorbell_lock, flags);
171
172 return res;
173} 143}
174 144
175EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); 145EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index d0e5d6ee882c..e2c1988cd7c0 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str)
523 if (*str == '=') 523 if (*str == '=')
524 str++; 524 str++;
525 525
526 if (!strncmp(str, "cec_disable", 7)) 526 if (!strcmp(str, "cec_disable"))
527 ce_arr.disabled = 1; 527 ce_arr.disabled = 1;
528 else 528 else
529 return 0; 529 return 0;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f18b36dd57dd..376a99b7cf5d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
590 case AXP803_DCDC3: 590 case AXP803_DCDC3:
591 return !!(reg & BIT(6)); 591 return !!(reg & BIT(6));
592 case AXP803_DCDC6: 592 case AXP803_DCDC6:
593 return !!(reg & BIT(7)); 593 return !!(reg & BIT(5));
594 } 594 }
595 break; 595 break;
596 596
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index ef2be56460fe..790a4a73ea2c 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
29}; 29};
30 30
31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ 31#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
32 [RN5T618_##rid] = { \ 32 { \
33 .name = #rid, \ 33 .name = #rid, \
34 .of_match = of_match_ptr(#rid), \ 34 .of_match = of_match_ptr(#rid), \
35 .regulators_node = of_match_ptr("regulators"), \ 35 .regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index df63e44526ac..bf04479456a0 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL
109 depends on OF && ARCH_QCOM 109 depends on OF && ARCH_QCOM
110 depends on QCOM_SMEM 110 depends on QCOM_SMEM
111 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) 111 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
112 depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
112 select MFD_SYSCON 113 select MFD_SYSCON
113 select QCOM_RPROC_COMMON 114 select QCOM_RPROC_COMMON
114 select QCOM_SCM 115 select QCOM_SCM
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL
120 tristate "Qualcomm WCNSS Peripheral Image Loader" 121 tristate "Qualcomm WCNSS Peripheral Image Loader"
121 depends on OF && ARCH_QCOM 122 depends on OF && ARCH_QCOM
122 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) 123 depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
124 depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
123 depends on QCOM_SMEM 125 depends on QCOM_SMEM
124 select QCOM_MDT_LOADER 126 select QCOM_MDT_LOADER
125 select QCOM_RPROC_COMMON 127 select QCOM_RPROC_COMMON
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 612d91403341..633268e9d550 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
264 if (!(att->flags & ATT_OWN)) 264 if (!(att->flags & ATT_OWN))
265 continue; 265 continue;
266 266
267 if (b > IMX7D_RPROC_MEM_MAX) 267 if (b >= IMX7D_RPROC_MEM_MAX)
268 break; 268 break;
269 269
270 priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, 270 priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
271 att->sa, att->size); 271 att->sa, att->size);
272 if (IS_ERR(priv->mem[b].cpu_addr)) { 272 if (!priv->mem[b].cpu_addr) {
273 dev_err(dev, "devm_ioremap_resource failed\n"); 273 dev_err(dev, "devm_ioremap_resource failed\n");
274 err = PTR_ERR(priv->mem[b].cpu_addr); 274 return -ENOMEM;
275 return err;
276 } 275 }
277 priv->mem[b].sys_addr = att->sa; 276 priv->mem[b].sys_addr = att->sa;
278 priv->mem[b].size = att->size; 277 priv->mem[b].size = att->size;
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
296 return err; 295 return err;
297 } 296 }
298 297
299 if (b > IMX7D_RPROC_MEM_MAX) 298 if (b >= IMX7D_RPROC_MEM_MAX)
300 break; 299 break;
301 300
302 priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); 301 priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e0c393214264..e2baecbb9dd3 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -34,11 +34,12 @@ config RESET_BERLIN
34 help 34 help
35 This enables the reset controller driver for Marvell Berlin SoCs. 35 This enables the reset controller driver for Marvell Berlin SoCs.
36 36
37config RESET_HSDK_V1 37config RESET_HSDK
38 bool "HSDK v1 Reset Driver" 38 bool "Synopsys HSDK Reset Driver"
39 default n 39 depends on HAS_IOMEM
40 depends on ARC_SOC_HSDK || COMPILE_TEST
40 help 41 help
41 This enables the reset controller driver for HSDK v1. 42 This enables the reset controller driver for HSDK board.
42 43
43config RESET_IMX7 44config RESET_IMX7
44 bool "i.MX7 Reset Driver" if COMPILE_TEST 45 bool "i.MX7 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d368367110e5..af1c15c330b3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
5obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o 5obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
6obj-$(CONFIG_RESET_ATH79) += reset-ath79.o 6obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
7obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o 7obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
8obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o 8obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
9obj-$(CONFIG_RESET_IMX7) += reset-imx7.o 9obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
10obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o 10obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
11obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o 11obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c
index bca13e4bf622..8bce391c6943 100644
--- a/drivers/reset/reset-hsdk-v1.c
+++ b/drivers/reset/reset-hsdk.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2017 Synopsys. 2 * Copyright (C) 2017 Synopsys.
3 * 3 *
4 * Synopsys HSDKv1 SDP reset driver. 4 * Synopsys HSDK Development platform reset driver.
5 * 5 *
6 * This file is licensed under the terms of the GNU General Public 6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any 7 * License version 2. This program is licensed "as is" without any
@@ -18,9 +18,9 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/types.h> 19#include <linux/types.h>
20 20
21#define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev) 21#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
22 22
23struct hsdkv1_rst { 23struct hsdk_rst {
24 void __iomem *regs_ctl; 24 void __iomem *regs_ctl;
25 void __iomem *regs_rst; 25 void __iomem *regs_rst;
26 spinlock_t lock; 26 spinlock_t lock;
@@ -49,12 +49,12 @@ static const u32 rst_map[] = {
49#define CGU_IP_SW_RESET_RESET BIT(0) 49#define CGU_IP_SW_RESET_RESET BIT(0)
50#define SW_RESET_TIMEOUT 10000 50#define SW_RESET_TIMEOUT 10000
51 51
52static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id) 52static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
53{ 53{
54 writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); 54 writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
55} 55}
56 56
57static int hsdkv1_reset_do(struct hsdkv1_rst *rst) 57static int hsdk_reset_do(struct hsdk_rst *rst)
58{ 58{
59 u32 reg; 59 u32 reg;
60 60
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
69 !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); 69 !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
70} 70}
71 71
72static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev, 72static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
73 unsigned long id) 73 unsigned long id)
74{ 74{
75 struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev); 75 struct hsdk_rst *rst = to_hsdk_rst(rcdev);
76 unsigned long flags; 76 unsigned long flags;
77 int ret; 77 int ret;
78 78
79 spin_lock_irqsave(&rst->lock, flags); 79 spin_lock_irqsave(&rst->lock, flags);
80 hsdkv1_reset_config(rst, id); 80 hsdk_reset_config(rst, id);
81 ret = hsdkv1_reset_do(rst); 81 ret = hsdk_reset_do(rst);
82 spin_unlock_irqrestore(&rst->lock, flags); 82 spin_unlock_irqrestore(&rst->lock, flags);
83 83
84 return ret; 84 return ret;
85} 85}
86 86
87static const struct reset_control_ops hsdkv1_reset_ops = { 87static const struct reset_control_ops hsdk_reset_ops = {
88 .reset = hsdkv1_reset_reset, 88 .reset = hsdk_reset_reset,
89}; 89};
90 90
91static int hsdkv1_reset_probe(struct platform_device *pdev) 91static int hsdk_reset_probe(struct platform_device *pdev)
92{ 92{
93 struct hsdkv1_rst *rst; 93 struct hsdk_rst *rst;
94 struct resource *mem; 94 struct resource *mem;
95 95
96 rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); 96 rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
110 spin_lock_init(&rst->lock); 110 spin_lock_init(&rst->lock);
111 111
112 rst->rcdev.owner = THIS_MODULE; 112 rst->rcdev.owner = THIS_MODULE;
113 rst->rcdev.ops = &hsdkv1_reset_ops; 113 rst->rcdev.ops = &hsdk_reset_ops;
114 rst->rcdev.of_node = pdev->dev.of_node; 114 rst->rcdev.of_node = pdev->dev.of_node;
115 rst->rcdev.nr_resets = HSDK_MAX_RESETS; 115 rst->rcdev.nr_resets = HSDK_MAX_RESETS;
116 rst->rcdev.of_reset_n_cells = 1; 116 rst->rcdev.of_reset_n_cells = 1;
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
118 return reset_controller_register(&rst->rcdev); 118 return reset_controller_register(&rst->rcdev);
119} 119}
120 120
121static const struct of_device_id hsdkv1_reset_dt_match[] = { 121static const struct of_device_id hsdk_reset_dt_match[] = {
122 { .compatible = "snps,hsdk-v1.0-reset" }, 122 { .compatible = "snps,hsdk-reset" },
123 { }, 123 { },
124}; 124};
125 125
126static struct platform_driver hsdkv1_reset_driver = { 126static struct platform_driver hsdk_reset_driver = {
127 .probe = hsdkv1_reset_probe, 127 .probe = hsdk_reset_probe,
128 .driver = { 128 .driver = {
129 .name = "hsdk-v1.0-reset", 129 .name = "hsdk-reset",
130 .of_match_table = hsdkv1_reset_dt_match, 130 .of_match_table = hsdk_reset_dt_match,
131 }, 131 },
132}; 132};
133builtin_platform_driver(hsdkv1_reset_driver); 133builtin_platform_driver(hsdk_reset_driver);
134 134
135MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); 135MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
136MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver"); 136MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
137MODULE_LICENSE("GPL v2"); 137MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
40 struct socfpga_reset_data *data = container_of(rcdev, 40 struct socfpga_reset_data *data = container_of(rcdev,
41 struct socfpga_reset_data, 41 struct socfpga_reset_data,
42 rcdev); 42 rcdev);
43 int bank = id / BITS_PER_LONG; 43 int reg_width = sizeof(u32);
44 int offset = id % BITS_PER_LONG; 44 int bank = id / (reg_width * BITS_PER_BYTE);
45 int offset = id % (reg_width * BITS_PER_BYTE);
45 unsigned long flags; 46 unsigned long flags;
46 u32 reg; 47 u32 reg;
47 48
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
61 struct socfpga_reset_data, 62 struct socfpga_reset_data,
62 rcdev); 63 rcdev);
63 64
64 int bank = id / BITS_PER_LONG; 65 int reg_width = sizeof(u32);
65 int offset = id % BITS_PER_LONG; 66 int bank = id / (reg_width * BITS_PER_BYTE);
67 int offset = id % (reg_width * BITS_PER_BYTE);
66 unsigned long flags; 68 unsigned long flags;
67 u32 reg; 69 u32 reg;
68 70
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
81{ 83{
82 struct socfpga_reset_data *data = container_of(rcdev, 84 struct socfpga_reset_data *data = container_of(rcdev,
83 struct socfpga_reset_data, rcdev); 85 struct socfpga_reset_data, rcdev);
84 int bank = id / BITS_PER_LONG; 86 int reg_width = sizeof(u32);
85 int offset = id % BITS_PER_LONG; 87 int bank = id / (reg_width * BITS_PER_BYTE);
88 int offset = id % (reg_width * BITS_PER_BYTE);
86 u32 reg; 89 u32 reg;
87 90
88 reg = readl(data->membase + (bank * BANK_INCREMENT)); 91 reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
132 spin_lock_init(&data->lock); 135 spin_lock_init(&data->lock);
133 136
134 data->rcdev.owner = THIS_MODULE; 137 data->rcdev.owner = THIS_MODULE;
135 data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; 138 data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
136 data->rcdev.ops = &socfpga_reset_ops; 139 data->rcdev.ops = &socfpga_reset_ops;
137 data->rcdev.of_node = pdev->dev.of_node; 140 data->rcdev.of_node = pdev->dev.of_node;
138 141
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 5a5e927ea50f..5dcc9bf1c5bc 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
635 unsigned long flags; 635 unsigned long flags;
636 636
637 intent = kzalloc(sizeof(*intent), GFP_KERNEL); 637 intent = kzalloc(sizeof(*intent), GFP_KERNEL);
638
639 if (!intent) 638 if (!intent)
640 return NULL; 639 return NULL;
641 640
642 intent->data = kzalloc(size, GFP_KERNEL); 641 intent->data = kzalloc(size, GFP_KERNEL);
643 if (!intent->data) 642 if (!intent->data)
644 return NULL; 643 goto free_intent;
645 644
646 spin_lock_irqsave(&channel->intent_lock, flags); 645 spin_lock_irqsave(&channel->intent_lock, flags);
647 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); 646 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
648 if (ret < 0) { 647 if (ret < 0) {
649 spin_unlock_irqrestore(&channel->intent_lock, flags); 648 spin_unlock_irqrestore(&channel->intent_lock, flags);
650 return NULL; 649 goto free_data;
651 } 650 }
652 spin_unlock_irqrestore(&channel->intent_lock, flags); 651 spin_unlock_irqrestore(&channel->intent_lock, flags);
653 652
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink,
656 intent->reuse = reuseable; 655 intent->reuse = reuseable;
657 656
658 return intent; 657 return intent;
658
659free_data:
660 kfree(intent->data);
661free_intent:
662 kfree(intent);
663 return NULL;
659} 664}
660 665
661static void qcom_glink_handle_rx_done(struct qcom_glink *glink, 666static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
1197 1202
1198 ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); 1203 ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
1199 if (ret) 1204 if (ret)
1200 return ret; 1205 goto unlock;
1201 1206
1202 ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); 1207 ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ);
1203 if (!ret) { 1208 if (!ret) {
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink,
1207 ret = channel->intent_req_result ? 0 : -ECANCELED; 1212 ret = channel->intent_req_result ? 0 : -ECANCELED;
1208 } 1213 }
1209 1214
1215unlock:
1210 mutex_unlock(&channel->intent_req_lock); 1216 mutex_unlock(&channel->intent_req_lock);
1211 return ret; 1217 return ret;
1212} 1218}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 82ac331d9125..84752152d41f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
357 357
358 adapter->next_port_scan = jiffies; 358 adapter->next_port_scan = jiffies;
359 359
360 adapter->erp_action.adapter = adapter;
361
360 if (zfcp_qdio_setup(adapter)) 362 if (zfcp_qdio_setup(adapter))
361 goto failed; 363 goto failed;
362 364
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
513 port->dev.groups = zfcp_port_attr_groups; 515 port->dev.groups = zfcp_port_attr_groups;
514 port->dev.release = zfcp_port_release; 516 port->dev.release = zfcp_port_release;
515 517
518 port->erp_action.adapter = adapter;
519 port->erp_action.port = port;
520
516 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { 521 if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
517 kfree(port); 522 kfree(port);
518 goto err_out; 523 goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 37408f5f81ce..ec2532ee1822 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
193 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, 193 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
194 &zfcp_sdev->status); 194 &zfcp_sdev->status);
195 erp_action = &zfcp_sdev->erp_action; 195 erp_action = &zfcp_sdev->erp_action;
196 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 196 WARN_ON_ONCE(erp_action->port != port);
197 erp_action->port = port; 197 WARN_ON_ONCE(erp_action->sdev != sdev);
198 erp_action->sdev = sdev;
199 if (!(atomic_read(&zfcp_sdev->status) & 198 if (!(atomic_read(&zfcp_sdev->status) &
200 ZFCP_STATUS_COMMON_RUNNING)) 199 ZFCP_STATUS_COMMON_RUNNING))
201 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 200 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
208 zfcp_erp_action_dismiss_port(port); 207 zfcp_erp_action_dismiss_port(port);
209 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 208 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
210 erp_action = &port->erp_action; 209 erp_action = &port->erp_action;
211 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 210 WARN_ON_ONCE(erp_action->port != port);
212 erp_action->port = port; 211 WARN_ON_ONCE(erp_action->sdev != NULL);
213 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) 212 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
214 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 213 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
215 break; 214 break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
219 zfcp_erp_action_dismiss_adapter(adapter); 218 zfcp_erp_action_dismiss_adapter(adapter);
220 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 219 atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
221 erp_action = &adapter->erp_action; 220 erp_action = &adapter->erp_action;
222 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 221 WARN_ON_ONCE(erp_action->port != NULL);
222 WARN_ON_ONCE(erp_action->sdev != NULL);
223 if (!(atomic_read(&adapter->status) & 223 if (!(atomic_read(&adapter->status) &
224 ZFCP_STATUS_COMMON_RUNNING)) 224 ZFCP_STATUS_COMMON_RUNNING))
225 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; 225 act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
229 return NULL; 229 return NULL;
230 } 230 }
231 231
232 erp_action->adapter = adapter; 232 WARN_ON_ONCE(erp_action->adapter != adapter);
233 memset(&erp_action->list, 0, sizeof(erp_action->list));
234 memset(&erp_action->timer, 0, sizeof(erp_action->timer));
235 erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
236 erp_action->fsf_req_id = 0;
233 erp_action->action = need; 237 erp_action->action = need;
234 erp_action->status = act_status; 238 erp_action->status = act_status;
235 239
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ec3ddd1d31d5..6cf8732627e0 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
115 struct zfcp_unit *unit; 115 struct zfcp_unit *unit;
116 int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; 116 int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
117 117
118 zfcp_sdev->erp_action.adapter = adapter;
119 zfcp_sdev->erp_action.sdev = sdev;
120
118 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 121 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
119 if (!port) 122 if (!port)
120 return -ENXIO; 123 return -ENXIO;
121 124
125 zfcp_sdev->erp_action.port = port;
126
122 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); 127 unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
123 if (unit) 128 if (unit)
124 put_device(&unit->dev); 129 put_device(&unit->dev);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a64285ab0728..af3e4d3f9735 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
699 int status; 699 int status;
700 700
701 dresp = (struct aac_mount *) fib_data(fibptr); 701 dresp = (struct aac_mount *) fib_data(fibptr);
702 if (!(fibptr->dev->supplement_adapter_info.supported_options2 & 702 if (!aac_supports_2T(fibptr->dev)) {
703 AAC_OPTION_VARIABLE_BLOCK_SIZE))
704 dresp->mnt[0].capacityhigh = 0; 703 dresp->mnt[0].capacityhigh = 0;
705 if ((le32_to_cpu(dresp->status) != ST_OK) || 704 if ((le32_to_cpu(dresp->status) == ST_OK) &&
706 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { 705 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
707 _aac_probe_container2(context, fibptr); 706 _aac_probe_container2(context, fibptr);
708 return; 707 return;
708 }
709 } 709 }
710 scsicmd = (struct scsi_cmnd *) context; 710 scsicmd = (struct scsi_cmnd *) context;
711 711
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 92fabf2b0c24..403a639574e5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev)
2701 return 0; 2701 return 0;
2702} 2702}
2703 2703
2704static inline int aac_supports_2T(struct aac_dev *dev)
2705{
2706 return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
2707}
2708
2704char * get_container_type(unsigned type); 2709char * get_container_type(unsigned type);
2705extern int numacb; 2710extern int numacb;
2706extern char aac_driver_version[]; 2711extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 97d269f16888..1bc623ad3faf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
302 return -ENOMEM; 302 return -ENOMEM;
303 aac_fib_init(fibctx); 303 aac_fib_init(fibctx);
304 304
305 mutex_lock(&dev->ioctl_mutex); 305 if (!dev->adapter_shutdown) {
306 dev->adapter_shutdown = 1; 306 mutex_lock(&dev->ioctl_mutex);
307 mutex_unlock(&dev->ioctl_mutex); 307 dev->adapter_shutdown = 1;
308 mutex_unlock(&dev->ioctl_mutex);
309 }
308 310
309 cmd = (struct aac_close *) fib_data(fibctx); 311 cmd = (struct aac_close *) fib_data(fibctx);
310 cmd->command = cpu_to_le32(VM_CloseAll); 312 cmd->command = cpu_to_le32(VM_CloseAll);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 87cc4a93e637..c9252b138c1f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
906 906
907 bus = aac_logical_to_phys(scmd_channel(cmd)); 907 bus = aac_logical_to_phys(scmd_channel(cmd));
908 cid = scmd_id(cmd); 908 cid = scmd_id(cmd);
909 info = &aac->hba_map[bus][cid]; 909
910 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || 910 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
911 info->devtype != AAC_DEVTYPE_NATIVE_RAW)
912 return FAILED; 911 return FAILED;
913 912
914 if (info->reset_state > 0) 913 info = &aac->hba_map[bus][cid];
914
915 if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
916 info->reset_state > 0)
915 return FAILED; 917 return FAILED;
916 918
917 pr_err("%s: Host adapter reset request. SCSI hang ?\n", 919 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
962 964
963 bus = aac_logical_to_phys(scmd_channel(cmd)); 965 bus = aac_logical_to_phys(scmd_channel(cmd));
964 cid = scmd_id(cmd); 966 cid = scmd_id(cmd);
965 info = &aac->hba_map[bus][cid]; 967
966 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || 968 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
967 info->devtype != AAC_DEVTYPE_NATIVE_RAW)
968 return FAILED; 969 return FAILED;
969 970
970 if (info->reset_state > 0) 971 info = &aac->hba_map[bus][cid];
972
973 if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
974 info->reset_state > 0)
971 return FAILED; 975 return FAILED;
972 976
973 pr_err("%s: Host adapter reset request. SCSI hang ?\n", 977 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -1547,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
1547{ 1551{
1548 int i; 1552 int i;
1549 1553
1554 mutex_lock(&aac->ioctl_mutex);
1550 aac->adapter_shutdown = 1; 1555 aac->adapter_shutdown = 1;
1551 aac_send_shutdown(aac); 1556 mutex_unlock(&aac->ioctl_mutex);
1552 1557
1553 if (aac->aif_thread) { 1558 if (aac->aif_thread) {
1554 int i; 1559 int i;
@@ -1561,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
1561 } 1566 }
1562 kthread_stop(aac->thread); 1567 kthread_stop(aac->thread);
1563 } 1568 }
1569
1570 aac_send_shutdown(aac);
1571
1564 aac_adapter_disable_int(aac); 1572 aac_adapter_disable_int(aac);
1573
1565 if (aac_is_src(aac)) { 1574 if (aac_is_src(aac)) {
1566 if (aac->max_msix > 1) { 1575 if (aac->max_msix > 1) {
1567 for (i = 0; i < aac->max_msix; i++) { 1576 for (i = 0; i < aac->max_msix; i++) {
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 48c2b2b34b72..0c9361c87ec8 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
740 aac_set_intx_mode(dev); 740 aac_set_intx_mode(dev);
741 741
742 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); 742 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
743
744 msleep(5000);
743} 745}
744 746
745static void aac_send_hardware_soft_reset(struct aac_dev *dev) 747static void aac_send_hardware_soft_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9abe81021484..4ed3d26ffdde 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
4091 memset(id_ctlr, 0, sizeof(*id_ctlr)); 4091 memset(id_ctlr, 0, sizeof(*id_ctlr));
4092 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); 4092 rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
4093 if (!rc) 4093 if (!rc)
4094 if (id_ctlr->configured_logical_drive_count < 256) 4094 if (id_ctlr->configured_logical_drive_count < 255)
4095 *nlocals = id_ctlr->configured_logical_drive_count; 4095 *nlocals = id_ctlr->configured_logical_drive_count;
4096 else 4096 else
4097 *nlocals = le16_to_cpu( 4097 *nlocals = le16_to_cpu(
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 785fb42f6650..2799a6b08f73 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3767 */ 3767 */
3768 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { 3768 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3769 pr_err("write_pending failed since: %d\n", vscsi->flags); 3769 pr_err("write_pending failed since: %d\n", vscsi->flags);
3770 return 0; 3770 return -EIO;
3771 } 3771 }
3772 3772
3773 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3773 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
383 fc_rport_enter_flogi(rdata); 383 fc_rport_enter_flogi(rdata);
384 mutex_unlock(&rdata->rp_mutex); 384 mutex_unlock(&rdata->rp_mutex);
385 } else { 385 } else {
386 mutex_unlock(&rdata->rp_mutex);
386 FC_RPORT_DBG(rdata, "work delete\n"); 387 FC_RPORT_DBG(rdata, "work delete\n");
387 mutex_lock(&lport->disc.disc_mutex); 388 mutex_lock(&lport->disc.disc_mutex);
388 list_del_rcu(&rdata->peers); 389 list_del_rcu(&rdata->peers);
389 mutex_unlock(&lport->disc.disc_mutex); 390 mutex_unlock(&lport->disc.disc_mutex);
390 mutex_unlock(&rdata->rp_mutex);
391 kref_put(&rdata->kref, fc_rport_destroy); 391 kref_put(&rdata->kref, fc_rport_destroy);
392 } 392 }
393 } else { 393 } else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bd4605a34f54..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1728 1728
1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { 1729 if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
1730 reason = FAILURE_SESSION_IN_RECOVERY; 1730 reason = FAILURE_SESSION_IN_RECOVERY;
1731 sc->result = DID_REQUEUE; 1731 sc->result = DID_REQUEUE << 16;
1732 goto fault; 1732 goto fault;
1733 } 1733 }
1734 1734
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
2851/** 2851/**
2852 * iscsi_session_teardown - destroy session, host, and cls_session 2852 * iscsi_session_teardown - destroy session, host, and cls_session
2853 * @cls_session: iscsi session 2853 * @cls_session: iscsi session
2854 *
2855 * The driver must have called iscsi_remove_session before
2856 * calling this.
2857 */ 2854 */
2858void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 2855void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2859{ 2856{
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2863 2860
2864 iscsi_pool_free(&session->cmdpool); 2861 iscsi_pool_free(&session->cmdpool);
2865 2862
2863 iscsi_remove_session(cls_session);
2864
2866 kfree(session->password); 2865 kfree(session->password);
2867 kfree(session->password_in); 2866 kfree(session->password_in);
2868 kfree(session->username); 2867 kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2877 kfree(session->portal_type); 2876 kfree(session->portal_type);
2878 kfree(session->discovery_parent_type); 2877 kfree(session->discovery_parent_type);
2879 2878
2880 iscsi_destroy_session(cls_session); 2879 iscsi_free_session(cls_session);
2880
2881 iscsi_host_dec_session_cnt(shost); 2881 iscsi_host_dec_session_cnt(shost);
2882 module_put(owner); 2882 module_put(owner);
2883} 2883}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7e7ae786121b..100bc4c8798d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6131 "Extents and RPI headers enabled.\n"); 6131 "Extents and RPI headers enabled.\n");
6132 } 6132 }
6133 mempool_free(mboxq, phba->mbox_mem_pool); 6133 mempool_free(mboxq, phba->mbox_mem_pool);
6134 rc = -EIO;
6134 goto out_free_bsmbx; 6135 goto out_free_bsmbx;
6135 } 6136 }
6136 6137
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 79ba3ce063a4..23bdb1ca106e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -884,7 +884,7 @@ out_err:
884 wcqe->total_data_placed); 884 wcqe->total_data_placed);
885 nCmd->transferred_length = 0; 885 nCmd->transferred_length = 0;
886 nCmd->rcv_rsplen = 0; 886 nCmd->rcv_rsplen = 0;
887 nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; 887 nCmd->status = NVME_SC_INTERNAL;
888 } 888 }
889 } 889 }
890 890
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1f59e7a74c7b..6b33a1f24f56 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res)
180 goto rel; 180 goto rel;
181 181
182 if (unlikely(res == QLA_FUNCTION_FAILED)) 182 if (unlikely(res == QLA_FUNCTION_FAILED))
183 fd->status = NVME_SC_FC_TRANSPORT_ERROR; 183 fd->status = NVME_SC_INTERNAL;
184 else 184 else
185 fd->status = 0; 185 fd->status = 0;
186 186
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..3bd956d3bc5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3061 host->max_cmd_len, host->max_channel, host->max_lun, 3061 host->max_cmd_len, host->max_channel, host->max_lun,
3062 host->transportt, sht->vendor_id); 3062 host->transportt, sht->vendor_id);
3063 3063
3064 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3065
3064 /* Set up the irqs */ 3066 /* Set up the irqs */
3065 ret = qla2x00_request_irqs(ha, rsp); 3067 ret = qla2x00_request_irqs(ha, rsp);
3066 if (ret) 3068 if (ret)
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3223 */ 3225 */
3224 qla2xxx_wake_dpc(base_vha); 3226 qla2xxx_wake_dpc(base_vha);
3225 3227
3226 INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3227 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); 3228 INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3228 3229
3229 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { 3230 if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 38942050b265..dab876c65473 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
580 if (sshdr.asc == 0x20 || /* Invalid command operation code */ 580 if (sshdr.asc == 0x20 || /* Invalid command operation code */
581 sshdr.asc == 0x21 || /* Logical block address out of range */ 581 sshdr.asc == 0x21 || /* Logical block address out of range */
582 sshdr.asc == 0x24 || /* Invalid field in cdb */ 582 sshdr.asc == 0x24 || /* Invalid field in cdb */
583 sshdr.asc == 0x26) { /* Parameter value invalid */ 583 sshdr.asc == 0x26 || /* Parameter value invalid */
584 sshdr.asc == 0x27) { /* Write protected */
584 set_host_byte(scmd, DID_TARGET_FAILURE); 585 set_host_byte(scmd, DID_TARGET_FAILURE);
585 } 586 }
586 return SUCCESS; 587 return SUCCESS;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9cf6a80fe297..ad3ea24f0885 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1379 1379
1380 ret = scsi_setup_cmnd(sdev, req); 1380 ret = scsi_setup_cmnd(sdev, req);
1381out: 1381out:
1382 if (ret != BLKPREP_OK)
1383 cmd->flags &= ~SCMD_INITIALIZED;
1384 return scsi_prep_return(q, req, ret); 1382 return scsi_prep_return(q, req, ret);
1385} 1383}
1386 1384
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
1900 struct scsi_device *sdev = req->q->queuedata; 1898 struct scsi_device *sdev = req->q->queuedata;
1901 struct Scsi_Host *shost = sdev->host; 1899 struct Scsi_Host *shost = sdev->host;
1902 struct scatterlist *sg; 1900 struct scatterlist *sg;
1903 int ret;
1904 1901
1905 scsi_init_command(sdev, cmd); 1902 scsi_init_command(sdev, cmd);
1906 1903
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
1934 1931
1935 blk_mq_start_request(req); 1932 blk_mq_start_request(req);
1936 1933
1937 ret = scsi_setup_cmnd(sdev, req); 1934 return scsi_setup_cmnd(sdev, req);
1938 if (ret != BLK_STS_OK)
1939 cmd->flags &= ~SCMD_INITIALIZED;
1940 return ret;
1941} 1935}
1942 1936
1943static void scsi_mq_done(struct scsi_cmnd *cmd) 1937static void scsi_mq_done(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e7818afeda2b..15590a063ad9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
956 if (*bflags & BLIST_NO_DIF) 956 if (*bflags & BLIST_NO_DIF)
957 sdev->no_dif = 1; 957 sdev->no_dif = 1;
958 958
959 if (*bflags & BLIST_UNMAP_LIMIT_WS)
960 sdev->unmap_limit_for_ws = 1;
961
959 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; 962 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
960 963
961 if (*bflags & BLIST_TRY_VPD_PAGES) 964 if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
1376 spin_lock_irqsave(shost->host_lock, flags); 1376 spin_lock_irqsave(shost->host_lock, flags);
1377 restart: 1377 restart:
1378 list_for_each_entry(sdev, &shost->__devices, siblings) { 1378 list_for_each_entry(sdev, &shost->__devices, siblings) {
1379 /*
1380 * We cannot call scsi_device_get() here, as
1381 * we might've been called from rmmod() causing
1382 * scsi_device_get() to fail the module_is_live()
1383 * check.
1384 */
1379 if (sdev->channel != starget->channel || 1385 if (sdev->channel != starget->channel ||
1380 sdev->id != starget->id || 1386 sdev->id != starget->id ||
1381 scsi_device_get(sdev)) 1387 !get_device(&sdev->sdev_gendev))
1382 continue; 1388 continue;
1383 spin_unlock_irqrestore(shost->host_lock, flags); 1389 spin_unlock_irqrestore(shost->host_lock, flags);
1384 scsi_remove_device(sdev); 1390 scsi_remove_device(sdev);
1385 scsi_device_put(sdev); 1391 put_device(&sdev->sdev_gendev);
1386 spin_lock_irqsave(shost->host_lock, flags); 1392 spin_lock_irqsave(shost->host_lock, flags);
1387 goto restart; 1393 goto restart;
1388 } 1394 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index ba9d70f8a6a1..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2739 2739
2740 list_for_each_entry(rport, &fc_host->rports, peers) { 2740 list_for_each_entry(rport, &fc_host->rports, peers) {
2741 2741
2742 if ((rport->port_state == FC_PORTSTATE_BLOCKED) && 2742 if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
2743 rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
2743 (rport->channel == channel)) { 2744 (rport->channel == channel)) {
2744 2745
2745 switch (fc_host->tgtid_bind_type) { 2746 switch (fc_host->tgtid_bind_type) {
@@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2876 memcpy(&rport->port_name, &ids->port_name, 2877 memcpy(&rport->port_name, &ids->port_name,
2877 sizeof(rport->port_name)); 2878 sizeof(rport->port_name));
2878 rport->port_id = ids->port_id; 2879 rport->port_id = ids->port_id;
2879 rport->roles = ids->roles;
2880 rport->port_state = FC_PORTSTATE_ONLINE; 2880 rport->port_state = FC_PORTSTATE_ONLINE;
2881 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 2881 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2882 2882
@@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2885 fci->f->dd_fcrport_size); 2885 fci->f->dd_fcrport_size);
2886 spin_unlock_irqrestore(shost->host_lock, flags); 2886 spin_unlock_irqrestore(shost->host_lock, flags);
2887 2887
2888 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { 2888 fc_remote_port_rolechg(rport, ids->roles);
2889 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
2890
2891 /* initiate a scan of the target */
2892 spin_lock_irqsave(shost->host_lock, flags);
2893 rport->flags |= FC_RPORT_SCAN_PENDING;
2894 scsi_queue_work(shost, &rport->scan_work);
2895 spin_unlock_irqrestore(shost->host_lock, flags);
2896 }
2897 return rport; 2889 return rport;
2898 } 2890 }
2899 } 2891 }
@@ -3328,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3328{ 3320{
3329 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 3321 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3330 3322
3323 if (WARN_ON_ONCE(!rport))
3324 return FAST_IO_FAIL;
3325
3331 return fc_block_rport(rport); 3326 return fc_block_rport(rport);
3332} 3327}
3333EXPORT_SYMBOL(fc_block_scsi_eh); 3328EXPORT_SYMBOL(fc_block_scsi_eh);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8934f19bce8e..7404d26895f5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
2211EXPORT_SYMBOL_GPL(iscsi_free_session); 2211EXPORT_SYMBOL_GPL(iscsi_free_session);
2212 2212
2213/** 2213/**
2214 * iscsi_destroy_session - destroy iscsi session
2215 * @session: iscsi_session
2216 *
2217 * Can be called by a LLD or iscsi_transport. There must not be
2218 * any running connections.
2219 */
2220int iscsi_destroy_session(struct iscsi_cls_session *session)
2221{
2222 iscsi_remove_session(session);
2223 ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
2224 iscsi_free_session(session);
2225 return 0;
2226}
2227EXPORT_SYMBOL_GPL(iscsi_destroy_session);
2228
2229/**
2230 * iscsi_create_conn - create iscsi class connection 2214 * iscsi_create_conn - create iscsi class connection
2231 * @session: iscsi cls session 2215 * @session: iscsi cls session
2232 * @dd_size: private driver data size 2216 * @dd_size: private driver data size
@@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb)
3689 uint32_t group; 3673 uint32_t group;
3690 3674
3691 nlh = nlmsg_hdr(skb); 3675 nlh = nlmsg_hdr(skb);
3692 if (nlh->nlmsg_len < sizeof(*nlh) || 3676 if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
3693 skb->len < nlh->nlmsg_len) { 3677 skb->len < nlh->nlmsg_len) {
3694 break; 3678 break;
3695 } 3679 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fb9f8b5f4673..d175c5c5ccf8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
715 break; 715 break;
716 716
717 case SD_LBP_WS16: 717 case SD_LBP_WS16:
718 max_blocks = min_not_zero(sdkp->max_ws_blocks, 718 if (sdkp->device->unmap_limit_for_ws)
719 (u32)SD_MAX_WS16_BLOCKS); 719 max_blocks = sdkp->max_unmap_blocks;
720 else
721 max_blocks = sdkp->max_ws_blocks;
722
723 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
720 break; 724 break;
721 725
722 case SD_LBP_WS10: 726 case SD_LBP_WS10:
723 max_blocks = min_not_zero(sdkp->max_ws_blocks, 727 if (sdkp->device->unmap_limit_for_ws)
724 (u32)SD_MAX_WS10_BLOCKS); 728 max_blocks = sdkp->max_unmap_blocks;
729 else
730 max_blocks = sdkp->max_ws_blocks;
731
732 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
725 break; 733 break;
726 734
727 case SD_LBP_ZERO: 735 case SD_LBP_ZERO:
@@ -3099,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
3099 sd_read_security(sdkp, buffer); 3107 sd_read_security(sdkp, buffer);
3100 } 3108 }
3101 3109
3102 sdkp->first_scan = 0;
3103
3104 /* 3110 /*
3105 * We now have all cache related info, determine how we deal 3111 * We now have all cache related info, determine how we deal
3106 * with flush requests. 3112 * with flush requests.
@@ -3115,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
3115 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); 3121 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3116 3122
3117 /* 3123 /*
3118 * Use the device's preferred I/O size for reads and writes 3124 * Determine the device's preferred I/O size for reads and writes
3119 * unless the reported value is unreasonably small, large, or 3125 * unless the reported value is unreasonably small, large, or
3120 * garbage. 3126 * garbage.
3121 */ 3127 */
@@ -3129,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
3129 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 3135 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3130 (sector_t)BLK_DEF_MAX_SECTORS); 3136 (sector_t)BLK_DEF_MAX_SECTORS);
3131 3137
3132 /* Combine with controller limits */ 3138 /* Do not exceed controller limit */
3133 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 3139 rw_max = min(rw_max, queue_max_hw_sectors(q));
3140
3141 /*
3142 * Only update max_sectors if previously unset or if the current value
3143 * exceeds the capabilities of the hardware.
3144 */
3145 if (sdkp->first_scan ||
3146 q->limits.max_sectors > q->limits.max_dev_sectors ||
3147 q->limits.max_sectors > q->limits.max_hw_sectors)
3148 q->limits.max_sectors = rw_max;
3149
3150 sdkp->first_scan = 0;
3134 3151
3135 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); 3152 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
3136 sd_config_write_same(sdkp); 3153 sd_config_write_same(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0419c2298eab..aa28874e8fb9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
837 837
838 val = 0; 838 val = 0;
839 list_for_each_entry(srp, &sfp->rq_list, entry) { 839 list_for_each_entry(srp, &sfp->rq_list, entry) {
840 if (val > SG_MAX_QUEUE) 840 if (val >= SG_MAX_QUEUE)
841 break; 841 break;
842 rinfo[val].req_state = srp->done + 1; 842 rinfo[val].req_state = srp->done + 1;
843 rinfo[val].problem = 843 rinfo[val].problem =
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 6c7d7a460689..568e1c65aa82 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -99,11 +99,6 @@
99/* A3700_SPI_IF_TIME_REG */ 99/* A3700_SPI_IF_TIME_REG */
100#define A3700_SPI_CLK_CAPT_EDGE BIT(7) 100#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
101 101
102/* Flags and macros for struct a3700_spi */
103#define A3700_INSTR_CNT 1
104#define A3700_ADDR_CNT 3
105#define A3700_DUMMY_CNT 1
106
107struct a3700_spi { 102struct a3700_spi {
108 struct spi_master *master; 103 struct spi_master *master;
109 void __iomem *base; 104 void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
117 u8 byte_len; 112 u8 byte_len;
118 u32 wait_mask; 113 u32 wait_mask;
119 struct completion done; 114 struct completion done;
120 u32 addr_cnt;
121 u32 instr_cnt;
122 size_t hdr_cnt;
123}; 115};
124 116
125static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) 117static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
161} 153}
162 154
163static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, 155static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
164 unsigned int pin_mode) 156 unsigned int pin_mode, bool receiving)
165{ 157{
166 u32 val; 158 u32 val;
167 159
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
177 break; 169 break;
178 case SPI_NBITS_QUAD: 170 case SPI_NBITS_QUAD:
179 val |= A3700_SPI_DATA_PIN1; 171 val |= A3700_SPI_DATA_PIN1;
172 /* RX during address reception uses 4-pin */
173 if (receiving)
174 val |= A3700_SPI_ADDR_PIN;
180 break; 175 break;
181 default: 176 default:
182 dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); 177 dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
392 387
393 spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); 388 spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
394 389
395 return true; 390 /* Timeout was reached */
391 return false;
396} 392}
397 393
398static bool a3700_spi_transfer_wait(struct spi_device *spi, 394static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
446 442
447static void a3700_spi_header_set(struct a3700_spi *a3700_spi) 443static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
448{ 444{
449 u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; 445 unsigned int addr_cnt;
450 u32 val = 0; 446 u32 val = 0;
451 447
452 /* Clear the header registers */ 448 /* Clear the header registers */
453 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); 449 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
454 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); 450 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
455 spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); 451 spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
452 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
456 453
457 /* Set header counters */ 454 /* Set header counters */
458 if (a3700_spi->tx_buf) { 455 if (a3700_spi->tx_buf) {
459 if (a3700_spi->buf_len <= a3700_spi->instr_cnt) { 456 /*
460 instr_cnt = a3700_spi->buf_len; 457 * when tx data is not 4 bytes aligned, there will be unexpected
461 } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + 458 * bytes out of SPI output register, since it always shifts out
462 a3700_spi->addr_cnt)) { 459 * as whole 4 bytes. This might cause incorrect transaction with
463 instr_cnt = a3700_spi->instr_cnt; 460 * some devices. To avoid that, use SPI header count feature to
464 addr_cnt = a3700_spi->buf_len - instr_cnt; 461 * transfer up to 3 bytes of data first, and then make the rest
465 } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { 462 * of data 4-byte aligned.
466 instr_cnt = a3700_spi->instr_cnt; 463 */
467 addr_cnt = a3700_spi->addr_cnt; 464 addr_cnt = a3700_spi->buf_len % 4;
468 /* Need to handle the normal write case with 1 byte 465 if (addr_cnt) {
469 * data 466 val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
470 */ 467 << A3700_SPI_ADDR_CNT_BIT;
471 if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) 468 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
472 dummy_cnt = a3700_spi->buf_len - instr_cnt - 469
473 addr_cnt; 470 /* Update the buffer length to be transferred */
471 a3700_spi->buf_len -= addr_cnt;
472
473 /* transfer 1~3 bytes through address count */
474 val = 0;
475 while (addr_cnt--) {
476 val = (val << 8) | a3700_spi->tx_buf[0];
477 a3700_spi->tx_buf++;
478 }
479 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
474 } 480 }
475 val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
476 << A3700_SPI_INSTR_CNT_BIT);
477 val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
478 << A3700_SPI_ADDR_CNT_BIT);
479 val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
480 << A3700_SPI_DUMMY_CNT_BIT);
481 } 481 }
482 spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
483
484 /* Update the buffer length to be transferred */
485 a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
486
487 /* Set Instruction */
488 val = 0;
489 while (instr_cnt--) {
490 val = (val << 8) | a3700_spi->tx_buf[0];
491 a3700_spi->tx_buf++;
492 }
493 spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
494
495 /* Set Address */
496 val = 0;
497 while (addr_cnt--) {
498 val = (val << 8) | a3700_spi->tx_buf[0];
499 a3700_spi->tx_buf++;
500 }
501 spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
502} 482}
503 483
504static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) 484static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
512static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) 492static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
513{ 493{
514 u32 val; 494 u32 val;
515 int i = 0;
516 495
517 while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { 496 while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
518 val = 0; 497 val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
519 if (a3700_spi->buf_len >= 4) { 498 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
520 val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); 499 a3700_spi->buf_len -= 4;
521 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); 500 a3700_spi->tx_buf += 4;
522
523 a3700_spi->buf_len -= 4;
524 a3700_spi->tx_buf += 4;
525 } else {
526 /*
527 * If the remained buffer length is less than 4-bytes,
528 * we should pad the write buffer with all ones. So that
529 * it avoids overwrite the unexpected bytes following
530 * the last one.
531 */
532 val = GENMASK(31, 0);
533 while (a3700_spi->buf_len) {
534 val &= ~(0xff << (8 * i));
535 val |= *a3700_spi->tx_buf++ << (8 * i);
536 i++;
537 a3700_spi->buf_len--;
538
539 spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
540 val);
541 }
542 break;
543 }
544 } 501 }
545 502
546 return 0; 503 return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
645 a3700_spi->rx_buf = xfer->rx_buf; 602 a3700_spi->rx_buf = xfer->rx_buf;
646 a3700_spi->buf_len = xfer->len; 603 a3700_spi->buf_len = xfer->len;
647 604
648 /* SPI transfer headers */
649 a3700_spi_header_set(a3700_spi);
650
651 if (xfer->tx_buf) 605 if (xfer->tx_buf)
652 nbits = xfer->tx_nbits; 606 nbits = xfer->tx_nbits;
653 else if (xfer->rx_buf) 607 else if (xfer->rx_buf)
654 nbits = xfer->rx_nbits; 608 nbits = xfer->rx_nbits;
655 609
656 a3700_spi_pin_mode_set(a3700_spi, nbits); 610 a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
611
612 /* Flush the FIFOs */
613 a3700_spi_fifo_flush(a3700_spi);
614
615 /* Transfer first bytes of data when buffer is not 4-byte aligned */
616 a3700_spi_header_set(a3700_spi);
657 617
658 if (xfer->rx_buf) { 618 if (xfer->rx_buf) {
659 /* Set read data length */ 619 /* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
733 dev_err(&spi->dev, "wait wfifo empty timed out\n"); 693 dev_err(&spi->dev, "wait wfifo empty timed out\n");
734 return -ETIMEDOUT; 694 return -ETIMEDOUT;
735 } 695 }
736 } else { 696 }
737 /* 697
738 * If the instruction in SPI_INSTR does not require data 698 if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
739 * to be written to the SPI device, wait until SPI_RDY 699 dev_err(&spi->dev, "wait xfer ready timed out\n");
740 * is 1 for the SPI interface to be in idle. 700 return -ETIMEDOUT;
741 */
742 if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
743 dev_err(&spi->dev, "wait xfer ready timed out\n");
744 return -ETIMEDOUT;
745 }
746 } 701 }
747 702
748 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); 703 val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
834 memset(spi, 0, sizeof(struct a3700_spi)); 789 memset(spi, 0, sizeof(struct a3700_spi));
835 790
836 spi->master = master; 791 spi->master = master;
837 spi->instr_cnt = A3700_INSTR_CNT;
838 spi->addr_cnt = A3700_ADDR_CNT;
839 spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
840 A3700_DUMMY_CNT;
841 792
842 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 793 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
843 spi->base = devm_ioremap_resource(dev, res); 794 spi->base = devm_ioremap_resource(dev, res);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6ef6c44f39f5..a172ab299e80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1250 goto qspi_probe_err; 1250 goto qspi_probe_err;
1251 } 1251 }
1252 } else { 1252 } else {
1253 goto qspi_probe_err; 1253 goto qspi_resource_err;
1254 } 1254 }
1255 1255
1256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); 1256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1272 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); 1272 qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
1273 if (IS_ERR(qspi->base[CHIP_SELECT])) { 1273 if (IS_ERR(qspi->base[CHIP_SELECT])) {
1274 ret = PTR_ERR(qspi->base[CHIP_SELECT]); 1274 ret = PTR_ERR(qspi->base[CHIP_SELECT]);
1275 goto qspi_probe_err; 1275 goto qspi_resource_err;
1276 } 1276 }
1277 } 1277 }
1278 1278
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
1280 GFP_KERNEL); 1280 GFP_KERNEL);
1281 if (!qspi->dev_ids) { 1281 if (!qspi->dev_ids) {
1282 ret = -ENOMEM; 1282 ret = -ENOMEM;
1283 goto qspi_probe_err; 1283 goto qspi_resource_err;
1284 } 1284 }
1285 1285
1286 for (val = 0; val < num_irqs; val++) { 1286 for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
1369 bcm_qspi_hw_uninit(qspi); 1369 bcm_qspi_hw_uninit(qspi);
1370 clk_disable_unprepare(qspi->clk); 1370 clk_disable_unprepare(qspi->clk);
1371qspi_probe_err: 1371qspi_probe_err:
1372 spi_master_put(master);
1373 kfree(qspi->dev_ids); 1372 kfree(qspi->dev_ids);
1373qspi_resource_err:
1374 spi_master_put(master);
1374 return ret; 1375 return ret;
1375} 1376}
1376/* probe function to be called by SoC specific platform driver probe */ 1377/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 680cdf549506..ba9743fa2326 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
263 * no need to check it there. 263 * no need to check it there.
264 * However, we need to ensure the following calculations. 264 * However, we need to ensure the following calculations.
265 */ 265 */
266 if ((div < SPI_MBR_DIV_MIN) && 266 if (div < SPI_MBR_DIV_MIN ||
267 (div > SPI_MBR_DIV_MAX)) 267 div > SPI_MBR_DIV_MAX)
268 return -EINVAL; 268 return -EINVAL;
269 269
270 /* Determine the first power of 2 greater than or equal to div */ 270 /* Determine the first power of 2 greater than or equal to div */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6e65524cbfd9..e8b5a5e21b2e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -45,7 +45,6 @@
45 45
46#define CREATE_TRACE_POINTS 46#define CREATE_TRACE_POINTS
47#include <trace/events/spi.h> 47#include <trace/events/spi.h>
48#define SPI_DYN_FIRST_BUS_NUM 0
49 48
50static DEFINE_IDR(spi_master_idr); 49static DEFINE_IDR(spi_master_idr);
51 50
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr)
2086 struct device *dev = ctlr->dev.parent; 2085 struct device *dev = ctlr->dev.parent;
2087 struct boardinfo *bi; 2086 struct boardinfo *bi;
2088 int status = -ENODEV; 2087 int status = -ENODEV;
2089 int id; 2088 int id, first_dynamic;
2090 2089
2091 if (!dev) 2090 if (!dev)
2092 return -ENODEV; 2091 return -ENODEV;
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr)
2116 } 2115 }
2117 } 2116 }
2118 if (ctlr->bus_num < 0) { 2117 if (ctlr->bus_num < 0) {
2118 first_dynamic = of_alias_get_highest_id("spi");
2119 if (first_dynamic < 0)
2120 first_dynamic = 0;
2121 else
2122 first_dynamic++;
2123
2119 mutex_lock(&board_lock); 2124 mutex_lock(&board_lock);
2120 id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0, 2125 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2121 GFP_KERNEL); 2126 0, GFP_KERNEL);
2122 mutex_unlock(&board_lock); 2127 mutex_unlock(&board_lock);
2123 if (WARN(id < 0, "couldn't get idr")) 2128 if (WARN(id < 0, "couldn't get idr"))
2124 return id; 2129 return id;
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index d11c6de9c777..6150d2780e22 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -223,11 +223,9 @@ static int ad7192_setup(struct ad7192_state *st,
223 struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi); 223 struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
224 unsigned long long scale_uv; 224 unsigned long long scale_uv;
225 int i, ret, id; 225 int i, ret, id;
226 u8 ones[6];
227 226
228 /* reset the serial interface */ 227 /* reset the serial interface */
229 memset(&ones, 0xFF, 6); 228 ret = ad_sd_reset(&st->sd, 48);
230 ret = spi_write(st->sd.spi, &ones, 6);
231 if (ret < 0) 229 if (ret < 0)
232 goto out; 230 goto out;
233 usleep_range(500, 1000); /* Wait for at least 500us */ 231 usleep_range(500, 1000); /* Wait for at least 500us */
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 1691760339da..02573c517d9d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
172 reg_address); 172 reg_address);
173 goto error_ret; 173 goto error_ret;
174 } 174 }
175 *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) | 175 *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
176 (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5]; 176 (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
177 177
178error_ret: 178error_ret:
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
400 struct media_link, list); 400 struct media_link, list);
401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); 401 ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
402 if (ret) 402 if (ret)
403 break; 403 return ret;
404 } 404 }
405 405
406 return ret; 406 return 0;
407} 407}
408 408
409/* async subdev complete notifier */ 409/* async subdev complete notifier */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 13eaf16ecd16..87595c594b12 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -496,8 +496,12 @@ static int spinand_program_page(struct spi_device *spi_nand,
496 if (!wbuf) 496 if (!wbuf)
497 return -ENOMEM; 497 return -ENOMEM;
498 498
499 enable_read_hw_ecc = 0; 499 enable_read_hw_ecc = 1;
500 spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf); 500 retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
501 if (retval < 0) {
502 dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
503 return retval;
504 }
501 505
502 for (i = offset, j = 0; i < len; i++, j++) 506 for (i = offset, j = 0; i < len; i++, j++)
503 wbuf[i] &= buf[j]; 507 wbuf[i] &= buf[j];
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index c4b1b218ea38..290b419aa9dd 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -570,12 +570,6 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
570 dev_dbg(&spi->dev, "set: DIO mapping"); 570 dev_dbg(&spi->dev, "set: DIO mapping");
571 #endif 571 #endif
572 572
573 // check DIO number
574 if (DIONumber > 5) {
575 dev_dbg(&spi->dev, "set: illegal input param");
576 return -EINVAL;
577 }
578
579 switch (DIONumber) { 573 switch (DIONumber) {
580 case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break; 574 case 0: mask=MASK_DIO0; shift=SHIFT_DIO0; regaddr=REG_DIOMAPPING1; break;
581 case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break; 575 case 1: mask=MASK_DIO1; shift=SHIFT_DIO1; regaddr=REG_DIOMAPPING1; break;
@@ -583,6 +577,9 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 DIONumber, u8 value)
583 case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break; 577 case 3: mask=MASK_DIO3; shift=SHIFT_DIO3; regaddr=REG_DIOMAPPING1; break;
584 case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break; 578 case 4: mask=MASK_DIO4; shift=SHIFT_DIO4; regaddr=REG_DIOMAPPING2; break;
585 case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break; 579 case 5: mask=MASK_DIO5; shift=SHIFT_DIO5; regaddr=REG_DIOMAPPING2; break;
580 default:
581 dev_dbg(&spi->dev, "set: illegal input param");
582 return -EINVAL;
586 } 583 }
587 584
588 // read reg 585 // read reg
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 6b778206a1a3..cb8a95aabd6c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -119,9 +119,8 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
119 119
120void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) 120void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
121{ 121{
122 rtw_free_mlme_priv_ie_data(pmlmepriv);
123
124 if (pmlmepriv) { 122 if (pmlmepriv) {
123 rtw_free_mlme_priv_ie_data(pmlmepriv);
125 if (pmlmepriv->free_bss_buf) { 124 if (pmlmepriv->free_bss_buf) {
126 vfree(pmlmepriv->free_bss_buf); 125 vfree(pmlmepriv->free_bss_buf);
127 } 126 }
diff --git a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
index 92277457aba4..ce1dd6f9036f 100644
--- a/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
+++ b/drivers/staging/rtl8723bs/os_dep/rtw_proc.c
@@ -311,6 +311,8 @@ static ssize_t proc_set_cam(struct file *file, const char __user *buffer, size_t
311 311
312 if (num < 2) 312 if (num < 2)
313 return count; 313 return count;
314 if (id >= TOTAL_CAM_ENTRY)
315 return -EINVAL;
314 316
315 if (strcmp("c", cmd) == 0) { 317 if (strcmp("c", cmd) == 0) {
316 _clear_cam_entry(adapter, id); 318 _clear_cam_entry(adapter, id);
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
index 5f84526cb5b5..edbf6af1c8b7 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c
@@ -2901,11 +2901,11 @@ halmac_update_datapack_88xx(struct halmac_adapter *halmac_adapter,
2901 if (halmac_adapter->fw_version.h2c_version < 4) 2901 if (halmac_adapter->fw_version.h2c_version < 4)
2902 return HALMAC_RET_FW_NO_SUPPORT; 2902 return HALMAC_RET_FW_NO_SUPPORT;
2903 2903
2904 driver_adapter = halmac_adapter->driver_adapter;
2905
2904 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 2906 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
2905 "[TRACE]%s ==========>\n", __func__); 2907 "[TRACE]%s ==========>\n", __func__);
2906 2908
2907 driver_adapter = halmac_adapter->driver_adapter;
2908
2909 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 2909 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
2910 "[TRACE]%s <==========\n", __func__); 2910 "[TRACE]%s <==========\n", __func__);
2911 2911
diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
index f33024e4d853..544f638ed3ef 100644
--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
@@ -1618,10 +1618,11 @@ halmac_send_h2c_set_pwr_mode_88xx(struct halmac_adapter *halmac_adapter,
1618 void *driver_adapter = NULL; 1618 void *driver_adapter = NULL;
1619 enum halmac_ret_status status = HALMAC_RET_SUCCESS; 1619 enum halmac_ret_status status = HALMAC_RET_SUCCESS;
1620 1620
1621 driver_adapter = halmac_adapter->driver_adapter;
1622
1621 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 1623 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
1622 "%s!!\n", __func__); 1624 "%s!!\n", __func__);
1623 1625
1624 driver_adapter = halmac_adapter->driver_adapter;
1625 h2c_header = h2c_buff; 1626 h2c_header = h2c_buff;
1626 h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; 1627 h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
1627 1628
@@ -1713,10 +1714,11 @@ halmac_media_status_rpt_88xx(struct halmac_adapter *halmac_adapter, u8 op_mode,
1713 void *driver_adapter = NULL; 1714 void *driver_adapter = NULL;
1714 enum halmac_ret_status status = HALMAC_RET_SUCCESS; 1715 enum halmac_ret_status status = HALMAC_RET_SUCCESS;
1715 1716
1717 driver_adapter = halmac_adapter->driver_adapter;
1718
1716 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 1719 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
1717 "halmac_send_h2c_set_pwr_mode_88xx!!\n"); 1720 "halmac_send_h2c_set_pwr_mode_88xx!!\n");
1718 1721
1719 driver_adapter = halmac_adapter->driver_adapter;
1720 h2c_header = H2c_buff; 1722 h2c_header = H2c_buff;
1721 h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX; 1723 h2c_cmd = h2c_header + HALMAC_H2C_CMD_HDR_SIZE_88XX;
1722 1724
@@ -2143,10 +2145,11 @@ halmac_func_ctrl_ch_switch_88xx(struct halmac_adapter *halmac_adapter,
2143 enum halmac_cmd_process_status *process_status = 2145 enum halmac_cmd_process_status *process_status =
2144 &halmac_adapter->halmac_state.scan_state_set.process_status; 2146 &halmac_adapter->halmac_state.scan_state_set.process_status;
2145 2147
2148 driver_adapter = halmac_adapter->driver_adapter;
2149
2146 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 2150 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
2147 "halmac_ctrl_ch_switch!!\n"); 2151 "halmac_ctrl_ch_switch!!\n");
2148 2152
2149 driver_adapter = halmac_adapter->driver_adapter;
2150 halmac_api = (struct halmac_api *)halmac_adapter->halmac_api; 2153 halmac_api = (struct halmac_api *)halmac_adapter->halmac_api;
2151 2154
2152 if (halmac_transition_scan_state_88xx( 2155 if (halmac_transition_scan_state_88xx(
@@ -2276,15 +2279,13 @@ enum halmac_ret_status halmac_send_h2c_update_bcn_parse_info_88xx(
2276{ 2279{
2277 u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0}; 2280 u8 h2c_buff[HALMAC_H2C_CMD_SIZE_88XX] = {0};
2278 u16 h2c_seq_mum = 0; 2281 u16 h2c_seq_mum = 0;
2279 void *driver_adapter = NULL; 2282 void *driver_adapter = halmac_adapter->driver_adapter;
2280 struct halmac_h2c_header_info h2c_header_info; 2283 struct halmac_h2c_header_info h2c_header_info;
2281 enum halmac_ret_status status = HALMAC_RET_SUCCESS; 2284 enum halmac_ret_status status = HALMAC_RET_SUCCESS;
2282 2285
2283 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG, 2286 HALMAC_RT_TRACE(driver_adapter, HALMAC_MSG_H2C, DBG_DMESG,
2284 "%s!!\n", __func__); 2287 "%s!!\n", __func__);
2285 2288
2286 driver_adapter = halmac_adapter->driver_adapter;
2287
2288 UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en); 2289 UPDATE_BEACON_PARSING_INFO_SET_FUNC_EN(h2c_buff, bcn_ie_info->func_en);
2289 UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th); 2290 UPDATE_BEACON_PARSING_INFO_SET_SIZE_TH(h2c_buff, bcn_ie_info->size_th);
2290 UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout); 2291 UPDATE_BEACON_PARSING_INFO_SET_TIMEOUT(h2c_buff, bcn_ie_info->timeout);
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 67956e24779c..56f7be6af1f6 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1376,6 +1376,8 @@ static void reset_highlight_buffers(struct vc_data *);
1376 1376
1377static int read_all_key; 1377static int read_all_key;
1378 1378
1379static int in_keyboard_notifier;
1380
1379static void start_read_all_timer(struct vc_data *vc, int command); 1381static void start_read_all_timer(struct vc_data *vc, int command);
1380 1382
1381enum { 1383enum {
@@ -1408,7 +1410,10 @@ static void read_all_doc(struct vc_data *vc)
1408 cursor_track = read_all_mode; 1410 cursor_track = read_all_mode;
1409 spk_reset_index_count(0); 1411 spk_reset_index_count(0);
1410 if (get_sentence_buf(vc, 0) == -1) { 1412 if (get_sentence_buf(vc, 0) == -1) {
1411 kbd_fakekey2(vc, RA_DOWN_ARROW); 1413 del_timer(&cursor_timer);
1414 if (!in_keyboard_notifier)
1415 speakup_fake_down_arrow();
1416 start_read_all_timer(vc, RA_DOWN_ARROW);
1412 } else { 1417 } else {
1413 say_sentence_num(0, 0); 1418 say_sentence_num(0, 0);
1414 synth_insert_next_index(0); 1419 synth_insert_next_index(0);
@@ -2212,8 +2217,10 @@ static int keyboard_notifier_call(struct notifier_block *nb,
2212 int ret = NOTIFY_OK; 2217 int ret = NOTIFY_OK;
2213 static int keycode; /* to hold the current keycode */ 2218 static int keycode; /* to hold the current keycode */
2214 2219
2220 in_keyboard_notifier = 1;
2221
2215 if (vc->vc_mode == KD_GRAPHICS) 2222 if (vc->vc_mode == KD_GRAPHICS)
2216 return ret; 2223 goto out;
2217 2224
2218 /* 2225 /*
2219 * First, determine whether we are handling a fake keypress on 2226 * First, determine whether we are handling a fake keypress on
@@ -2225,7 +2232,7 @@ static int keyboard_notifier_call(struct notifier_block *nb,
2225 */ 2232 */
2226 2233
2227 if (speakup_fake_key_pressed()) 2234 if (speakup_fake_key_pressed())
2228 return ret; 2235 goto out;
2229 2236
2230 switch (code) { 2237 switch (code) {
2231 case KBD_KEYCODE: 2238 case KBD_KEYCODE:
@@ -2266,6 +2273,8 @@ static int keyboard_notifier_call(struct notifier_block *nb,
2266 break; 2273 break;
2267 } 2274 }
2268 } 2275 }
2276out:
2277 in_keyboard_notifier = 0;
2269 return ret; 2278 return ret;
2270} 2279}
2271 2280
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 74cce4f1a7bd..27ecf6fb49fd 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1826,7 +1826,7 @@ static __init int visorutil_spar_detect(void)
1826 return 0; 1826 return 0;
1827} 1827}
1828 1828
1829static int init_unisys(void) 1829static int __init init_unisys(void)
1830{ 1830{
1831 int result; 1831 int result;
1832 1832
@@ -1841,7 +1841,7 @@ static int init_unisys(void)
1841 return 0; 1841 return 0;
1842}; 1842};
1843 1843
1844static void exit_unisys(void) 1844static void __exit exit_unisys(void)
1845{ 1845{
1846 acpi_bus_unregister_driver(&unisys_acpi_driver); 1846 acpi_bus_unregister_driver(&unisys_acpi_driver);
1847} 1847}
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 5f3d8f2339e3..4be864dbd41c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
390 __func__, instance); 390 __func__, instance);
391 instance->alsa_stream = alsa_stream; 391 instance->alsa_stream = alsa_stream;
392 alsa_stream->instance = instance; 392 alsa_stream->instance = instance;
393 ret = 0; // xxx todo -1; 393 return 0;
394 goto err_free_mem;
395 } 394 }
396 395
397 /* Initialize and create a VCHI connection */ 396 /* Initialize and create a VCHI connection */
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
401 LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n", 400 LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
402 __func__, ret); 401 __func__, ret);
403 402
404 ret = -EIO; 403 return -EIO;
405 goto err_free_mem;
406 } 404 }
407 ret = vchi_connect(NULL, 0, vchi_instance); 405 ret = vchi_connect(NULL, 0, vchi_instance);
408 if (ret) { 406 if (ret) {
409 LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n", 407 LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
410 __func__, ret); 408 __func__, ret);
411 409
412 ret = -EIO; 410 kfree(vchi_instance);
413 goto err_free_mem; 411 return -EIO;
414 } 412 }
415 initted = 1; 413 initted = 1;
416 } 414 }
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
421 if (IS_ERR(instance)) { 419 if (IS_ERR(instance)) {
422 LOG_ERR("%s: failed to initialize audio service\n", __func__); 420 LOG_ERR("%s: failed to initialize audio service\n", __func__);
423 421
424 ret = PTR_ERR(instance); 422 /* vchi_instance is retained for use the next time. */
425 goto err_free_mem; 423 return PTR_ERR(instance);
426 } 424 }
427 425
428 instance->alsa_stream = alsa_stream; 426 instance->alsa_stream = alsa_stream;
429 alsa_stream->instance = instance; 427 alsa_stream->instance = instance;
430 428
431 LOG_DBG(" success !\n"); 429 LOG_DBG(" success !\n");
432 ret = 0;
433err_free_mem:
434 kfree(vchi_instance);
435 430
436 return ret; 431 return 0;
437} 432}
438 433
439int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) 434int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 0159ca4407d8..be08849175ea 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -612,18 +612,20 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
612 if (head_bytes > actual) 612 if (head_bytes > actual)
613 head_bytes = actual; 613 head_bytes = actual;
614 614
615 memcpy((char *)page_address(pages[0]) + 615 memcpy((char *)kmap(pages[0]) +
616 pagelist->offset, 616 pagelist->offset,
617 fragments, 617 fragments,
618 head_bytes); 618 head_bytes);
619 kunmap(pages[0]);
619 } 620 }
620 if ((actual >= 0) && (head_bytes < actual) && 621 if ((actual >= 0) && (head_bytes < actual) &&
621 (tail_bytes != 0)) { 622 (tail_bytes != 0)) {
622 memcpy((char *)page_address(pages[num_pages - 1]) + 623 memcpy((char *)kmap(pages[num_pages - 1]) +
623 ((pagelist->offset + actual) & 624 ((pagelist->offset + actual) &
624 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)), 625 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
625 fragments + g_cache_line_size, 626 fragments + g_cache_line_size,
626 tail_bytes); 627 tail_bytes);
628 kunmap(pages[num_pages - 1]);
627 } 629 }
628 630
629 down(&g_free_fragments_mutex); 631 down(&g_free_fragments_mutex);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 1c0c9553bc05..7dd38047ba23 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -246,11 +246,11 @@ struct mxser_port {
246 unsigned char err_shadow; 246 unsigned char err_shadow;
247 247
248 struct async_icount icount; /* kernel counters for 4 input interrupts */ 248 struct async_icount icount; /* kernel counters for 4 input interrupts */
249 int timeout; 249 unsigned int timeout;
250 250
251 int read_status_mask; 251 int read_status_mask;
252 int ignore_status_mask; 252 int ignore_status_mask;
253 int xmit_fifo_size; 253 unsigned int xmit_fifo_size;
254 int xmit_head; 254 int xmit_head;
255 int xmit_tail; 255 int xmit_tail;
256 int xmit_cnt; 256 int xmit_cnt;
@@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
572static int mxser_set_baud(struct tty_struct *tty, long newspd) 572static int mxser_set_baud(struct tty_struct *tty, long newspd)
573{ 573{
574 struct mxser_port *info = tty->driver_data; 574 struct mxser_port *info = tty->driver_data;
575 int quot = 0, baud; 575 unsigned int quot = 0, baud;
576 unsigned char cval; 576 unsigned char cval;
577 u64 timeout;
577 578
578 if (!info->ioaddr) 579 if (!info->ioaddr)
579 return -1; 580 return -1;
@@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
594 quot = 0; 595 quot = 0;
595 } 596 }
596 597
597 info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base); 598 /*
598 info->timeout += HZ / 50; /* Add .02 seconds of slop */ 599 * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the
600 * u64 domain
601 */
602 timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
603 do_div(timeout, info->baud_base);
604 info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
599 605
600 if (quot) { 606 if (quot) {
601 info->MCR |= UART_MCR_DTR; 607 info->MCR |= UART_MCR_DTR;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 583c9a0c7ecc..8c48c3784831 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port,
507{ 507{
508 unsigned int ctl, baud, quot, ier; 508 unsigned int ctl, baud, quot, ier;
509 unsigned long flags; 509 unsigned long flags;
510 int tries;
510 511
511 spin_lock_irqsave(&port->lock, flags); 512 spin_lock_irqsave(&port->lock, flags);
512 513
514 /* Drain the hot tub fully before we power it off for the winter. */
515 for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
516 mdelay(10);
517
513 /* disable uart while changing speed */ 518 /* disable uart while changing speed */
514 bcm_uart_disable(port); 519 bcm_uart_disable(port);
515 bcm_uart_flush(port); 520 bcm_uart_flush(port);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 849c1f9991ce..f0252184291e 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport)
1276static int lpuart_startup(struct uart_port *port) 1276static int lpuart_startup(struct uart_port *port)
1277{ 1277{
1278 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1278 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1279 int ret;
1280 unsigned long flags; 1279 unsigned long flags;
1281 unsigned char temp; 1280 unsigned char temp;
1282 1281
@@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port)
1291 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & 1290 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
1292 UARTPFIFO_FIFOSIZE_MASK) + 1); 1291 UARTPFIFO_FIFOSIZE_MASK) + 1);
1293 1292
1294 ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
1295 DRIVER_NAME, sport);
1296 if (ret)
1297 return ret;
1298
1299 spin_lock_irqsave(&sport->port.lock, flags); 1293 spin_lock_irqsave(&sport->port.lock, flags);
1300 1294
1301 lpuart_setup_watermark(sport); 1295 lpuart_setup_watermark(sport);
@@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port)
1333static int lpuart32_startup(struct uart_port *port) 1327static int lpuart32_startup(struct uart_port *port)
1334{ 1328{
1335 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1329 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1336 int ret;
1337 unsigned long flags; 1330 unsigned long flags;
1338 unsigned long temp; 1331 unsigned long temp;
1339 1332
@@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port)
1346 sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & 1339 sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
1347 UARTFIFO_FIFOSIZE_MASK) - 1); 1340 UARTFIFO_FIFOSIZE_MASK) - 1);
1348 1341
1349 ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0,
1350 DRIVER_NAME, sport);
1351 if (ret)
1352 return ret;
1353
1354 spin_lock_irqsave(&sport->port.lock, flags); 1342 spin_lock_irqsave(&sport->port.lock, flags);
1355 1343
1356 lpuart32_setup_watermark(sport); 1344 lpuart32_setup_watermark(sport);
@@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port)
1380 1368
1381 spin_unlock_irqrestore(&port->lock, flags); 1369 spin_unlock_irqrestore(&port->lock, flags);
1382 1370
1383 devm_free_irq(port->dev, port->irq, sport);
1384
1385 if (sport->lpuart_dma_rx_use) { 1371 if (sport->lpuart_dma_rx_use) {
1386 del_timer_sync(&sport->lpuart_timer); 1372 del_timer_sync(&sport->lpuart_timer);
1387 lpuart_dma_rx_free(&sport->port); 1373 lpuart_dma_rx_free(&sport->port);
@@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port)
1400 1386
1401static void lpuart32_shutdown(struct uart_port *port) 1387static void lpuart32_shutdown(struct uart_port *port)
1402{ 1388{
1403 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1404 unsigned long temp; 1389 unsigned long temp;
1405 unsigned long flags; 1390 unsigned long flags;
1406 1391
@@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port)
1413 lpuart32_write(port, temp, UARTCTRL); 1398 lpuart32_write(port, temp, UARTCTRL);
1414 1399
1415 spin_unlock_irqrestore(&port->lock, flags); 1400 spin_unlock_irqrestore(&port->lock, flags);
1416
1417 devm_free_irq(port->dev, port->irq, sport);
1418} 1401}
1419 1402
1420static void 1403static void
@@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev)
2212 2195
2213 platform_set_drvdata(pdev, &sport->port); 2196 platform_set_drvdata(pdev, &sport->port);
2214 2197
2215 if (lpuart_is_32(sport)) 2198 if (lpuart_is_32(sport)) {
2216 lpuart_reg.cons = LPUART32_CONSOLE; 2199 lpuart_reg.cons = LPUART32_CONSOLE;
2217 else 2200 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
2201 DRIVER_NAME, sport);
2202 } else {
2218 lpuart_reg.cons = LPUART_CONSOLE; 2203 lpuart_reg.cons = LPUART_CONSOLE;
2204 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
2205 DRIVER_NAME, sport);
2206 }
2207
2208 if (ret)
2209 goto failed_irq_request;
2219 2210
2220 ret = uart_add_one_port(&lpuart_reg, &sport->port); 2211 ret = uart_add_one_port(&lpuart_reg, &sport->port);
2221 if (ret) { 2212 if (ret)
2222 clk_disable_unprepare(sport->clk); 2213 goto failed_attach_port;
2223 return ret;
2224 }
2225 2214
2226 sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); 2215 sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
2227 if (!sport->dma_tx_chan) 2216 if (!sport->dma_tx_chan)
@@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev)
2240 } 2229 }
2241 2230
2242 return 0; 2231 return 0;
2232
2233failed_attach_port:
2234failed_irq_request:
2235 clk_disable_unprepare(sport->clk);
2236 return ret;
2243} 2237}
2244 2238
2245static int lpuart_remove(struct platform_device *pdev) 2239static int lpuart_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index cdd2f942317c..b9c7a904c1ea 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev)
889 goto err_out; 889 goto err_out;
890 uartclk = 0; 890 uartclk = 0;
891 } else { 891 } else {
892 clk_prepare_enable(clk); 892 ret = clk_prepare_enable(clk);
893 if (ret)
894 goto err_out;
895
896 ret = devm_add_action_or_reset(&pdev->dev,
897 (void(*)(void *))clk_disable_unprepare,
898 clk);
899 if (ret)
900 goto err_out;
901
893 uartclk = clk_get_rate(clk); 902 uartclk = clk_get_rate(clk);
894 } 903 }
895 904
@@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev)
988 uart_unregister_driver(&s->uart); 997 uart_unregister_driver(&s->uart);
989err_out: 998err_out:
990 if (!IS_ERR(s->regulator)) 999 if (!IS_ERR(s->regulator))
991 return regulator_disable(s->regulator); 1000 regulator_disable(s->regulator);
992 1001
993 return ret; 1002 return ret;
994} 1003}
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 2fe216b276e2..84a8ac2a779f 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
694 tty_set_termios_ldisc(tty, disc); 694 tty_set_termios_ldisc(tty, disc);
695 retval = tty_ldisc_open(tty, tty->ldisc); 695 retval = tty_ldisc_open(tty, tty->ldisc);
696 if (retval) { 696 if (retval) {
697 if (!WARN_ON(disc == N_TTY)) { 697 tty_ldisc_put(tty->ldisc);
698 tty_ldisc_put(tty->ldisc); 698 tty->ldisc = NULL;
699 tty->ldisc = NULL;
700 }
701 } 699 }
702 return retval; 700 return retval;
703} 701}
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
752 750
753 if (tty->ldisc) { 751 if (tty->ldisc) {
754 if (reinit) { 752 if (reinit) {
755 if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) 753 if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
756 tty_ldisc_reinit(tty, N_TTY); 754 tty_ldisc_reinit(tty, N_TTY) < 0)
755 WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
757 } else 756 } else
758 tty_ldisc_kill(tty); 757 tty_ldisc_kill(tty);
759 } 758 }
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e056064259c..18c923a4c16e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
1832 { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ 1832 { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
1833 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1833 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1834 }, 1834 },
1835 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1836 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1837 },
1835 1838
1836 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1839 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1837 .driver_info = CLEAR_HALT_CONDITIONS, 1840 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 5aacea1978a5..3e865dbf878c 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb)
190 /* 190 /*
191 * only set a new error if there is no previous error. 191 * only set a new error if there is no previous error.
192 * Errors are only cleared during read/open 192 * Errors are only cleared during read/open
193 * Avoid propagating -EPIPE (stall) to userspace since it is
194 * better handled as an empty read
193 */ 195 */
194 if (desc->rerr == 0) 196 if (desc->rerr == 0 && status != -EPIPE)
195 desc->rerr = status; 197 desc->rerr = status;
196 198
197 if (length + desc->length > desc->wMaxCommand) { 199 if (length + desc->length > desc->wMaxCommand) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 4be52c602e9b..883549ee946c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
643 643
644 } else if (header->bDescriptorType == 644 } else if (header->bDescriptorType ==
645 USB_DT_INTERFACE_ASSOCIATION) { 645 USB_DT_INTERFACE_ASSOCIATION) {
646 struct usb_interface_assoc_descriptor *d;
647
648 d = (struct usb_interface_assoc_descriptor *)header;
649 if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
650 dev_warn(ddev,
651 "config %d has an invalid interface association descriptor of length %d, skipping\n",
652 cfgno, d->bLength);
653 continue;
654 }
655
646 if (iad_num == USB_MAXIADS) { 656 if (iad_num == USB_MAXIADS) {
647 dev_warn(ddev, "found more Interface " 657 dev_warn(ddev, "found more Interface "
648 "Association Descriptors " 658 "Association Descriptors "
649 "than allocated for in " 659 "than allocated for in "
650 "configuration %d\n", cfgno); 660 "configuration %d\n", cfgno);
651 } else { 661 } else {
652 config->intf_assoc[iad_num] = 662 config->intf_assoc[iad_num] = d;
653 (struct usb_interface_assoc_descriptor
654 *)header;
655 iad_num++; 663 iad_num++;
656 } 664 }
657 665
@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
852 } 860 }
853 861
854 if (dev->quirks & USB_QUIRK_DELAY_INIT) 862 if (dev->quirks & USB_QUIRK_DELAY_INIT)
855 msleep(100); 863 msleep(200);
856 864
857 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 865 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
858 bigbuffer, length); 866 bigbuffer, length);
@@ -952,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
952 for (i = 0; i < num; i++) { 960 for (i = 0; i < num; i++) {
953 buffer += length; 961 buffer += length;
954 cap = (struct usb_dev_cap_header *)buffer; 962 cap = (struct usb_dev_cap_header *)buffer;
955 length = cap->bLength;
956 963
957 if (total_len < length) 964 if (total_len < sizeof(*cap) || total_len < cap->bLength) {
965 dev->bos->desc->bNumDeviceCaps = i;
958 break; 966 break;
967 }
968 length = cap->bLength;
959 total_len -= length; 969 total_len -= length;
960 970
961 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { 971 if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 318bb3b96687..e9326f31db8d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
140MODULE_PARM_DESC(usbfs_memory_mb, 140MODULE_PARM_DESC(usbfs_memory_mb,
141 "maximum MB allowed for usbfs buffers (0 = no limit)"); 141 "maximum MB allowed for usbfs buffers (0 = no limit)");
142 142
143/* Hard limit, necessary to avoid arithmetic overflow */
144#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
145
143static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ 146static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
144 147
145/* Check whether it's okay to allocate more memory for a transfer */ 148/* Check whether it's okay to allocate more memory for a transfer */
@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1460 USBDEVFS_URB_ZERO_PACKET | 1463 USBDEVFS_URB_ZERO_PACKET |
1461 USBDEVFS_URB_NO_INTERRUPT)) 1464 USBDEVFS_URB_NO_INTERRUPT))
1462 return -EINVAL; 1465 return -EINVAL;
1466 if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
1467 return -EINVAL;
1463 if (uurb->buffer_length > 0 && !uurb->buffer) 1468 if (uurb->buffer_length > 0 && !uurb->buffer)
1464 return -EINVAL; 1469 return -EINVAL;
1465 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && 1470 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 41eaf0b52518..e9ce6bb0b22d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
2710 if (!(portstatus & USB_PORT_STAT_CONNECTION)) 2710 if (!(portstatus & USB_PORT_STAT_CONNECTION))
2711 return -ENOTCONN; 2711 return -ENOTCONN;
2712 2712
2713 /* bomb out completely if the connection bounced. A USB 3.0 2713 /* Retry if connect change is set but status is still connected.
2714 * connection may bounce if multiple warm resets were issued, 2714 * A USB 3.0 connection may bounce if multiple warm resets were issued,
2715 * but the device may have successfully re-connected. Ignore it. 2715 * but the device may have successfully re-connected. Ignore it.
2716 */ 2716 */
2717 if (!hub_is_superspeed(hub->hdev) && 2717 if (!hub_is_superspeed(hub->hdev) &&
2718 (portchange & USB_PORT_STAT_C_CONNECTION)) 2718 (portchange & USB_PORT_STAT_C_CONNECTION)) {
2719 return -ENOTCONN; 2719 usb_clear_port_feature(hub->hdev, port1,
2720 USB_PORT_FEAT_C_CONNECTION);
2721 return -EAGAIN;
2722 }
2720 2723
2721 if (!(portstatus & USB_PORT_STAT_ENABLE)) 2724 if (!(portstatus & USB_PORT_STAT_ENABLE))
2722 return -EBUSY; 2725 return -EBUSY;
@@ -4838,7 +4841,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4838 goto loop; 4841 goto loop;
4839 4842
4840 if (udev->quirks & USB_QUIRK_DELAY_INIT) 4843 if (udev->quirks & USB_QUIRK_DELAY_INIT)
4841 msleep(1000); 4844 msleep(2000);
4842 4845
4843 /* consecutive bus-powered hubs aren't reliable; they can 4846 /* consecutive bus-powered hubs aren't reliable; they can
4844 * violate the voltage drop budget. if the new child has 4847 * violate the voltage drop budget. if the new child has
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4c38ea41ae96..371a07d874a3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
2069 elength = 1; 2069 elength = 1;
2070 goto next_desc; 2070 goto next_desc;
2071 } 2071 }
2072 if ((buflen < elength) || (elength < 3)) {
2073 dev_err(&intf->dev, "invalid descriptor buffer length\n");
2074 break;
2075 }
2072 if (buffer[1] != USB_DT_CS_INTERFACE) { 2076 if (buffer[1] != USB_DT_CS_INTERFACE) {
2073 dev_err(&intf->dev, "skipping garbage\n"); 2077 dev_err(&intf->dev, "skipping garbage\n");
2074 goto next_desc; 2078 goto next_desc;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e311202..a6aaf2f193a4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
221 /* Corsair Strafe RGB */ 221 /* Corsair Strafe RGB */
222 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, 222 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
223 223
224 /* MIDI keyboard WORLDE MINI */
225 { USB_DEVICE(0x1c75, 0x0204), .driver_info =
226 USB_QUIRK_CONFIG_INTF_STRINGS },
227
224 /* Acer C120 LED Projector */ 228 /* Acer C120 LED Projector */
225 { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, 229 { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
226 230
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 4cef7d4f9cd0..a26d1fde0f5e 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
177 { .compatible = "rockchip,rk3399-dwc3" }, 177 { .compatible = "rockchip,rk3399-dwc3" },
178 { .compatible = "xlnx,zynqmp-dwc3" }, 178 { .compatible = "xlnx,zynqmp-dwc3" },
179 { .compatible = "cavium,octeon-7130-usb-uctl" }, 179 { .compatible = "cavium,octeon-7130-usb-uctl" },
180 { .compatible = "sprd,sc9860-dwc3" },
180 { /* Sentinel */ } 181 { /* Sentinel */ }
181}; 182};
182MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); 183MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 827e376bfa97..75e6cb044eb2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
990 DWC3_TRBCTL_CONTROL_DATA, 990 DWC3_TRBCTL_CONTROL_DATA,
991 true); 991 true);
992 992
993 req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
994
993 /* Now prepare one extra TRB to align transfer size */ 995 /* Now prepare one extra TRB to align transfer size */
994 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, 996 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
995 maxpacket - rem, 997 maxpacket - rem,
@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1015 DWC3_TRBCTL_CONTROL_DATA, 1017 DWC3_TRBCTL_CONTROL_DATA,
1016 true); 1018 true);
1017 1019
1020 req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
1021
1018 /* Now prepare one extra TRB to align transfer size */ 1022 /* Now prepare one extra TRB to align transfer size */
1019 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, 1023 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
1020 0, DWC3_TRBCTL_CONTROL_DATA, 1024 0, DWC3_TRBCTL_CONTROL_DATA,
@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1029 dwc3_ep0_prepare_one_trb(dep, req->request.dma, 1033 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
1030 req->request.length, DWC3_TRBCTL_CONTROL_DATA, 1034 req->request.length, DWC3_TRBCTL_CONTROL_DATA,
1031 false); 1035 false);
1036
1037 req->trb = &dwc->ep0_trb[dep->trb_enqueue];
1038
1032 ret = dwc3_ep0_start_trans(dep); 1039 ret = dwc3_ep0_start_trans(dep);
1033 } 1040 }
1034 1041
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd74c99d6ce1..5d061b3d8224 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended);
2026static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) 2026static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
2027{ 2027{
2028 struct usb_composite_dev *cdev = get_gadget_data(gadget); 2028 struct usb_composite_dev *cdev = get_gadget_data(gadget);
2029 struct usb_gadget_strings *gstr = cdev->driver->strings[0];
2030 struct usb_string *dev_str = gstr->strings;
2029 2031
2030 /* composite_disconnect() must already have been called 2032 /* composite_disconnect() must already have been called
2031 * by the underlying peripheral controller driver! 2033 * by the underlying peripheral controller driver!
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
2045 2047
2046 composite_dev_cleanup(cdev); 2048 composite_dev_cleanup(cdev);
2047 2049
2050 if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
2051 dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
2052
2048 kfree(cdev->def_manufacturer); 2053 kfree(cdev->def_manufacturer);
2049 kfree(cdev); 2054 kfree(cdev);
2050 set_gadget_data(gadget, NULL); 2055 set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index a22a892de7b7..aeb9f3c40521 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = {
1143 NULL 1143 NULL
1144}; 1144};
1145 1145
1146int usb_os_desc_prepare_interf_dir(struct config_group *parent, 1146struct config_group *usb_os_desc_prepare_interf_dir(
1147 int n_interf, 1147 struct config_group *parent,
1148 struct usb_os_desc **desc, 1148 int n_interf,
1149 char **names, 1149 struct usb_os_desc **desc,
1150 struct module *owner) 1150 char **names,
1151 struct module *owner)
1151{ 1152{
1152 struct config_group *os_desc_group; 1153 struct config_group *os_desc_group;
1153 struct config_item_type *os_desc_type, *interface_type; 1154 struct config_item_type *os_desc_type, *interface_type;
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1159 1160
1160 char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); 1161 char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL);
1161 if (!vlabuf) 1162 if (!vlabuf)
1162 return -ENOMEM; 1163 return ERR_PTR(-ENOMEM);
1163 1164
1164 os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); 1165 os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
1165 os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); 1166 os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1184 configfs_add_default_group(&d->group, os_desc_group); 1185 configfs_add_default_group(&d->group, os_desc_group);
1185 } 1186 }
1186 1187
1187 return 0; 1188 return os_desc_group;
1188} 1189}
1189EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); 1190EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir);
1190 1191
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index 36c468c4f5e9..540d5e92ed22 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -5,11 +5,12 @@
5 5
6void unregister_gadget_item(struct config_item *item); 6void unregister_gadget_item(struct config_item *item);
7 7
8int usb_os_desc_prepare_interf_dir(struct config_group *parent, 8struct config_group *usb_os_desc_prepare_interf_dir(
9 int n_interf, 9 struct config_group *parent,
10 struct usb_os_desc **desc, 10 int n_interf,
11 char **names, 11 struct usb_os_desc **desc,
12 struct module *owner); 12 char **names,
13 struct module *owner);
13 14
14static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) 15static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
15{ 16{
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 9990944a7245..8b342587f8ad 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,7 +46,8 @@
46static void ffs_data_get(struct ffs_data *ffs); 46static void ffs_data_get(struct ffs_data *ffs);
47static void ffs_data_put(struct ffs_data *ffs); 47static void ffs_data_put(struct ffs_data *ffs);
48/* Creates new ffs_data object. */ 48/* Creates new ffs_data object. */
49static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); 49static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
50 __attribute__((malloc));
50 51
51/* Opened counter handling. */ 52/* Opened counter handling. */
52static void ffs_data_opened(struct ffs_data *ffs); 53static void ffs_data_opened(struct ffs_data *ffs);
@@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
780 struct usb_request *req) 781 struct usb_request *req)
781{ 782{
782 struct ffs_io_data *io_data = req->context; 783 struct ffs_io_data *io_data = req->context;
784 struct ffs_data *ffs = io_data->ffs;
783 785
784 ENTER(); 786 ENTER();
785 787
786 INIT_WORK(&io_data->work, ffs_user_copy_worker); 788 INIT_WORK(&io_data->work, ffs_user_copy_worker);
787 schedule_work(&io_data->work); 789 queue_work(ffs->io_completion_wq, &io_data->work);
788} 790}
789 791
790static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) 792static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
@@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1500 if (unlikely(ret < 0)) 1502 if (unlikely(ret < 0))
1501 return ERR_PTR(ret); 1503 return ERR_PTR(ret);
1502 1504
1503 ffs = ffs_data_new(); 1505 ffs = ffs_data_new(dev_name);
1504 if (unlikely(!ffs)) 1506 if (unlikely(!ffs))
1505 return ERR_PTR(-ENOMEM); 1507 return ERR_PTR(-ENOMEM);
1506 ffs->file_perms = data.perms; 1508 ffs->file_perms = data.perms;
@@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs)
1610 BUG_ON(waitqueue_active(&ffs->ev.waitq) || 1612 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1611 waitqueue_active(&ffs->ep0req_completion.wait) || 1613 waitqueue_active(&ffs->ep0req_completion.wait) ||
1612 waitqueue_active(&ffs->wait)); 1614 waitqueue_active(&ffs->wait));
1615 destroy_workqueue(ffs->io_completion_wq);
1613 kfree(ffs->dev_name); 1616 kfree(ffs->dev_name);
1614 kfree(ffs); 1617 kfree(ffs);
1615 } 1618 }
@@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs)
1642 ffs_data_put(ffs); 1645 ffs_data_put(ffs);
1643} 1646}
1644 1647
1645static struct ffs_data *ffs_data_new(void) 1648static struct ffs_data *ffs_data_new(const char *dev_name)
1646{ 1649{
1647 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1650 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1648 if (unlikely(!ffs)) 1651 if (unlikely(!ffs))
@@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void)
1650 1653
1651 ENTER(); 1654 ENTER();
1652 1655
1656 ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
1657 if (!ffs->io_completion_wq) {
1658 kfree(ffs);
1659 return NULL;
1660 }
1661
1653 refcount_set(&ffs->ref, 1); 1662 refcount_set(&ffs->ref, 1);
1654 atomic_set(&ffs->opened, 0); 1663 atomic_set(&ffs->opened, 0);
1655 ffs->state = FFS_READ_DESCRIPTORS; 1664 ffs->state = FFS_READ_DESCRIPTORS;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d6bd0244b008..5153e29870c3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -307,8 +307,6 @@ struct fsg_common {
307 struct completion thread_notifier; 307 struct completion thread_notifier;
308 struct task_struct *thread_task; 308 struct task_struct *thread_task;
309 309
310 /* Callback functions. */
311 const struct fsg_operations *ops;
312 /* Gadget's private data. */ 310 /* Gadget's private data. */
313 void *private_data; 311 void *private_data;
314 312
@@ -2438,6 +2436,7 @@ static void handle_exception(struct fsg_common *common)
2438static int fsg_main_thread(void *common_) 2436static int fsg_main_thread(void *common_)
2439{ 2437{
2440 struct fsg_common *common = common_; 2438 struct fsg_common *common = common_;
2439 int i;
2441 2440
2442 /* 2441 /*
2443 * Allow the thread to be killed by a signal, but set the signal mask 2442 * Allow the thread to be killed by a signal, but set the signal mask
@@ -2476,21 +2475,16 @@ static int fsg_main_thread(void *common_)
2476 common->thread_task = NULL; 2475 common->thread_task = NULL;
2477 spin_unlock_irq(&common->lock); 2476 spin_unlock_irq(&common->lock);
2478 2477
2479 if (!common->ops || !common->ops->thread_exits 2478 /* Eject media from all LUNs */
2480 || common->ops->thread_exits(common) < 0) {
2481 int i;
2482 2479
2483 down_write(&common->filesem); 2480 down_write(&common->filesem);
2484 for (i = 0; i < ARRAY_SIZE(common->luns); i++) { 2481 for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2485 struct fsg_lun *curlun = common->luns[i]; 2482 struct fsg_lun *curlun = common->luns[i];
2486 if (!curlun || !fsg_lun_is_open(curlun))
2487 continue;
2488 2483
2484 if (curlun && fsg_lun_is_open(curlun))
2489 fsg_lun_close(curlun); 2485 fsg_lun_close(curlun);
2490 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2491 }
2492 up_write(&common->filesem);
2493 } 2486 }
2487 up_write(&common->filesem);
2494 2488
2495 /* Let fsg_unbind() know the thread has exited */ 2489 /* Let fsg_unbind() know the thread has exited */
2496 complete_and_exit(&common->thread_notifier, 0); 2490 complete_and_exit(&common->thread_notifier, 0);
@@ -2681,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
2681} 2675}
2682EXPORT_SYMBOL_GPL(fsg_common_remove_luns); 2676EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2683 2677
2684void fsg_common_set_ops(struct fsg_common *common,
2685 const struct fsg_operations *ops)
2686{
2687 common->ops = ops;
2688}
2689EXPORT_SYMBOL_GPL(fsg_common_set_ops);
2690
2691void fsg_common_free_buffers(struct fsg_common *common) 2678void fsg_common_free_buffers(struct fsg_common *common)
2692{ 2679{
2693 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2680 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index d3902313b8ac..dc05ca0c4359 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
60struct fsg_common; 60struct fsg_common;
61 61
62/* FSF callback functions */ 62/* FSF callback functions */
63struct fsg_operations {
64 /*
65 * Callback function to call when thread exits. If no
66 * callback is set or it returns value lower then zero MSF
67 * will force eject all LUNs it operates on (including those
68 * marked as non-removable or with prevent_medium_removal flag
69 * set).
70 */
71 int (*thread_exits)(struct fsg_common *common);
72};
73
74struct fsg_lun_opts { 63struct fsg_lun_opts {
75 struct config_group group; 64 struct config_group group;
76 struct fsg_lun *lun; 65 struct fsg_lun *lun;
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
142 131
143void fsg_common_remove_luns(struct fsg_common *common); 132void fsg_common_remove_luns(struct fsg_common *common);
144 133
145void fsg_common_set_ops(struct fsg_common *common,
146 const struct fsg_operations *ops);
147
148int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, 134int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
149 unsigned int id, const char *name, 135 unsigned int id, const char *name,
150 const char **name_pfx); 136 const char **name_pfx);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 8df244fc9d80..ea0da35a44e2 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
555 size_t size; /* Amount of data in a TX request. */ 555 size_t size; /* Amount of data in a TX request. */
556 size_t bytes_copied = 0; 556 size_t bytes_copied = 0;
557 struct usb_request *req; 557 struct usb_request *req;
558 int value;
558 559
559 DBG(dev, "printer_write trying to send %d bytes\n", (int)len); 560 DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
560 561
@@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
634 return -EAGAIN; 635 return -EAGAIN;
635 } 636 }
636 637
637 if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { 638 /* here, we unlock, and only unlock, to avoid deadlock. */
639 spin_unlock(&dev->lock);
640 value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
641 spin_lock(&dev->lock);
642 if (value) {
638 list_add(&req->list, &dev->tx_reqs); 643 list_add(&req->list, &dev->tx_reqs);
639 spin_unlock_irqrestore(&dev->lock, flags); 644 spin_unlock_irqrestore(&dev->lock, flags);
640 mutex_unlock(&dev->lock_printer_io); 645 mutex_unlock(&dev->lock_printer_io);
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e1d5853ef1e4..c7c5b3ce1d98 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f)
908 free_netdev(opts->net); 908 free_netdev(opts->net);
909 } 909 }
910 910
911 kfree(opts->rndis_interf_group); /* single VLA chunk */
911 kfree(opts); 912 kfree(opts);
912} 913}
913 914
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
916 struct f_rndis_opts *opts; 917 struct f_rndis_opts *opts;
917 struct usb_os_desc *descs[1]; 918 struct usb_os_desc *descs[1];
918 char *names[1]; 919 char *names[1];
920 struct config_group *rndis_interf_group;
919 921
920 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 922 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
921 if (!opts) 923 if (!opts)
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void)
940 names[0] = "rndis"; 942 names[0] = "rndis";
941 config_group_init_type_name(&opts->func_inst.group, "", 943 config_group_init_type_name(&opts->func_inst.group, "",
942 &rndis_func_type); 944 &rndis_func_type);
943 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, 945 rndis_interf_group =
944 names, THIS_MODULE); 946 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
947 names, THIS_MODULE);
948 if (IS_ERR(rndis_interf_group)) {
949 rndis_free_inst(&opts->func_inst);
950 return ERR_CAST(rndis_interf_group);
951 }
952 opts->rndis_interf_group = rndis_interf_group;
945 953
946 return &opts->func_inst; 954 return &opts->func_inst;
947} 955}
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 540f1c48c1a8..79f70ebf85dc 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -279,6 +279,7 @@ struct ffs_data {
279 } file_perms; 279 } file_perms;
280 280
281 struct eventfd_ctx *ffs_eventfd; 281 struct eventfd_ctx *ffs_eventfd;
282 struct workqueue_struct *io_completion_wq;
282 bool no_disconnect; 283 bool no_disconnect;
283 struct work_struct reset_work; 284 struct work_struct reset_work;
284 285
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h
index a35ee3c2545d..efdb7ac381d9 100644
--- a/drivers/usb/gadget/function/u_rndis.h
+++ b/drivers/usb/gadget/function/u_rndis.h
@@ -26,6 +26,7 @@ struct f_rndis_opts {
26 bool bound; 26 bool bound;
27 bool borrowed_net; 27 bool borrowed_net;
28 28
29 struct config_group *rndis_interf_group;
29 struct usb_os_desc rndis_os_desc; 30 struct usb_os_desc rndis_os_desc;
30 char rndis_ext_compat_id[16]; 31 char rndis_ext_compat_id[16];
31 32
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 684900fcfe24..5c28bee327e1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -28,7 +28,7 @@
28#include <linux/aio.h> 28#include <linux/aio.h>
29#include <linux/uio.h> 29#include <linux/uio.h>
30#include <linux/refcount.h> 30#include <linux/refcount.h>
31 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34 34
@@ -116,6 +116,7 @@ enum ep0_state {
116struct dev_data { 116struct dev_data {
117 spinlock_t lock; 117 spinlock_t lock;
118 refcount_t count; 118 refcount_t count;
119 int udc_usage;
119 enum ep0_state state; /* P: lock */ 120 enum ep0_state state; /* P: lock */
120 struct usb_gadgetfs_event event [N_EVENT]; 121 struct usb_gadgetfs_event event [N_EVENT];
121 unsigned ev_next; 122 unsigned ev_next;
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
513 INIT_WORK(&priv->work, ep_user_copy_worker); 514 INIT_WORK(&priv->work, ep_user_copy_worker);
514 schedule_work(&priv->work); 515 schedule_work(&priv->work);
515 } 516 }
516 spin_unlock(&epdata->dev->lock);
517 517
518 usb_ep_free_request(ep, req); 518 usb_ep_free_request(ep, req);
519 spin_unlock(&epdata->dev->lock);
519 put_ep(epdata); 520 put_ep(epdata);
520} 521}
521 522
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
939 struct usb_request *req = dev->req; 940 struct usb_request *req = dev->req;
940 941
941 if ((retval = setup_req (ep, req, 0)) == 0) { 942 if ((retval = setup_req (ep, req, 0)) == 0) {
943 ++dev->udc_usage;
942 spin_unlock_irq (&dev->lock); 944 spin_unlock_irq (&dev->lock);
943 retval = usb_ep_queue (ep, req, GFP_KERNEL); 945 retval = usb_ep_queue (ep, req, GFP_KERNEL);
944 spin_lock_irq (&dev->lock); 946 spin_lock_irq (&dev->lock);
947 --dev->udc_usage;
945 } 948 }
946 dev->state = STATE_DEV_CONNECTED; 949 dev->state = STATE_DEV_CONNECTED;
947 950
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
983 retval = -EIO; 986 retval = -EIO;
984 else { 987 else {
985 len = min (len, (size_t)dev->req->actual); 988 len = min (len, (size_t)dev->req->actual);
986// FIXME don't call this with the spinlock held ... 989 ++dev->udc_usage;
990 spin_unlock_irq(&dev->lock);
987 if (copy_to_user (buf, dev->req->buf, len)) 991 if (copy_to_user (buf, dev->req->buf, len))
988 retval = -EFAULT; 992 retval = -EFAULT;
989 else 993 else
990 retval = len; 994 retval = len;
995 spin_lock_irq(&dev->lock);
996 --dev->udc_usage;
991 clean_req (dev->gadget->ep0, dev->req); 997 clean_req (dev->gadget->ep0, dev->req);
992 /* NOTE userspace can't yet choose to stall */ 998 /* NOTE userspace can't yet choose to stall */
993 } 999 }
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1131 retval = setup_req (dev->gadget->ep0, dev->req, len); 1137 retval = setup_req (dev->gadget->ep0, dev->req, len);
1132 if (retval == 0) { 1138 if (retval == 0) {
1133 dev->state = STATE_DEV_CONNECTED; 1139 dev->state = STATE_DEV_CONNECTED;
1140 ++dev->udc_usage;
1134 spin_unlock_irq (&dev->lock); 1141 spin_unlock_irq (&dev->lock);
1135 if (copy_from_user (dev->req->buf, buf, len)) 1142 if (copy_from_user (dev->req->buf, buf, len))
1136 retval = -EFAULT; 1143 retval = -EFAULT;
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1142 GFP_KERNEL); 1149 GFP_KERNEL);
1143 } 1150 }
1144 spin_lock_irq(&dev->lock); 1151 spin_lock_irq(&dev->lock);
1152 --dev->udc_usage;
1145 if (retval < 0) { 1153 if (retval < 0) {
1146 clean_req (dev->gadget->ep0, dev->req); 1154 clean_req (dev->gadget->ep0, dev->req);
1147 } else 1155 } else
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1243 struct usb_gadget *gadget = dev->gadget; 1251 struct usb_gadget *gadget = dev->gadget;
1244 long ret = -ENOTTY; 1252 long ret = -ENOTTY;
1245 1253
1246 if (gadget->ops->ioctl) 1254 spin_lock_irq(&dev->lock);
1255 if (dev->state == STATE_DEV_OPENED ||
1256 dev->state == STATE_DEV_UNBOUND) {
1257 /* Not bound to a UDC */
1258 } else if (gadget->ops->ioctl) {
1259 ++dev->udc_usage;
1260 spin_unlock_irq(&dev->lock);
1261
1247 ret = gadget->ops->ioctl (gadget, code, value); 1262 ret = gadget->ops->ioctl (gadget, code, value);
1248 1263
1264 spin_lock_irq(&dev->lock);
1265 --dev->udc_usage;
1266 }
1267 spin_unlock_irq(&dev->lock);
1268
1249 return ret; 1269 return ret;
1250} 1270}
1251 1271
@@ -1463,10 +1483,12 @@ delegate:
1463 if (value < 0) 1483 if (value < 0)
1464 break; 1484 break;
1465 1485
1486 ++dev->udc_usage;
1466 spin_unlock (&dev->lock); 1487 spin_unlock (&dev->lock);
1467 value = usb_ep_queue (gadget->ep0, dev->req, 1488 value = usb_ep_queue (gadget->ep0, dev->req,
1468 GFP_KERNEL); 1489 GFP_KERNEL);
1469 spin_lock (&dev->lock); 1490 spin_lock (&dev->lock);
1491 --dev->udc_usage;
1470 if (value < 0) { 1492 if (value < 0) {
1471 clean_req (gadget->ep0, dev->req); 1493 clean_req (gadget->ep0, dev->req);
1472 break; 1494 break;
@@ -1490,8 +1512,12 @@ delegate:
1490 req->length = value; 1512 req->length = value;
1491 req->zero = value < w_length; 1513 req->zero = value < w_length;
1492 1514
1515 ++dev->udc_usage;
1493 spin_unlock (&dev->lock); 1516 spin_unlock (&dev->lock);
1494 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); 1517 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1518 spin_lock(&dev->lock);
1519 --dev->udc_usage;
1520 spin_unlock(&dev->lock);
1495 if (value < 0) { 1521 if (value < 0) {
1496 DBG (dev, "ep_queue --> %d\n", value); 1522 DBG (dev, "ep_queue --> %d\n", value);
1497 req->status = 0; 1523 req->status = 0;
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
1518 /* break link to FS */ 1544 /* break link to FS */
1519 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); 1545 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1520 list_del_init (&ep->epfiles); 1546 list_del_init (&ep->epfiles);
1547 spin_unlock_irq (&dev->lock);
1548
1521 dentry = ep->dentry; 1549 dentry = ep->dentry;
1522 ep->dentry = NULL; 1550 ep->dentry = NULL;
1523 parent = d_inode(dentry->d_parent); 1551 parent = d_inode(dentry->d_parent);
1524 1552
1525 /* break link to controller */ 1553 /* break link to controller */
1554 mutex_lock(&ep->lock);
1526 if (ep->state == STATE_EP_ENABLED) 1555 if (ep->state == STATE_EP_ENABLED)
1527 (void) usb_ep_disable (ep->ep); 1556 (void) usb_ep_disable (ep->ep);
1528 ep->state = STATE_EP_UNBOUND; 1557 ep->state = STATE_EP_UNBOUND;
1529 usb_ep_free_request (ep->ep, ep->req); 1558 usb_ep_free_request (ep->ep, ep->req);
1530 ep->ep = NULL; 1559 ep->ep = NULL;
1560 mutex_unlock(&ep->lock);
1561
1531 wake_up (&ep->wait); 1562 wake_up (&ep->wait);
1532 put_ep (ep); 1563 put_ep (ep);
1533 1564
1534 spin_unlock_irq (&dev->lock);
1535
1536 /* break link to dcache */ 1565 /* break link to dcache */
1537 inode_lock(parent); 1566 inode_lock(parent);
1538 d_delete (dentry); 1567 d_delete (dentry);
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
1603 1632
1604 spin_lock_irq (&dev->lock); 1633 spin_lock_irq (&dev->lock);
1605 dev->state = STATE_DEV_UNBOUND; 1634 dev->state = STATE_DEV_UNBOUND;
1635 while (dev->udc_usage > 0) {
1636 spin_unlock_irq(&dev->lock);
1637 usleep_range(1000, 2000);
1638 spin_lock_irq(&dev->lock);
1639 }
1606 spin_unlock_irq (&dev->lock); 1640 spin_unlock_irq (&dev->lock);
1607 1641
1608 destroy_ep_files (dev); 1642 destroy_ep_files (dev);
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index e99ab57ee3e5..fcba59782f26 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
107 107
108FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); 108FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
109 109
110static unsigned long msg_registered;
111static void msg_cleanup(void);
112
113static int msg_thread_exits(struct fsg_common *common)
114{
115 msg_cleanup();
116 return 0;
117}
118
119static int msg_do_config(struct usb_configuration *c) 110static int msg_do_config(struct usb_configuration *c)
120{ 111{
121 struct fsg_opts *opts; 112 struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
154 145
155static int msg_bind(struct usb_composite_dev *cdev) 146static int msg_bind(struct usb_composite_dev *cdev)
156{ 147{
157 static const struct fsg_operations ops = {
158 .thread_exits = msg_thread_exits,
159 };
160 struct fsg_opts *opts; 148 struct fsg_opts *opts;
161 struct fsg_config config; 149 struct fsg_config config;
162 int status; 150 int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
173 if (status) 161 if (status)
174 goto fail; 162 goto fail;
175 163
176 fsg_common_set_ops(opts->common, &ops);
177
178 status = fsg_common_set_cdev(opts->common, cdev, config.can_stall); 164 status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
179 if (status) 165 if (status)
180 goto fail_set_cdev; 166 goto fail_set_cdev;
@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
256 242
257static int __init msg_init(void) 243static int __init msg_init(void)
258{ 244{
259 int ret; 245 return usb_composite_probe(&msg_driver);
260
261 ret = usb_composite_probe(&msg_driver);
262 set_bit(0, &msg_registered);
263
264 return ret;
265} 246}
266module_init(msg_init); 247module_init(msg_init);
267 248
268static void msg_cleanup(void) 249static void __exit msg_cleanup(void)
269{ 250{
270 if (test_and_clear_bit(0, &msg_registered)) 251 usb_composite_unregister(&msg_driver);
271 usb_composite_unregister(&msg_driver);
272} 252}
273module_exit(msg_cleanup); 253module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 7cd5c969fcbe..1e9567091d86 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -273,6 +273,7 @@ config USB_SNP_CORE
273config USB_SNP_UDC_PLAT 273config USB_SNP_UDC_PLAT
274 tristate "Synopsys USB 2.0 Device controller" 274 tristate "Synopsys USB 2.0 Device controller"
275 depends on USB_GADGET && OF && HAS_DMA 275 depends on USB_GADGET && OF && HAS_DMA
276 depends on EXTCON || EXTCON=n
276 select USB_GADGET_DUALSPEED 277 select USB_GADGET_DUALSPEED
277 select USB_SNP_CORE 278 select USB_SNP_CORE
278 default ARCH_BCM_IPROC 279 default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 98d71400f8a1..a884c022df7a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -29,6 +29,8 @@
29#include <linux/of_gpio.h> 29#include <linux/of_gpio.h>
30 30
31#include "atmel_usba_udc.h" 31#include "atmel_usba_udc.h"
32#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
33 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
32 34
33#ifdef CONFIG_USB_GADGET_DEBUG_FS 35#ifdef CONFIG_USB_GADGET_DEBUG_FS
34#include <linux/debugfs.h> 36#include <linux/debugfs.h>
@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
2361 IRQ_NOAUTOEN); 2363 IRQ_NOAUTOEN);
2362 ret = devm_request_threaded_irq(&pdev->dev, 2364 ret = devm_request_threaded_irq(&pdev->dev,
2363 gpio_to_irq(udc->vbus_pin), NULL, 2365 gpio_to_irq(udc->vbus_pin), NULL,
2364 usba_vbus_irq_thread, IRQF_ONESHOT, 2366 usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
2365 "atmel_usba_udc", udc); 2367 "atmel_usba_udc", udc);
2366 if (ret) { 2368 if (ret) {
2367 udc->vbus_pin = -ENODEV; 2369 udc->vbus_pin = -ENODEV;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 75c51ca4ee0f..d41d07aae0ce 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
1320 udc->dev.driver = &driver->driver; 1320 udc->dev.driver = &driver->driver;
1321 udc->gadget->dev.driver = &driver->driver; 1321 udc->gadget->dev.driver = &driver->driver;
1322 1322
1323 if (driver->max_speed < udc->gadget->max_speed) 1323 usb_gadget_udc_set_speed(udc, driver->max_speed);
1324 usb_gadget_udc_set_speed(udc, driver->max_speed);
1325 1324
1326 ret = driver->bind(udc->gadget, driver); 1325 ret = driver->bind(udc->gadget, driver);
1327 if (ret) 1326 if (ret)
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a030d7923d7d..f04e91ef9e7c 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -237,6 +237,8 @@ struct dummy_hcd {
237 237
238 struct usb_device *udev; 238 struct usb_device *udev;
239 struct list_head urbp_list; 239 struct list_head urbp_list;
240 struct urbp *next_frame_urbp;
241
240 u32 stream_en_ep; 242 u32 stream_en_ep;
241 u8 num_stream[30 / 2]; 243 u8 num_stream[30 / 2];
242 244
@@ -253,11 +255,13 @@ struct dummy {
253 */ 255 */
254 struct dummy_ep ep[DUMMY_ENDPOINTS]; 256 struct dummy_ep ep[DUMMY_ENDPOINTS];
255 int address; 257 int address;
258 int callback_usage;
256 struct usb_gadget gadget; 259 struct usb_gadget gadget;
257 struct usb_gadget_driver *driver; 260 struct usb_gadget_driver *driver;
258 struct dummy_request fifo_req; 261 struct dummy_request fifo_req;
259 u8 fifo_buf[FIFO_SIZE]; 262 u8 fifo_buf[FIFO_SIZE];
260 u16 devstatus; 263 u16 devstatus;
264 unsigned ints_enabled:1;
261 unsigned udc_suspended:1; 265 unsigned udc_suspended:1;
262 unsigned pullup:1; 266 unsigned pullup:1;
263 267
@@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
375 USB_PORT_STAT_CONNECTION) == 0) 379 USB_PORT_STAT_CONNECTION) == 0)
376 dum_hcd->port_status |= 380 dum_hcd->port_status |=
377 (USB_PORT_STAT_C_CONNECTION << 16); 381 (USB_PORT_STAT_C_CONNECTION << 16);
378 if ((dum_hcd->port_status & 382 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
379 USB_PORT_STAT_ENABLE) == 1 && 383 (dum_hcd->port_status &
380 (dum_hcd->port_status & 384 USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
381 USB_SS_PORT_LS_U0) == 1 && 385 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
382 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
383 dum_hcd->active = 1; 386 dum_hcd->active = 1;
384 } 387 }
385 } else { 388 } else {
@@ -416,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
416static void set_link_state(struct dummy_hcd *dum_hcd) 419static void set_link_state(struct dummy_hcd *dum_hcd)
417{ 420{
418 struct dummy *dum = dum_hcd->dum; 421 struct dummy *dum = dum_hcd->dum;
422 unsigned int power_bit;
419 423
420 dum_hcd->active = 0; 424 dum_hcd->active = 0;
421 if (dum->pullup) 425 if (dum->pullup)
@@ -426,32 +430,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
426 return; 430 return;
427 431
428 set_link_state_by_speed(dum_hcd); 432 set_link_state_by_speed(dum_hcd);
433 power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
434 USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
429 435
430 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || 436 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
431 dum_hcd->active) 437 dum_hcd->active)
432 dum_hcd->resuming = 0; 438 dum_hcd->resuming = 0;
433 439
434 /* Currently !connected or in reset */ 440 /* Currently !connected or in reset */
435 if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 || 441 if ((dum_hcd->port_status & power_bit) == 0 ||
436 (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { 442 (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
437 unsigned disconnect = USB_PORT_STAT_CONNECTION & 443 unsigned int disconnect = power_bit &
438 dum_hcd->old_status & (~dum_hcd->port_status); 444 dum_hcd->old_status & (~dum_hcd->port_status);
439 unsigned reset = USB_PORT_STAT_RESET & 445 unsigned int reset = USB_PORT_STAT_RESET &
440 (~dum_hcd->old_status) & dum_hcd->port_status; 446 (~dum_hcd->old_status) & dum_hcd->port_status;
441 447
442 /* Report reset and disconnect events to the driver */ 448 /* Report reset and disconnect events to the driver */
443 if (dum->driver && (disconnect || reset)) { 449 if (dum->ints_enabled && (disconnect || reset)) {
444 stop_activity(dum); 450 stop_activity(dum);
451 ++dum->callback_usage;
452 spin_unlock(&dum->lock);
445 if (reset) 453 if (reset)
446 usb_gadget_udc_reset(&dum->gadget, dum->driver); 454 usb_gadget_udc_reset(&dum->gadget, dum->driver);
447 else 455 else
448 dum->driver->disconnect(&dum->gadget); 456 dum->driver->disconnect(&dum->gadget);
457 spin_lock(&dum->lock);
458 --dum->callback_usage;
449 } 459 }
450 } else if (dum_hcd->active != dum_hcd->old_active) { 460 } else if (dum_hcd->active != dum_hcd->old_active &&
461 dum->ints_enabled) {
462 ++dum->callback_usage;
463 spin_unlock(&dum->lock);
451 if (dum_hcd->old_active && dum->driver->suspend) 464 if (dum_hcd->old_active && dum->driver->suspend)
452 dum->driver->suspend(&dum->gadget); 465 dum->driver->suspend(&dum->gadget);
453 else if (!dum_hcd->old_active && dum->driver->resume) 466 else if (!dum_hcd->old_active && dum->driver->resume)
454 dum->driver->resume(&dum->gadget); 467 dum->driver->resume(&dum->gadget);
468 spin_lock(&dum->lock);
469 --dum->callback_usage;
455 } 470 }
456 471
457 dum_hcd->old_status = dum_hcd->port_status; 472 dum_hcd->old_status = dum_hcd->port_status;
@@ -972,8 +987,11 @@ static int dummy_udc_start(struct usb_gadget *g,
972 * can't enumerate without help from the driver we're binding. 987 * can't enumerate without help from the driver we're binding.
973 */ 988 */
974 989
990 spin_lock_irq(&dum->lock);
975 dum->devstatus = 0; 991 dum->devstatus = 0;
976 dum->driver = driver; 992 dum->driver = driver;
993 dum->ints_enabled = 1;
994 spin_unlock_irq(&dum->lock);
977 995
978 return 0; 996 return 0;
979} 997}
@@ -984,6 +1002,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
984 struct dummy *dum = dum_hcd->dum; 1002 struct dummy *dum = dum_hcd->dum;
985 1003
986 spin_lock_irq(&dum->lock); 1004 spin_lock_irq(&dum->lock);
1005 dum->ints_enabled = 0;
1006 stop_activity(dum);
1007
1008 /* emulate synchronize_irq(): wait for callbacks to finish */
1009 while (dum->callback_usage > 0) {
1010 spin_unlock_irq(&dum->lock);
1011 usleep_range(1000, 2000);
1012 spin_lock_irq(&dum->lock);
1013 }
1014
987 dum->driver = NULL; 1015 dum->driver = NULL;
988 spin_unlock_irq(&dum->lock); 1016 spin_unlock_irq(&dum->lock);
989 1017
@@ -1037,7 +1065,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
1037 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); 1065 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
1038 dum->gadget.name = gadget_name; 1066 dum->gadget.name = gadget_name;
1039 dum->gadget.ops = &dummy_ops; 1067 dum->gadget.ops = &dummy_ops;
1040 dum->gadget.max_speed = USB_SPEED_SUPER; 1068 if (mod_data.is_super_speed)
1069 dum->gadget.max_speed = USB_SPEED_SUPER;
1070 else if (mod_data.is_high_speed)
1071 dum->gadget.max_speed = USB_SPEED_HIGH;
1072 else
1073 dum->gadget.max_speed = USB_SPEED_FULL;
1041 1074
1042 dum->gadget.dev.parent = &pdev->dev; 1075 dum->gadget.dev.parent = &pdev->dev;
1043 init_dummy_udc_hw(dum); 1076 init_dummy_udc_hw(dum);
@@ -1246,6 +1279,8 @@ static int dummy_urb_enqueue(
1246 1279
1247 list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list); 1280 list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
1248 urb->hcpriv = urbp; 1281 urb->hcpriv = urbp;
1282 if (!dum_hcd->next_frame_urbp)
1283 dum_hcd->next_frame_urbp = urbp;
1249 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) 1284 if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
1250 urb->error_count = 1; /* mark as a new urb */ 1285 urb->error_count = 1; /* mark as a new urb */
1251 1286
@@ -1521,6 +1556,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
1521 if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ? 1556 if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
1522 dum->ss_hcd : dum->hs_hcd))) 1557 dum->ss_hcd : dum->hs_hcd)))
1523 return NULL; 1558 return NULL;
1559 if (!dum->ints_enabled)
1560 return NULL;
1524 if ((address & ~USB_DIR_IN) == 0) 1561 if ((address & ~USB_DIR_IN) == 0)
1525 return &dum->ep[0]; 1562 return &dum->ep[0];
1526 for (i = 1; i < DUMMY_ENDPOINTS; i++) { 1563 for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1762,6 +1799,7 @@ static void dummy_timer(unsigned long _dum_hcd)
1762 spin_unlock_irqrestore(&dum->lock, flags); 1799 spin_unlock_irqrestore(&dum->lock, flags);
1763 return; 1800 return;
1764 } 1801 }
1802 dum_hcd->next_frame_urbp = NULL;
1765 1803
1766 for (i = 0; i < DUMMY_ENDPOINTS; i++) { 1804 for (i = 0; i < DUMMY_ENDPOINTS; i++) {
1767 if (!ep_info[i].name) 1805 if (!ep_info[i].name)
@@ -1778,6 +1816,10 @@ restart:
1778 int type; 1816 int type;
1779 int status = -EINPROGRESS; 1817 int status = -EINPROGRESS;
1780 1818
1819 /* stop when we reach URBs queued after the timer interrupt */
1820 if (urbp == dum_hcd->next_frame_urbp)
1821 break;
1822
1781 urb = urbp->urb; 1823 urb = urbp->urb;
1782 if (urb->unlinked) 1824 if (urb->unlinked)
1783 goto return_urb; 1825 goto return_urb;
@@ -1857,10 +1899,12 @@ restart:
1857 * until setup() returns; no reentrancy issues etc. 1899 * until setup() returns; no reentrancy issues etc.
1858 */ 1900 */
1859 if (value > 0) { 1901 if (value > 0) {
1902 ++dum->callback_usage;
1860 spin_unlock(&dum->lock); 1903 spin_unlock(&dum->lock);
1861 value = dum->driver->setup(&dum->gadget, 1904 value = dum->driver->setup(&dum->gadget,
1862 &setup); 1905 &setup);
1863 spin_lock(&dum->lock); 1906 spin_lock(&dum->lock);
1907 --dum->callback_usage;
1864 1908
1865 if (value >= 0) { 1909 if (value >= 0) {
1866 /* no delays (max 64KB data stage) */ 1910 /* no delays (max 64KB data stage) */
@@ -2561,8 +2605,6 @@ static struct hc_driver dummy_hcd = {
2561 .product_desc = "Dummy host controller", 2605 .product_desc = "Dummy host controller",
2562 .hcd_priv_size = sizeof(struct dummy_hcd), 2606 .hcd_priv_size = sizeof(struct dummy_hcd),
2563 2607
2564 .flags = HCD_USB3 | HCD_SHARED,
2565
2566 .reset = dummy_setup, 2608 .reset = dummy_setup,
2567 .start = dummy_start, 2609 .start = dummy_start,
2568 .stop = dummy_stop, 2610 .stop = dummy_stop,
@@ -2591,8 +2633,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
2591 dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); 2633 dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
2592 dum = *((void **)dev_get_platdata(&pdev->dev)); 2634 dum = *((void **)dev_get_platdata(&pdev->dev));
2593 2635
2594 if (!mod_data.is_super_speed) 2636 if (mod_data.is_super_speed)
2637 dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
2638 else if (mod_data.is_high_speed)
2595 dummy_hcd.flags = HCD_USB2; 2639 dummy_hcd.flags = HCD_USB2;
2640 else
2641 dummy_hcd.flags = HCD_USB11;
2596 hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); 2642 hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
2597 if (!hs_hcd) 2643 if (!hs_hcd)
2598 return -ENOMEM; 2644 return -ENOMEM;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index df37c1e6e9d5..63a206122058 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
1038 usb3_ep->ep.maxpacket); 1038 usb3_ep->ep.maxpacket);
1039 u8 *buf = usb3_req->req.buf + usb3_req->req.actual; 1039 u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
1040 u32 tmp = 0; 1040 u32 tmp = 0;
1041 bool is_last; 1041 bool is_last = !len ? true : false;
1042 1042
1043 if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) 1043 if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
1044 return -EBUSY; 1044 return -EBUSY;
@@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
1059 usb3_write(usb3, tmp, fifo_reg); 1059 usb3_write(usb3, tmp, fifo_reg);
1060 } 1060 }
1061 1061
1062 is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); 1062 if (!is_last)
1063 is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
1063 /* Send the data */ 1064 /* Send the data */
1064 usb3_set_px_con_send(usb3_ep, len, is_last); 1065 usb3_set_px_con_send(usb3_ep, len, is_last);
1065 1066
@@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
1150 usb3_set_p0_con_for_ctrl_read_data(usb3); 1151 usb3_set_p0_con_for_ctrl_read_data(usb3);
1151 } else { 1152 } else {
1152 usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); 1153 usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
1153 usb3_set_p0_con_for_ctrl_write_data(usb3); 1154 if (usb3_req->req.length)
1155 usb3_set_p0_con_for_ctrl_write_data(usb3);
1154 } 1156 }
1155 1157
1156 usb3_p0_xfer(usb3_ep, usb3_req); 1158 usb3_p0_xfer(usb3_ep, usb3_req);
@@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size)
2053static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, 2055static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
2054 const struct usb_endpoint_descriptor *desc) 2056 const struct usb_endpoint_descriptor *desc)
2055{ 2057{
2056 return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc)); 2058 int i;
2059 const u32 max_packet_array[] = {8, 16, 32, 64, 512};
2060 u32 mpkt = PN_RAMMAP_MPKT(1024);
2061
2062 for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
2063 if (usb_endpoint_maxp(desc) <= max_packet_array[i])
2064 mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
2065 }
2066
2067 return usb3_ep->rammap_val | mpkt;
2057} 2068}
2058 2069
2059static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, 2070static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 658d9d1f9ea3..6dda3623a276 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
447 if ((value & ASMT_CONTROL_WRITE_BIT) == 0) 447 if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
448 return 0; 448 return 0;
449 449
450 usleep_range(40, 60); 450 udelay(50);
451 } 451 }
452 452
453 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); 453 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
1022 * 1022 *
1023 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. 1023 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
1024 * It signals to the BIOS that the OS wants control of the host controller, 1024 * It signals to the BIOS that the OS wants control of the host controller,
1025 * and then waits 5 seconds for the BIOS to hand over control. 1025 * and then waits 1 second for the BIOS to hand over control.
1026 * If we timeout, assume the BIOS is broken and take control anyway. 1026 * If we timeout, assume the BIOS is broken and take control anyway.
1027 */ 1027 */
1028static void quirk_usb_handoff_xhci(struct pci_dev *pdev) 1028static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1069 if (val & XHCI_HC_BIOS_OWNED) { 1069 if (val & XHCI_HC_BIOS_OWNED) {
1070 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); 1070 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1071 1071
1072 /* Wait for 5 seconds with 10 microsecond polling interval */ 1072 /* Wait for 1 second with 10 microsecond polling interval */
1073 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 1073 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1074 0, 5000, 10); 1074 0, 1000000, 10);
1075 1075
1076 /* Assume a buggy BIOS and take HC ownership anyway */ 1076 /* Assume a buggy BIOS and take HC ownership anyway */
1077 if (timeout) { 1077 if (timeout) {
@@ -1100,7 +1100,7 @@ hc_init:
1100 * operational or runtime registers. Wait 5 seconds and no more. 1100 * operational or runtime registers. Wait 5 seconds and no more.
1101 */ 1101 */
1102 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, 1102 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1103 5000, 10); 1103 5000000, 10);
1104 /* Assume a buggy HC and start HC initialization anyway */ 1104 /* Assume a buggy HC and start HC initialization anyway */
1105 if (timeout) { 1105 if (timeout) {
1106 val = readl(op_reg_base + XHCI_STS_OFFSET); 1106 val = readl(op_reg_base + XHCI_STS_OFFSET);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index ad89a6d4111b..a2336deb5e36 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
112 112
113 /* If PSI table exists, add the custom speed attributes from it */ 113 /* If PSI table exists, add the custom speed attributes from it */
114 if (usb3_1 && xhci->usb3_rhub.psi_count) { 114 if (usb3_1 && xhci->usb3_rhub.psi_count) {
115 u32 ssp_cap_base, bm_attrib, psi; 115 u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
116 int offset; 116 int offset;
117 117
118 ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; 118 ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
139 for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { 139 for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
140 psi = xhci->usb3_rhub.psi[i]; 140 psi = xhci->usb3_rhub.psi[i];
141 psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; 141 psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
142 psi_exp = XHCI_EXT_PORT_PSIE(psi);
143 psi_mant = XHCI_EXT_PORT_PSIM(psi);
144
145 /* Shift to Gbps and set SSP Link BIT(14) if 10Gpbs */
146 for (; psi_exp < 3; psi_exp++)
147 psi_mant /= 1000;
148 if (psi_mant >= 10)
149 psi |= BIT(14);
150
142 if ((psi & PLT_MASK) == PLT_SYM) { 151 if ((psi & PLT_MASK) == PLT_SYM) {
143 /* Symmetric, create SSA RX and TX from one PSI entry */ 152 /* Symmetric, create SSA RX and TX from one PSI entry */
144 put_unaligned_le32(psi, &buf[offset]); 153 put_unaligned_le32(psi, &buf[offset]);
@@ -411,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
411 GFP_NOWAIT); 420 GFP_NOWAIT);
412 if (!command) { 421 if (!command) {
413 spin_unlock_irqrestore(&xhci->lock, flags); 422 spin_unlock_irqrestore(&xhci->lock, flags);
414 xhci_free_command(xhci, cmd); 423 ret = -ENOMEM;
415 return -ENOMEM; 424 goto cmd_cleanup;
425 }
426
427 ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
428 i, suspend);
429 if (ret) {
430 spin_unlock_irqrestore(&xhci->lock, flags);
431 xhci_free_command(xhci, command);
432 goto cmd_cleanup;
416 } 433 }
417 xhci_queue_stop_endpoint(xhci, command, slot_id, i,
418 suspend);
419 } 434 }
420 } 435 }
421 xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend); 436 ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
437 if (ret) {
438 spin_unlock_irqrestore(&xhci->lock, flags);
439 goto cmd_cleanup;
440 }
441
422 xhci_ring_cmd_db(xhci); 442 xhci_ring_cmd_db(xhci);
423 spin_unlock_irqrestore(&xhci->lock, flags); 443 spin_unlock_irqrestore(&xhci->lock, flags);
424 444
@@ -430,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
430 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 450 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
431 ret = -ETIME; 451 ret = -ETIME;
432 } 452 }
453
454cmd_cleanup:
433 xhci_free_command(xhci, cmd); 455 xhci_free_command(xhci, cmd);
434 return ret; 456 return ret;
435} 457}
@@ -1506,9 +1528,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1506 t2 |= PORT_WKOC_E | PORT_WKCONN_E; 1528 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
1507 t2 &= ~PORT_WKDISC_E; 1529 t2 &= ~PORT_WKDISC_E;
1508 } 1530 }
1509 if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
1510 (hcd->speed < HCD_USB3))
1511 t2 &= ~PORT_WAKE_BITS;
1512 } else 1531 } else
1513 t2 &= ~PORT_WAKE_BITS; 1532 t2 &= ~PORT_WAKE_BITS;
1514 1533
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 8071c8fdd15e..76f392954733 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,11 +54,6 @@
54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
56 56
57#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
58#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
59#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
60#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
61
62#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 57#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
63 58
64static const char hcd_name[] = "xhci_hcd"; 59static const char hcd_name[] = "xhci_hcd";
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
142 if (pdev->vendor == PCI_VENDOR_ID_AMD) 137 if (pdev->vendor == PCI_VENDOR_ID_AMD)
143 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 138 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
144 139
145 if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
146 ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
147 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
148 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
149 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
150 xhci->quirks |= XHCI_U2_DISABLE_WAKE;
151
152 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 140 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
153 xhci->quirks |= XHCI_LPM_SUPPORT; 141 xhci->quirks |= XHCI_LPM_SUPPORT;
154 xhci->quirks |= XHCI_INTEL_HOST; 142 xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 163bafde709f..1cb6eaef4ae1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
178 * 2. xhci_plat is child of a device from firmware (dwc3-plat) 178 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
179 * 3. xhci_plat is grandchild of a pci device (dwc3-pci) 179 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
180 */ 180 */
181 sysdev = &pdev->dev; 181 for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
182 if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node) 182 if (is_of_node(sysdev->fwnode) ||
183 sysdev = sysdev->parent; 183 is_acpi_device_node(sysdev->fwnode))
184 break;
184#ifdef CONFIG_PCI 185#ifdef CONFIG_PCI
185 else if (sysdev->parent && sysdev->parent->parent && 186 else if (sysdev->bus == &pci_bus_type)
186 sysdev->parent->parent->bus == &pci_bus_type) 187 break;
187 sysdev = sysdev->parent->parent;
188#endif 188#endif
189 }
190
191 if (!sysdev)
192 sysdev = &pdev->dev;
189 193
190 /* Try to set 64-bit DMA first */ 194 /* Try to set 64-bit DMA first */
191 if (WARN_ON(!sysdev->dma_mask)) 195 if (WARN_ON(!sysdev->dma_mask))
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a9443651ce0f..82c746e2d85c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1309void xhci_cleanup_command_queue(struct xhci_hcd *xhci) 1309void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1310{ 1310{
1311 struct xhci_command *cur_cmd, *tmp_cmd; 1311 struct xhci_command *cur_cmd, *tmp_cmd;
1312 xhci->current_cmd = NULL;
1312 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) 1313 list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1313 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); 1314 xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1314} 1315}
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2579 (struct xhci_generic_trb *) ep_trb); 2580 (struct xhci_generic_trb *) ep_trb);
2580 2581
2581 /* 2582 /*
2582 * No-op TRB should not trigger interrupts. 2583 * No-op TRB could trigger interrupts in a case where
2583 * If ep_trb is a no-op TRB, it means the 2584 * a URB was killed and a STALL_ERROR happens right
2584 * corresponding TD has been cancelled. Just ignore 2585 * after the endpoint ring stopped. Reset the halted
2585 * the TD. 2586 * endpoint. Otherwise, the endpoint remains stalled
2587 * indefinitely.
2586 */ 2588 */
2587 if (trb_is_noop(ep_trb)) { 2589 if (trb_is_noop(ep_trb)) {
2588 xhci_dbg(xhci, 2590 if (trb_comp_code == COMP_STALL_ERROR ||
2589 "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n", 2591 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2590 slot_id, ep_index); 2592 trb_comp_code))
2593 xhci_cleanup_halted_endpoint(xhci, slot_id,
2594 ep_index,
2595 ep_ring->stream_id,
2596 td, ep_trb,
2597 EP_HARD_RESET);
2591 goto cleanup; 2598 goto cleanup;
2592 } 2599 }
2593 2600
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b2ff1ff1a02f..51535ba2bcd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1703 if (xhci->quirks & XHCI_MTK_HOST) { 1703 if (xhci->quirks & XHCI_MTK_HOST) {
1704 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); 1704 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1705 if (ret < 0) { 1705 if (ret < 0) {
1706 xhci_free_endpoint_ring(xhci, virt_dev, ep_index); 1706 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1707 virt_dev->eps[ep_index].new_ring = NULL;
1707 return ret; 1708 return ret;
1708 } 1709 }
1709 } 1710 }
@@ -4804,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4804 */ 4805 */
4805 hcd->has_tt = 1; 4806 hcd->has_tt = 1;
4806 } else { 4807 } else {
4807 if (xhci->sbrn == 0x31) { 4808 /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
4809 if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
4808 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); 4810 xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
4809 hcd->speed = HCD_USB31; 4811 hcd->speed = HCD_USB31;
4810 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; 4812 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2abaa4d6d39d..2b48aa4f6b76 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -735,6 +735,8 @@ struct xhci_ep_ctx {
735#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) 735#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
736/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ 736/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
737#define EP_HAS_LSA (1 << 15) 737#define EP_HAS_LSA (1 << 15)
738/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
739#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff)
738 740
739/* ep_info2 bitmasks */ 741/* ep_info2 bitmasks */
740/* 742/*
@@ -1681,7 +1683,7 @@ struct xhci_bus_state {
1681 1683
1682static inline unsigned int hcd_index(struct usb_hcd *hcd) 1684static inline unsigned int hcd_index(struct usb_hcd *hcd)
1683{ 1685{
1684 if (hcd->speed == HCD_USB3) 1686 if (hcd->speed >= HCD_USB3)
1685 return 0; 1687 return 0;
1686 else 1688 else
1687 return 1; 1689 return 1;
@@ -1826,7 +1828,7 @@ struct xhci_hcd {
1826/* For controller with a broken Port Disable implementation */ 1828/* For controller with a broken Port Disable implementation */
1827#define XHCI_BROKEN_PORT_PED (1 << 25) 1829#define XHCI_BROKEN_PORT_PED (1 << 25)
1828#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) 1830#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
1829#define XHCI_U2_DISABLE_WAKE (1 << 27) 1831/* Reserved. It was XHCI_U2_DISABLE_WAKE */
1830#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) 1832#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1831 1833
1832 unsigned int num_active_eps; 1834 unsigned int num_active_eps;
@@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
2540 u8 lsa; 2542 u8 lsa;
2541 u8 hid; 2543 u8 hid;
2542 2544
2543 esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 | 2545 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
2544 EP_MAX_ESIT_PAYLOAD_LO(tx_info); 2546 CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
2545 2547
2546 ep_state = info & EP_STATE_MASK; 2548 ep_state = info & EP_STATE_MASK;
2547 max_pstr = info & EP_MAXPSTREAMS_MASK; 2549 max_pstr = info & EP_MAXPSTREAMS_MASK;
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index eee82ca55b7b..b3fc602b2e24 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -202,12 +202,13 @@ found:
202 return tmp; 202 return tmp;
203 } 203 }
204 204
205 if (in) { 205 if (in)
206 dev->in_pipe = usb_rcvbulkpipe(udev, 206 dev->in_pipe = usb_rcvbulkpipe(udev,
207 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 207 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
208 if (out)
208 dev->out_pipe = usb_sndbulkpipe(udev, 209 dev->out_pipe = usb_sndbulkpipe(udev,
209 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 210 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
210 } 211
211 if (iso_in) { 212 if (iso_in) {
212 dev->iso_in = &iso_in->desc; 213 dev->iso_in = &iso_in->desc;
213 dev->in_iso_pipe = usb_rcvisocpipe(udev, 214 dev->in_iso_pipe = usb_rcvisocpipe(udev,
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
1964 int status = 0; 1965 int status = 0;
1965 struct urb *urbs[param->sglen]; 1966 struct urb *urbs[param->sglen];
1966 1967
1968 if (!param->sglen || param->iterations > UINT_MAX / param->sglen)
1969 return -EINVAL;
1970
1967 memset(&context, 0, sizeof(context)); 1971 memset(&context, 0, sizeof(context));
1968 context.count = param->iterations * param->sglen; 1972 context.count = param->iterations * param->sglen;
1969 context.dev = dev; 1973 context.dev = dev;
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param)
2087 2091
2088 if (param->iterations <= 0) 2092 if (param->iterations <= 0)
2089 return -EINVAL; 2093 return -EINVAL;
2094 if (param->sglen > MAX_SGLEN)
2095 return -EINVAL;
2090 /* 2096 /*
2091 * Just a bunch of test cases that every HCD is expected to handle. 2097 * Just a bunch of test cases that every HCD is expected to handle.
2092 * 2098 *
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 029692053dd3..ff5a1a8989d5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -906,7 +906,7 @@ b_host:
906 */ 906 */
907 if (int_usb & MUSB_INTR_RESET) { 907 if (int_usb & MUSB_INTR_RESET) {
908 handled = IRQ_HANDLED; 908 handled = IRQ_HANDLED;
909 if (devctl & MUSB_DEVCTL_HM) { 909 if (is_host_active(musb)) {
910 /* 910 /*
911 * When BABBLE happens what we can depends on which 911 * When BABBLE happens what we can depends on which
912 * platform MUSB is running, because some platforms 912 * platform MUSB is running, because some platforms
@@ -916,9 +916,7 @@ b_host:
916 * drop the session. 916 * drop the session.
917 */ 917 */
918 dev_err(musb->controller, "Babble\n"); 918 dev_err(musb->controller, "Babble\n");
919 919 musb_recover_from_babble(musb);
920 if (is_host_active(musb))
921 musb_recover_from_babble(musb);
922 } else { 920 } else {
923 musb_dbg(musb, "BUS RESET as %s", 921 musb_dbg(musb, "BUS RESET as %s",
924 usb_otg_state_string(musb->xceiv->otg->state)); 922 usb_otg_state_string(musb->xceiv->otg->state));
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1861 MUSB_DEVCTL_HR; 1859 MUSB_DEVCTL_HR;
1862 switch (devctl & ~s) { 1860 switch (devctl & ~s) {
1863 case MUSB_QUIRK_B_INVALID_VBUS_91: 1861 case MUSB_QUIRK_B_INVALID_VBUS_91:
1864 if (musb->quirk_retries--) { 1862 if (musb->quirk_retries && !musb->flush_irq_work) {
1865 musb_dbg(musb, 1863 musb_dbg(musb,
1866 "Poll devctl on invalid vbus, assume no session"); 1864 "Poll devctl on invalid vbus, assume no session");
1867 schedule_delayed_work(&musb->irq_work, 1865 schedule_delayed_work(&musb->irq_work,
1868 msecs_to_jiffies(1000)); 1866 msecs_to_jiffies(1000));
1869 1867 musb->quirk_retries--;
1870 return; 1868 return;
1871 } 1869 }
1872 /* fall through */ 1870 /* fall through */
1873 case MUSB_QUIRK_A_DISCONNECT_19: 1871 case MUSB_QUIRK_A_DISCONNECT_19:
1874 if (musb->quirk_retries--) { 1872 if (musb->quirk_retries && !musb->flush_irq_work) {
1875 musb_dbg(musb, 1873 musb_dbg(musb,
1876 "Poll devctl on possible host mode disconnect"); 1874 "Poll devctl on possible host mode disconnect");
1877 schedule_delayed_work(&musb->irq_work, 1875 schedule_delayed_work(&musb->irq_work,
1878 msecs_to_jiffies(1000)); 1876 msecs_to_jiffies(1000));
1879 1877 musb->quirk_retries--;
1880 return; 1878 return;
1881 } 1879 }
1882 if (!musb->session) 1880 if (!musb->session)
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
2681 2679
2682 musb_platform_disable(musb); 2680 musb_platform_disable(musb);
2683 musb_disable_interrupts(musb); 2681 musb_disable_interrupts(musb);
2682
2683 musb->flush_irq_work = true;
2684 while (flush_delayed_work(&musb->irq_work))
2685 ;
2686 musb->flush_irq_work = false;
2687
2684 if (!(musb->io.quirks & MUSB_PRESERVE_SESSION)) 2688 if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
2685 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2689 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2690
2686 WARN_ON(!list_empty(&musb->pending_list)); 2691 WARN_ON(!list_empty(&musb->pending_list));
2687 2692
2688 spin_lock_irqsave(&musb->lock, flags); 2693 spin_lock_irqsave(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index c748f4ac1154..20f4614178d9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -428,6 +428,8 @@ struct musb {
428 unsigned test_mode:1; 428 unsigned test_mode:1;
429 unsigned softconnect:1; 429 unsigned softconnect:1;
430 430
431 unsigned flush_irq_work:1;
432
431 u8 address; 433 u8 address;
432 u8 test_mode_nr; 434 u8 test_mode_nr;
433 u16 ackpend; /* ep0 */ 435 u16 ackpend; /* ep0 */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ba255280a624..1ec0a4947b6b 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -26,15 +26,28 @@
26 26
27#define MUSB_DMA_NUM_CHANNELS 15 27#define MUSB_DMA_NUM_CHANNELS 15
28 28
29#define DA8XX_USB_MODE 0x10
30#define DA8XX_USB_AUTOREQ 0x14
31#define DA8XX_USB_TEARDOWN 0x1c
32
33#define DA8XX_DMA_NUM_CHANNELS 4
34
29struct cppi41_dma_controller { 35struct cppi41_dma_controller {
30 struct dma_controller controller; 36 struct dma_controller controller;
31 struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; 37 struct cppi41_dma_channel *rx_channel;
32 struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; 38 struct cppi41_dma_channel *tx_channel;
33 struct hrtimer early_tx; 39 struct hrtimer early_tx;
34 struct list_head early_tx_list; 40 struct list_head early_tx_list;
35 u32 rx_mode; 41 u32 rx_mode;
36 u32 tx_mode; 42 u32 tx_mode;
37 u32 auto_req; 43 u32 auto_req;
44
45 u32 tdown_reg;
46 u32 autoreq_reg;
47
48 void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
49 unsigned int mode);
50 u8 num_channels;
38}; 51};
39 52
40static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel) 53static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
349 } 362 }
350} 363}
351 364
365static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
366 unsigned int mode)
367{
368 struct cppi41_dma_controller *controller = cppi41_channel->controller;
369 struct musb *musb = controller->controller.musb;
370 unsigned int shift;
371 u32 port;
372 u32 new_mode;
373 u32 old_mode;
374
375 old_mode = controller->tx_mode;
376 port = cppi41_channel->port_num;
377
378 shift = (port - 1) * 4;
379 if (!cppi41_channel->is_tx)
380 shift += 16;
381 new_mode = old_mode & ~(3 << shift);
382 new_mode |= mode << shift;
383
384 if (new_mode == old_mode)
385 return;
386 controller->tx_mode = new_mode;
387 musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
388}
389
390
352static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel, 391static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
353 unsigned mode) 392 unsigned mode)
354{ 393{
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
364 if (new_mode == old_mode) 403 if (new_mode == old_mode)
365 return; 404 return;
366 controller->auto_req = new_mode; 405 controller->auto_req = new_mode;
367 musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ, 406 musb_writel(controller->controller.musb->ctrl_base,
368 new_mode); 407 controller->autoreq_reg, new_mode);
369} 408}
370 409
371static bool cppi41_configure_channel(struct dma_channel *channel, 410static bool cppi41_configure_channel(struct dma_channel *channel,
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
373 dma_addr_t dma_addr, u32 len) 412 dma_addr_t dma_addr, u32 len)
374{ 413{
375 struct cppi41_dma_channel *cppi41_channel = channel->private_data; 414 struct cppi41_dma_channel *cppi41_channel = channel->private_data;
415 struct cppi41_dma_controller *controller = cppi41_channel->controller;
376 struct dma_chan *dc = cppi41_channel->dc; 416 struct dma_chan *dc = cppi41_channel->dc;
377 struct dma_async_tx_descriptor *dma_desc; 417 struct dma_async_tx_descriptor *dma_desc;
378 enum dma_transfer_direction direction; 418 enum dma_transfer_direction direction;
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
398 musb_writel(musb->ctrl_base, 438 musb_writel(musb->ctrl_base,
399 RNDIS_REG(cppi41_channel->port_num), len); 439 RNDIS_REG(cppi41_channel->port_num), len);
400 /* gen rndis */ 440 /* gen rndis */
401 cppi41_set_dma_mode(cppi41_channel, 441 controller->set_dma_mode(cppi41_channel,
402 EP_MODE_DMA_GEN_RNDIS); 442 EP_MODE_DMA_GEN_RNDIS);
403 443
404 /* auto req */ 444 /* auto req */
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
407 } else { 447 } else {
408 musb_writel(musb->ctrl_base, 448 musb_writel(musb->ctrl_base,
409 RNDIS_REG(cppi41_channel->port_num), 0); 449 RNDIS_REG(cppi41_channel->port_num), 0);
410 cppi41_set_dma_mode(cppi41_channel, 450 controller->set_dma_mode(cppi41_channel,
411 EP_MODE_DMA_TRANSPARENT); 451 EP_MODE_DMA_TRANSPARENT);
412 cppi41_set_autoreq_mode(cppi41_channel, 452 cppi41_set_autoreq_mode(cppi41_channel,
413 EP_MODE_AUTOREQ_NONE); 453 EP_MODE_AUTOREQ_NONE);
414 } 454 }
415 } else { 455 } else {
416 /* fallback mode */ 456 /* fallback mode */
417 cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT); 457 controller->set_dma_mode(cppi41_channel,
458 EP_MODE_DMA_TRANSPARENT);
418 cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); 459 cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
419 len = min_t(u32, packet_sz, len); 460 len = min_t(u32, packet_sz, len);
420 } 461 }
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
445 struct cppi41_dma_channel *cppi41_channel = NULL; 486 struct cppi41_dma_channel *cppi41_channel = NULL;
446 u8 ch_num = hw_ep->epnum - 1; 487 u8 ch_num = hw_ep->epnum - 1;
447 488
448 if (ch_num >= MUSB_DMA_NUM_CHANNELS) 489 if (ch_num >= controller->num_channels)
449 return NULL; 490 return NULL;
450 491
451 if (is_tx) 492 if (is_tx)
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
581 622
582 do { 623 do {
583 if (is_tx) 624 if (is_tx)
584 musb_writel(musb->ctrl_base, USB_TDOWN, tdbit); 625 musb_writel(musb->ctrl_base, controller->tdown_reg,
626 tdbit);
585 ret = dmaengine_terminate_all(cppi41_channel->dc); 627 ret = dmaengine_terminate_all(cppi41_channel->dc);
586 } while (ret == -EAGAIN); 628 } while (ret == -EAGAIN);
587 629
588 if (is_tx) { 630 if (is_tx) {
589 musb_writel(musb->ctrl_base, USB_TDOWN, tdbit); 631 musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
590 632
591 csr = musb_readw(epio, MUSB_TXCSR); 633 csr = musb_readw(epio, MUSB_TXCSR);
592 if (csr & MUSB_TXCSR_TXPKTRDY) { 634 if (csr & MUSB_TXCSR_TXPKTRDY) {
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
604 struct dma_chan *dc; 646 struct dma_chan *dc;
605 int i; 647 int i;
606 648
607 for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) { 649 for (i = 0; i < ctrl->num_channels; i++) {
608 dc = ctrl->tx_channel[i].dc; 650 dc = ctrl->tx_channel[i].dc;
609 if (dc) 651 if (dc)
610 dma_release_channel(dc); 652 dma_release_channel(dc);
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
656 goto err; 698 goto err;
657 699
658 ret = -EINVAL; 700 ret = -EINVAL;
659 if (port > MUSB_DMA_NUM_CHANNELS || !port) 701 if (port > controller->num_channels || !port)
660 goto err; 702 goto err;
661 if (is_tx) 703 if (is_tx)
662 cppi41_channel = &controller->tx_channel[port - 1]; 704 cppi41_channel = &controller->tx_channel[port - 1];
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
697 739
698 hrtimer_cancel(&controller->early_tx); 740 hrtimer_cancel(&controller->early_tx);
699 cppi41_dma_controller_stop(controller); 741 cppi41_dma_controller_stop(controller);
742 kfree(controller->rx_channel);
743 kfree(controller->tx_channel);
700 kfree(controller); 744 kfree(controller);
701} 745}
702EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy); 746EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
@@ -705,6 +749,7 @@ struct dma_controller *
705cppi41_dma_controller_create(struct musb *musb, void __iomem *base) 749cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
706{ 750{
707 struct cppi41_dma_controller *controller; 751 struct cppi41_dma_controller *controller;
752 int channel_size;
708 int ret = 0; 753 int ret = 0;
709 754
710 if (!musb->controller->parent->of_node) { 755 if (!musb->controller->parent->of_node) {
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
727 controller->controller.is_compatible = cppi41_is_compatible; 772 controller->controller.is_compatible = cppi41_is_compatible;
728 controller->controller.musb = musb; 773 controller->controller.musb = musb;
729 774
775 if (musb->io.quirks & MUSB_DA8XX) {
776 controller->tdown_reg = DA8XX_USB_TEARDOWN;
777 controller->autoreq_reg = DA8XX_USB_AUTOREQ;
778 controller->set_dma_mode = da8xx_set_dma_mode;
779 controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
780 } else {
781 controller->tdown_reg = USB_TDOWN;
782 controller->autoreq_reg = USB_CTRL_AUTOREQ;
783 controller->set_dma_mode = cppi41_set_dma_mode;
784 controller->num_channels = MUSB_DMA_NUM_CHANNELS;
785 }
786
787 channel_size = controller->num_channels *
788 sizeof(struct cppi41_dma_channel);
789 controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
790 if (!controller->rx_channel)
791 goto rx_channel_alloc_fail;
792 controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
793 if (!controller->tx_channel)
794 goto tx_channel_alloc_fail;
795
730 ret = cppi41_dma_controller_start(controller); 796 ret = cppi41_dma_controller_start(controller);
731 if (ret) 797 if (ret)
732 goto plat_get_fail; 798 goto plat_get_fail;
733 return &controller->controller; 799 return &controller->controller;
734 800
735plat_get_fail: 801plat_get_fail:
802 kfree(controller->tx_channel);
803tx_channel_alloc_fail:
804 kfree(controller->rx_channel);
805rx_channel_alloc_fail:
736 kfree(controller); 806 kfree(controller);
737kzalloc_fail: 807kzalloc_fail:
738 if (ret == -EPROBE_DEFER) 808 if (ret == -EPROBE_DEFER)
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c9a09b5bb6e5..dc353e24d53c 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
297 if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) 297 if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
298 sunxi_sram_release(musb->controller->parent); 298 sunxi_sram_release(musb->controller->parent);
299 299
300 devm_usb_put_phy(glue->dev, glue->xceiv);
301
300 return 0; 302 return 0;
301} 303}
302 304
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 5fe4a5704bde..ccc2bf5274b4 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
329 unsigned long val; 329 unsigned long val;
330 void __iomem *base = phy->regs; 330 void __iomem *base = phy->regs;
331 331
332 /*
333 * The USB driver may have already initiated the phy clock
334 * disable so wait to see if the clock turns off and if not
335 * then proceed with gating the clock.
336 */
337 if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0)
338 return;
339
332 if (phy->is_legacy_phy) { 340 if (phy->is_legacy_phy) {
333 val = readl(base + USB_SUSP_CTRL); 341 val = readl(base + USB_SUSP_CTRL);
334 val |= USB_SUSP_SET; 342 val |= USB_SUSP_SET;
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
351 unsigned long val; 359 unsigned long val;
352 void __iomem *base = phy->regs; 360 void __iomem *base = phy->regs;
353 361
362 /*
363 * The USB driver may have already initiated the phy clock
364 * enable so wait to see if the clock turns on and if not
365 * then proceed with ungating the clock.
366 */
367 if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
368 USB_PHY_CLK_VALID) == 0)
369 return;
370
354 if (phy->is_legacy_phy) { 371 if (phy->is_legacy_phy) {
355 val = readl(base + USB_SUSP_CTRL); 372 val = readl(base + USB_SUSP_CTRL);
356 val |= USB_SUSP_CLR; 373 val |= USB_SUSP_CLR;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index d1af831f43eb..50285b01da92 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
282 struct usbhs_fifo *fifo) 282 struct usbhs_fifo *fifo)
283{ 283{
284 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 284 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
285 int ret = 0;
285 286
286 if (!usbhs_pipe_is_dcp(pipe)) 287 if (!usbhs_pipe_is_dcp(pipe)) {
287 usbhsf_fifo_barrier(priv, fifo); 288 /*
289 * This driver checks the pipe condition first to avoid -EBUSY
290 * from usbhsf_fifo_barrier() with about 10 msec delay in
291 * the interrupt handler if the pipe is RX direction and empty.
292 */
293 if (usbhs_pipe_is_dir_in(pipe))
294 ret = usbhs_pipe_is_accessible(pipe);
295 if (!ret)
296 ret = usbhsf_fifo_barrier(priv, fifo);
297 }
288 298
289 usbhs_write(priv, fifo->ctr, BCLR); 299 /*
300 * if non-DCP pipe, this driver should set BCLR when
301 * usbhsf_fifo_barrier() returns 0.
302 */
303 if (!ret)
304 usbhs_write(priv, fifo->ctr, BCLR);
290} 305}
291 306
292static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, 307static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
@@ -842,9 +857,9 @@ static void xfer_work(struct work_struct *work)
842 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); 857 fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
843 858
844 usbhs_pipe_running(pipe, 1); 859 usbhs_pipe_running(pipe, 1);
845 usbhsf_dma_start(pipe, fifo);
846 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); 860 usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
847 dma_async_issue_pending(chan); 861 dma_async_issue_pending(chan);
862 usbhsf_dma_start(pipe, fifo);
848 usbhs_pipe_enable(pipe); 863 usbhs_pipe_enable(pipe);
849 864
850xfer_work_end: 865xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index fdf89800ebc3..43a862a90a77 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
186 tty_kref_put(tty); 186 tty_kref_put(tty);
187 reset_open_count: 187 reset_open_count:
188 port->port.count = 0; 188 port->port.count = 0;
189 info->port = NULL;
189 usb_autopm_put_interface(serial->interface); 190 usb_autopm_put_interface(serial->interface);
190 error_get_interface: 191 error_get_interface:
191 usb_serial_put(serial); 192 usb_serial_put(serial);
@@ -265,7 +266,7 @@ static struct console usbcons = {
265 266
266void usb_serial_console_disconnect(struct usb_serial *serial) 267void usb_serial_console_disconnect(struct usb_serial *serial)
267{ 268{
268 if (serial->port[0] == usbcons_info.port) { 269 if (serial->port[0] && serial->port[0] == usbcons_info.port) {
269 usb_serial_console_exit(); 270 usb_serial_console_exit();
270 usb_serial_put(serial); 271 usb_serial_put(serial);
271 } 272 }
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2d945c9f975c..412f812522ee 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = {
177 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 177 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
178 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 178 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
179 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 179 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
180 { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
180 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ 181 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
181 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ 182 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
182 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ 183 { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
352#define CP210X_PARTNUM_CP2104 0x04 353#define CP210X_PARTNUM_CP2104 0x04
353#define CP210X_PARTNUM_CP2105 0x05 354#define CP210X_PARTNUM_CP2105 0x05
354#define CP210X_PARTNUM_CP2108 0x08 355#define CP210X_PARTNUM_CP2108 0x08
356#define CP210X_PARTNUM_UNKNOWN 0xFF
355 357
356/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ 358/* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
357struct cp210x_comm_status { 359struct cp210x_comm_status {
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial)
1491 result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, 1493 result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
1492 CP210X_GET_PARTNUM, &priv->partnum, 1494 CP210X_GET_PARTNUM, &priv->partnum,
1493 sizeof(priv->partnum)); 1495 sizeof(priv->partnum));
1494 if (result < 0) 1496 if (result < 0) {
1495 goto err_free_priv; 1497 dev_warn(&serial->interface->dev,
1498 "querying part number failed\n");
1499 priv->partnum = CP210X_PARTNUM_UNKNOWN;
1500 }
1496 1501
1497 usb_set_serial_data(serial, priv); 1502 usb_set_serial_data(serial, priv);
1498 1503
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial)
1505 } 1510 }
1506 1511
1507 return 0; 1512 return 0;
1508err_free_priv:
1509 kfree(priv);
1510
1511 return result;
1512} 1513}
1513 1514
1514static void cp210x_disconnect(struct usb_serial *serial) 1515static void cp210x_disconnect(struct usb_serial *serial)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1cec03799cdf..49d1b2d4606d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
1015 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, 1015 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1016 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), 1016 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
1017 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 1017 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1018 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
1019 { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
1018 { } /* Terminating entry */ 1020 { } /* Terminating entry */
1019}; 1021};
1020 1022
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
610#define ADI_GNICEPLUS_PID 0xF001 610#define ADI_GNICEPLUS_PID 0xF001
611 611
612/* 612/*
613 * Cypress WICED USB UART
614 */
615#define CYPRESS_VID 0x04B4
616#define CYPRESS_WICED_BT_USB_PID 0x009B
617#define CYPRESS_WICED_WL_USB_PID 0xF900
618
619/*
613 * Microchip Technology, Inc. 620 * Microchip Technology, Inc.
614 * 621 *
615 * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are 622 * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index cc84da8dbb84..14511d6a7d44 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
45static const struct usb_device_id id_table[] = { 45static const struct usb_device_id id_table[] = {
46 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) }, 46 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
47 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) }, 47 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
48 { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
48 { }, /* Terminating entry. */ 49 { }, /* Terminating entry. */
49}; 50};
50MODULE_DEVICE_TABLE(usb, id_table); 51MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 54bfef13966a..ba672cf4e888 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
522 522
523/* TP-LINK Incorporated products */ 523/* TP-LINK Incorporated products */
524#define TPLINK_VENDOR_ID 0x2357 524#define TPLINK_VENDOR_ID 0x2357
525#define TPLINK_PRODUCT_LTE 0x000D
525#define TPLINK_PRODUCT_MA180 0x0201 526#define TPLINK_PRODUCT_MA180 0x0201
526 527
527/* Changhong products */ 528/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
2011 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, 2012 { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
2012 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, 2013 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
2013 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, 2014 { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
2015 { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
2014 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), 2016 { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
2015 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2017 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2016 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ 2018 { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index ebc0beea69d6..eb9928963a53 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
174 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 174 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
175 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ 175 {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
176 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ 176 {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
177 {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
178 {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
179 {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
180 {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
177 181
178 /* Huawei devices */ 182 /* Huawei devices */
179 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 183 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1a59f335b063..a3ccb899df60 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -834,13 +834,25 @@ Retry_Sense:
834 if (result == USB_STOR_TRANSPORT_GOOD) { 834 if (result == USB_STOR_TRANSPORT_GOOD) {
835 srb->result = SAM_STAT_GOOD; 835 srb->result = SAM_STAT_GOOD;
836 srb->sense_buffer[0] = 0x0; 836 srb->sense_buffer[0] = 0x0;
837 }
838
839 /*
840 * ATA-passthru commands use sense data to report
841 * the command completion status, and often devices
842 * return Check Condition status when nothing is
843 * wrong.
844 */
845 else if (srb->cmnd[0] == ATA_16 ||
846 srb->cmnd[0] == ATA_12) {
847 /* leave the data alone */
848 }
837 849
838 /* 850 /*
839 * If there was a problem, report an unspecified 851 * If there was a problem, report an unspecified
840 * hardware error to prevent the higher layers from 852 * hardware error to prevent the higher layers from
841 * entering an infinite retry loop. 853 * entering an infinite retry loop.
842 */ 854 */
843 } else { 855 else {
844 srb->result = DID_ERROR << 16; 856 srb->result = DID_ERROR << 16;
845 if ((sshdr.response_code & 0x72) == 0x72) 857 if ((sshdr.response_code & 0x72) == 0x72)
846 srb->sense_buffer[1] = HARDWARE_ERROR; 858 srb->sense_buffer[1] = HARDWARE_ERROR;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index f58caa9e6a27..a155cd02bce2 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
9 intf->desc.bInterfaceProtocol == USB_PR_UAS); 9 intf->desc.bInterfaceProtocol == USB_PR_UAS);
10} 10}
11 11
12static int uas_find_uas_alt_setting(struct usb_interface *intf) 12static struct usb_host_interface *uas_find_uas_alt_setting(
13 struct usb_interface *intf)
13{ 14{
14 int i; 15 int i;
15 16
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
17 struct usb_host_interface *alt = &intf->altsetting[i]; 18 struct usb_host_interface *alt = &intf->altsetting[i];
18 19
19 if (uas_is_interface(alt)) 20 if (uas_is_interface(alt))
20 return alt->desc.bAlternateSetting; 21 return alt;
21 } 22 }
22 23
23 return -ENODEV; 24 return NULL;
24} 25}
25 26
26static int uas_find_endpoints(struct usb_host_interface *alt, 27static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
58 struct usb_device *udev = interface_to_usbdev(intf); 59 struct usb_device *udev = interface_to_usbdev(intf);
59 struct usb_hcd *hcd = bus_to_hcd(udev->bus); 60 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
60 unsigned long flags = id->driver_info; 61 unsigned long flags = id->driver_info;
61 int r, alt; 62 struct usb_host_interface *alt;
62 63 int r;
63 64
64 alt = uas_find_uas_alt_setting(intf); 65 alt = uas_find_uas_alt_setting(intf);
65 if (alt < 0) 66 if (!alt)
66 return 0; 67 return 0;
67 68
68 r = uas_find_endpoints(&intf->altsetting[alt], eps); 69 r = uas_find_endpoints(alt, eps);
69 if (r < 0) 70 if (r < 0)
70 return 0; 71 return 0;
71 72
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index cfb1e3bbd434..63cf981ed81c 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
873static int uas_switch_interface(struct usb_device *udev, 873static int uas_switch_interface(struct usb_device *udev,
874 struct usb_interface *intf) 874 struct usb_interface *intf)
875{ 875{
876 int alt; 876 struct usb_host_interface *alt;
877 877
878 alt = uas_find_uas_alt_setting(intf); 878 alt = uas_find_uas_alt_setting(intf);
879 if (alt < 0) 879 if (!alt)
880 return alt; 880 return -ENODEV;
881 881
882 return usb_set_interface(udev, 882 return usb_set_interface(udev, alt->desc.bInterfaceNumber,
883 intf->altsetting[0].desc.bInterfaceNumber, alt); 883 alt->desc.bAlternateSetting);
884} 884}
885 885
886static int uas_configure_endpoints(struct uas_dev_info *devinfo) 886static int uas_configure_endpoints(struct uas_dev_info *devinfo)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 5a70c33ef0e0..eb06d88b41d6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
1459 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1459 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1460 US_FL_SANE_SENSE ), 1460 US_FL_SANE_SENSE ),
1461 1461
1462/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
1463UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
1464 "Seagate",
1465 "External",
1466 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1467 US_FL_NO_WP_DETECT ),
1468
1462UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, 1469UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
1463 "Maxtor", 1470 "Maxtor",
1464 "USB to SATA", 1471 "USB to SATA",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 35a1e777b449..9a53912bdfe9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
825 825
826 if (iface->cur_altsetting->desc.bNumEndpoints < 1) 826 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
827 return -ENODEV; 827 return -ENODEV;
828 if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
829 return -ENODEV;
828 830
829 result = -ENOMEM; 831 result = -ENOMEM;
830 uwb_rc = uwb_rc_alloc(); 832 uwb_rc = uwb_rc_alloc();
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index 01c20a260a8b..39dd4ef53c77 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -302,18 +302,22 @@ static int uwbd(void *param)
302/** Start the UWB daemon */ 302/** Start the UWB daemon */
303void uwbd_start(struct uwb_rc *rc) 303void uwbd_start(struct uwb_rc *rc)
304{ 304{
305 rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); 305 struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
306 if (rc->uwbd.task == NULL) 306 if (IS_ERR(task)) {
307 rc->uwbd.task = NULL;
307 printk(KERN_ERR "UWB: Cannot start management daemon; " 308 printk(KERN_ERR "UWB: Cannot start management daemon; "
308 "UWB won't work\n"); 309 "UWB won't work\n");
309 else 310 } else {
311 rc->uwbd.task = task;
310 rc->uwbd.pid = rc->uwbd.task->pid; 312 rc->uwbd.pid = rc->uwbd.task->pid;
313 }
311} 314}
312 315
313/* Stop the UWB daemon and free any unprocessed events */ 316/* Stop the UWB daemon and free any unprocessed events */
314void uwbd_stop(struct uwb_rc *rc) 317void uwbd_stop(struct uwb_rc *rc)
315{ 318{
316 kthread_stop(rc->uwbd.task); 319 if (rc->uwbd.task)
320 kthread_stop(rc->uwbd.task);
317 uwbd_flush(rc); 321 uwbd_flush(rc);
318} 322}
319 323
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 82360594fa8e..57efbd3b053b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1024 mutex_unlock(&priv->lock); 1024 mutex_unlock(&priv->lock);
1025 1025
1026 if (use_ptemod) { 1026 if (use_ptemod) {
1027 map->pages_vm_start = vma->vm_start;
1027 err = apply_to_page_range(vma->vm_mm, vma->vm_start, 1028 err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1028 vma->vm_end - vma->vm_start, 1029 vma->vm_end - vma->vm_start,
1029 find_grant_ptes, map); 1030 find_grant_ptes, map);
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
1061 set_grant_ptes_as_special, NULL); 1062 set_grant_ptes_as_special, NULL);
1062 } 1063 }
1063#endif 1064#endif
1064 map->pages_vm_start = vma->vm_start;
1065 } 1065 }
1066 1066
1067 return 0; 1067 return 0;
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e89136ab851e..b437fccd4e62 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
57static void watch_target(struct xenbus_watch *watch, 57static void watch_target(struct xenbus_watch *watch,
58 const char *path, const char *token) 58 const char *path, const char *token)
59{ 59{
60 unsigned long long new_target; 60 unsigned long long new_target, static_max;
61 int err; 61 int err;
62 static bool watch_fired; 62 static bool watch_fired;
63 static long target_diff; 63 static long target_diff;
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
72 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 72 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
73 */ 73 */
74 new_target >>= PAGE_SHIFT - 10; 74 new_target >>= PAGE_SHIFT - 10;
75 if (watch_fired) { 75
76 balloon_set_new_target(new_target - target_diff); 76 if (!watch_fired) {
77 return; 77 watch_fired = true;
78 err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
79 &static_max);
80 if (err != 1)
81 static_max = new_target;
82 else
83 static_max >>= PAGE_SHIFT - 10;
84 target_diff = xen_pv_domain() ? 0
85 : static_max - balloon_stats.target_pages;
78 } 86 }
79 87
80 watch_fired = true; 88 balloon_set_new_target(new_target - target_diff);
81 target_diff = new_target - balloon_stats.target_pages;
82} 89}
83static struct xenbus_watch target_watch = { 90static struct xenbus_watch target_watch = {
84 .node = "memory/target", 91 .node = "memory/target",
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 5fbfd9cfb6d6..5b3d57fc82d3 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
169static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) 169static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
170{ 170{
171 struct pci_bar_info *bar = data; 171 struct pci_bar_info *bar = data;
172 unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4;
173 const struct resource *res = dev->resource;
174 u32 mask;
172 175
173 if (unlikely(!bar)) { 176 if (unlikely(!bar)) {
174 pr_warn(DRV_NAME ": driver data not found for %s\n", 177 pr_warn(DRV_NAME ": driver data not found for %s\n",
@@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
179 /* A write to obtain the length must happen as a 32-bit write. 182 /* A write to obtain the length must happen as a 32-bit write.
180 * This does not (yet) support writing individual bytes 183 * This does not (yet) support writing individual bytes
181 */ 184 */
182 if (value == ~0) 185 if (res[pos].flags & IORESOURCE_IO)
186 mask = ~PCI_BASE_ADDRESS_IO_MASK;
187 else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
188 mask = 0;
189 else
190 mask = ~PCI_BASE_ADDRESS_MEM_MASK;
191 if ((value | mask) == ~0U)
183 bar->which = 1; 192 bar->which = 1;
184 else { 193 else {
185 u32 tmpval; 194 u32 tmpval;
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adaf6f6dd858..e1cbdfdb7c68 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
310 310
311 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); 311 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
312 312
313 if (unlikely(copied < len && !PageUptodate(page))) { 313 if (!PageUptodate(page)) {
314 copied = 0; 314 if (unlikely(copied < len)) {
315 goto out; 315 copied = 0;
316 goto out;
317 } else if (len == PAGE_SIZE) {
318 SetPageUptodate(page);
319 }
316 } 320 }
317 /* 321 /*
318 * No need to use i_size_read() here, the i_size 322 * No need to use i_size_read() here, the i_size
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index ce7181ea60fa..a7c5a9861bef 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -54,7 +54,7 @@ typedef struct {
54 int size; /* size of magic/mask */ 54 int size; /* size of magic/mask */
55 char *magic; /* magic or filename extension */ 55 char *magic; /* magic or filename extension */
56 char *mask; /* mask, NULL for exact match */ 56 char *mask; /* mask, NULL for exact match */
57 char *interpreter; /* filename of interpreter */ 57 const char *interpreter; /* filename of interpreter */
58 char *name; 58 char *name;
59 struct dentry *dentry; 59 struct dentry *dentry;
60 struct file *interp_file; 60 struct file *interp_file;
@@ -131,27 +131,26 @@ static int load_misc_binary(struct linux_binprm *bprm)
131{ 131{
132 Node *fmt; 132 Node *fmt;
133 struct file *interp_file = NULL; 133 struct file *interp_file = NULL;
134 char iname[BINPRM_BUF_SIZE];
135 const char *iname_addr = iname;
136 int retval; 134 int retval;
137 int fd_binary = -1; 135 int fd_binary = -1;
138 136
139 retval = -ENOEXEC; 137 retval = -ENOEXEC;
140 if (!enabled) 138 if (!enabled)
141 goto ret; 139 return retval;
142 140
143 /* to keep locking time low, we copy the interpreter string */ 141 /* to keep locking time low, we copy the interpreter string */
144 read_lock(&entries_lock); 142 read_lock(&entries_lock);
145 fmt = check_file(bprm); 143 fmt = check_file(bprm);
146 if (fmt) 144 if (fmt)
147 strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE); 145 dget(fmt->dentry);
148 read_unlock(&entries_lock); 146 read_unlock(&entries_lock);
149 if (!fmt) 147 if (!fmt)
150 goto ret; 148 return retval;
151 149
152 /* Need to be able to load the file after exec */ 150 /* Need to be able to load the file after exec */
151 retval = -ENOENT;
153 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) 152 if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
154 return -ENOENT; 153 goto ret;
155 154
156 if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { 155 if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) {
157 retval = remove_arg_zero(bprm); 156 retval = remove_arg_zero(bprm);
@@ -195,22 +194,22 @@ static int load_misc_binary(struct linux_binprm *bprm)
195 bprm->argc++; 194 bprm->argc++;
196 195
197 /* add the interp as argv[0] */ 196 /* add the interp as argv[0] */
198 retval = copy_strings_kernel(1, &iname_addr, bprm); 197 retval = copy_strings_kernel(1, &fmt->interpreter, bprm);
199 if (retval < 0) 198 if (retval < 0)
200 goto error; 199 goto error;
201 bprm->argc++; 200 bprm->argc++;
202 201
203 /* Update interp in case binfmt_script needs it. */ 202 /* Update interp in case binfmt_script needs it. */
204 retval = bprm_change_interp(iname, bprm); 203 retval = bprm_change_interp(fmt->interpreter, bprm);
205 if (retval < 0) 204 if (retval < 0)
206 goto error; 205 goto error;
207 206
208 if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) { 207 if (fmt->flags & MISC_FMT_OPEN_FILE) {
209 interp_file = filp_clone_open(fmt->interp_file); 208 interp_file = filp_clone_open(fmt->interp_file);
210 if (!IS_ERR(interp_file)) 209 if (!IS_ERR(interp_file))
211 deny_write_access(interp_file); 210 deny_write_access(interp_file);
212 } else { 211 } else {
213 interp_file = open_exec(iname); 212 interp_file = open_exec(fmt->interpreter);
214 } 213 }
215 retval = PTR_ERR(interp_file); 214 retval = PTR_ERR(interp_file);
216 if (IS_ERR(interp_file)) 215 if (IS_ERR(interp_file))
@@ -238,6 +237,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
238 goto error; 237 goto error;
239 238
240ret: 239ret:
240 dput(fmt->dentry);
241 return retval; 241 return retval;
242error: 242error:
243 if (fd_binary > 0) 243 if (fd_binary > 0)
@@ -594,8 +594,13 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
594 594
595static void bm_evict_inode(struct inode *inode) 595static void bm_evict_inode(struct inode *inode)
596{ 596{
597 Node *e = inode->i_private;
598
599 if (e && e->flags & MISC_FMT_OPEN_FILE)
600 filp_close(e->interp_file, NULL);
601
597 clear_inode(inode); 602 clear_inode(inode);
598 kfree(inode->i_private); 603 kfree(e);
599} 604}
600 605
601static void kill_node(Node *e) 606static void kill_node(Node *e)
@@ -603,24 +608,14 @@ static void kill_node(Node *e)
603 struct dentry *dentry; 608 struct dentry *dentry;
604 609
605 write_lock(&entries_lock); 610 write_lock(&entries_lock);
606 dentry = e->dentry; 611 list_del_init(&e->list);
607 if (dentry) {
608 list_del_init(&e->list);
609 e->dentry = NULL;
610 }
611 write_unlock(&entries_lock); 612 write_unlock(&entries_lock);
612 613
613 if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) { 614 dentry = e->dentry;
614 filp_close(e->interp_file, NULL); 615 drop_nlink(d_inode(dentry));
615 e->interp_file = NULL; 616 d_drop(dentry);
616 } 617 dput(dentry);
617 618 simple_release_fs(&bm_mnt, &entry_count);
618 if (dentry) {
619 drop_nlink(d_inode(dentry));
620 d_drop(dentry);
621 dput(dentry);
622 simple_release_fs(&bm_mnt, &entry_count);
623 }
624} 619}
625 620
626/* /<entry> */ 621/* /<entry> */
@@ -665,7 +660,8 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
665 root = file_inode(file)->i_sb->s_root; 660 root = file_inode(file)->i_sb->s_root;
666 inode_lock(d_inode(root)); 661 inode_lock(d_inode(root));
667 662
668 kill_node(e); 663 if (!list_empty(&e->list))
664 kill_node(e);
669 665
670 inode_unlock(d_inode(root)); 666 inode_unlock(d_inode(root));
671 break; 667 break;
@@ -794,7 +790,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
794 inode_lock(d_inode(root)); 790 inode_lock(d_inode(root));
795 791
796 while (!list_empty(&entries)) 792 while (!list_empty(&entries))
797 kill_node(list_entry(entries.next, Node, list)); 793 kill_node(list_first_entry(&entries, Node, list));
798 794
799 inode_unlock(d_inode(root)); 795 inode_unlock(d_inode(root));
800 break; 796 break;
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index afdf4e3cafc2..7cde3f46ad26 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -19,7 +19,6 @@ static int load_script(struct linux_binprm *bprm)
19 const char *i_arg, *i_name; 19 const char *i_arg, *i_name;
20 char *cp; 20 char *cp;
21 struct file *file; 21 struct file *file;
22 char interp[BINPRM_BUF_SIZE];
23 int retval; 22 int retval;
24 23
25 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) 24 if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
@@ -55,7 +54,7 @@ static int load_script(struct linux_binprm *bprm)
55 break; 54 break;
56 } 55 }
57 for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); 56 for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
58 if (*cp == '\0') 57 if (*cp == '\0')
59 return -ENOEXEC; /* No interpreter name found */ 58 return -ENOEXEC; /* No interpreter name found */
60 i_name = cp; 59 i_name = cp;
61 i_arg = NULL; 60 i_arg = NULL;
@@ -65,7 +64,6 @@ static int load_script(struct linux_binprm *bprm)
65 *cp++ = '\0'; 64 *cp++ = '\0';
66 if (*cp) 65 if (*cp)
67 i_arg = cp; 66 i_arg = cp;
68 strcpy (interp, i_name);
69 /* 67 /*
70 * OK, we've parsed out the interpreter name and 68 * OK, we've parsed out the interpreter name and
71 * (optional) argument. 69 * (optional) argument.
@@ -80,24 +78,27 @@ static int load_script(struct linux_binprm *bprm)
80 if (retval) 78 if (retval)
81 return retval; 79 return retval;
82 retval = copy_strings_kernel(1, &bprm->interp, bprm); 80 retval = copy_strings_kernel(1, &bprm->interp, bprm);
83 if (retval < 0) return retval; 81 if (retval < 0)
82 return retval;
84 bprm->argc++; 83 bprm->argc++;
85 if (i_arg) { 84 if (i_arg) {
86 retval = copy_strings_kernel(1, &i_arg, bprm); 85 retval = copy_strings_kernel(1, &i_arg, bprm);
87 if (retval < 0) return retval; 86 if (retval < 0)
87 return retval;
88 bprm->argc++; 88 bprm->argc++;
89 } 89 }
90 retval = copy_strings_kernel(1, &i_name, bprm); 90 retval = copy_strings_kernel(1, &i_name, bprm);
91 if (retval) return retval; 91 if (retval)
92 return retval;
92 bprm->argc++; 93 bprm->argc++;
93 retval = bprm_change_interp(interp, bprm); 94 retval = bprm_change_interp(i_name, bprm);
94 if (retval < 0) 95 if (retval < 0)
95 return retval; 96 return retval;
96 97
97 /* 98 /*
98 * OK, now restart the process with the interpreter's dentry. 99 * OK, now restart the process with the interpreter's dentry.
99 */ 100 */
100 file = open_exec(interp); 101 file = open_exec(i_name);
101 if (IS_ERR(file)) 102 if (IS_ERR(file))
102 return PTR_ERR(file); 103 return PTR_ERR(file);
103 104
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 93d088ffc05c..789f55e851ae 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
716 716
717 set_page_writeback(page); 717 set_page_writeback(page);
718 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); 718 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
719 if (result) 719 if (result) {
720 end_page_writeback(page); 720 end_page_writeback(page);
721 else 721 } else {
722 clean_page_buffers(page);
722 unlock_page(page); 723 unlock_page(page);
724 }
723 blk_queue_exit(bdev->bd_queue); 725 blk_queue_exit(bdev->bd_queue);
724 return result; 726 return result;
725} 727}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b51d23f5cafa..280384bf34f1 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -107,7 +107,8 @@ static void end_compressed_bio_read(struct bio *bio)
107 struct inode *inode; 107 struct inode *inode;
108 struct page *page; 108 struct page *page;
109 unsigned long index; 109 unsigned long index;
110 int ret; 110 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
111 int ret = 0;
111 112
112 if (bio->bi_status) 113 if (bio->bi_status)
113 cb->errors = 1; 114 cb->errors = 1;
@@ -118,6 +119,21 @@ static void end_compressed_bio_read(struct bio *bio)
118 if (!refcount_dec_and_test(&cb->pending_bios)) 119 if (!refcount_dec_and_test(&cb->pending_bios))
119 goto out; 120 goto out;
120 121
122 /*
123 * Record the correct mirror_num in cb->orig_bio so that
124 * read-repair can work properly.
125 */
126 ASSERT(btrfs_io_bio(cb->orig_bio));
127 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
128 cb->mirror_num = mirror;
129
130 /*
131 * Some IO in this cb have failed, just skip checksum as there
132 * is no way it could be correct.
133 */
134 if (cb->errors == 1)
135 goto csum_failed;
136
121 inode = cb->inode; 137 inode = cb->inode;
122 ret = check_compressed_csum(BTRFS_I(inode), cb, 138 ret = check_compressed_csum(BTRFS_I(inode), cb,
123 (u64)bio->bi_iter.bi_sector << 9); 139 (u64)bio->bi_iter.bi_sector << 9);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 5a8933da39a7..8fc690384c58 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -709,7 +709,6 @@ struct btrfs_delayed_root;
709#define BTRFS_FS_OPEN 5 709#define BTRFS_FS_OPEN 5
710#define BTRFS_FS_QUOTA_ENABLED 6 710#define BTRFS_FS_QUOTA_ENABLED 6
711#define BTRFS_FS_QUOTA_ENABLING 7 711#define BTRFS_FS_QUOTA_ENABLING 7
712#define BTRFS_FS_QUOTA_DISABLING 8
713#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9 712#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9
714#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10 713#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10
715#define BTRFS_FS_BTREE_ERR 11 714#define BTRFS_FS_BTREE_ERR 11
@@ -723,7 +722,7 @@ struct btrfs_delayed_root;
723 * Indicate that a whole-filesystem exclusive operation is running 722 * Indicate that a whole-filesystem exclusive operation is running
724 * (device replace, resize, device add/delete, balance) 723 * (device replace, resize, device add/delete, balance)
725 */ 724 */
726#define BTRFS_FS_EXCL_OP 14 725#define BTRFS_FS_EXCL_OP 16
727 726
728struct btrfs_fs_info { 727struct btrfs_fs_info {
729 u8 fsid[BTRFS_FSID_SIZE]; 728 u8 fsid[BTRFS_FSID_SIZE];
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 487bbe4fb3c6..dfdab849037b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3643,7 +3643,14 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
3643 u64 flags; 3643 u64 flags;
3644 3644
3645 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); 3645 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
3646 backup_super_roots(fs_info); 3646
3647 /*
3648 * max_mirrors == 0 indicates we're from commit_transaction,
3649 * not from fsync where the tree roots in fs_info have not
3650 * been consistent on disk.
3651 */
3652 if (max_mirrors == 0)
3653 backup_super_roots(fs_info);
3647 3654
3648 sb = fs_info->super_for_commit; 3655 sb = fs_info->super_for_commit;
3649 dev_item = &sb->dev_item; 3656 dev_item = &sb->dev_item;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3e5bb0cdd3cd..970190cd347e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
2801 } 2801 }
2802 } 2802 }
2803 2803
2804 bio = btrfs_bio_alloc(bdev, sector << 9); 2804 bio = btrfs_bio_alloc(bdev, (u64)sector << 9);
2805 bio_add_page(bio, page, page_size, offset); 2805 bio_add_page(bio, page, page_size, offset);
2806 bio->bi_end_io = end_io_func; 2806 bio->bi_end_io = end_io_func;
2807 bio->bi_private = tree; 2807 bio->bi_private = tree;
@@ -3471,8 +3471,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
3471 unsigned int write_flags = 0; 3471 unsigned int write_flags = 0;
3472 unsigned long nr_written = 0; 3472 unsigned long nr_written = 0;
3473 3473
3474 if (wbc->sync_mode == WB_SYNC_ALL) 3474 write_flags = wbc_to_write_flags(wbc);
3475 write_flags = REQ_SYNC;
3476 3475
3477 trace___extent_writepage(page, inode, wbc); 3476 trace___extent_writepage(page, inode, wbc);
3478 3477
@@ -3718,7 +3717,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
3718 unsigned long i, num_pages; 3717 unsigned long i, num_pages;
3719 unsigned long bio_flags = 0; 3718 unsigned long bio_flags = 0;
3720 unsigned long start, end; 3719 unsigned long start, end;
3721 unsigned int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META; 3720 unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
3722 int ret = 0; 3721 int ret = 0;
3723 3722
3724 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); 3723 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4063,9 +4062,6 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
4063 if (epd->bio) { 4062 if (epd->bio) {
4064 int ret; 4063 int ret;
4065 4064
4066 bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
4067 epd->sync_io ? REQ_SYNC : 0);
4068
4069 ret = submit_one_bio(epd->bio, 0, epd->bio_flags); 4065 ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
4070 BUG_ON(ret < 0); /* -ENOMEM */ 4066 BUG_ON(ret < 0); /* -ENOMEM */
4071 epd->bio = NULL; 4067 epd->bio = NULL;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 128f3e58634f..d94e3f68b9b1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -135,6 +135,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
135 const u64 offset, 135 const u64 offset,
136 const u64 bytes) 136 const u64 bytes)
137{ 137{
138 unsigned long index = offset >> PAGE_SHIFT;
139 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
140 struct page *page;
141
142 while (index <= end_index) {
143 page = find_get_page(inode->i_mapping, index);
144 index++;
145 if (!page)
146 continue;
147 ClearPagePrivate2(page);
148 put_page(page);
149 }
138 return __endio_write_update_ordered(inode, offset + PAGE_SIZE, 150 return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
139 bytes - PAGE_SIZE, false); 151 bytes - PAGE_SIZE, false);
140} 152}
@@ -8357,11 +8369,8 @@ static void btrfs_endio_direct_read(struct bio *bio)
8357 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 8369 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8358 blk_status_t err = bio->bi_status; 8370 blk_status_t err = bio->bi_status;
8359 8371
8360 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) { 8372 if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
8361 err = btrfs_subio_endio_read(inode, io_bio, err); 8373 err = btrfs_subio_endio_read(inode, io_bio, err);
8362 if (!err)
8363 bio->bi_status = 0;
8364 }
8365 8374
8366 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 8375 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
8367 dip->logical_offset + dip->bytes - 1); 8376 dip->logical_offset + dip->bytes - 1);
@@ -8369,7 +8378,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
8369 8378
8370 kfree(dip); 8379 kfree(dip);
8371 8380
8372 dio_bio->bi_status = bio->bi_status; 8381 dio_bio->bi_status = err;
8373 dio_end_io(dio_bio); 8382 dio_end_io(dio_bio);
8374 8383
8375 if (io_bio->end_io) 8384 if (io_bio->end_io)
@@ -8387,6 +8396,7 @@ static void __endio_write_update_ordered(struct inode *inode,
8387 btrfs_work_func_t func; 8396 btrfs_work_func_t func;
8388 u64 ordered_offset = offset; 8397 u64 ordered_offset = offset;
8389 u64 ordered_bytes = bytes; 8398 u64 ordered_bytes = bytes;
8399 u64 last_offset;
8390 int ret; 8400 int ret;
8391 8401
8392 if (btrfs_is_free_space_inode(BTRFS_I(inode))) { 8402 if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
@@ -8398,6 +8408,7 @@ static void __endio_write_update_ordered(struct inode *inode,
8398 } 8408 }
8399 8409
8400again: 8410again:
8411 last_offset = ordered_offset;
8401 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 8412 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
8402 &ordered_offset, 8413 &ordered_offset,
8403 ordered_bytes, 8414 ordered_bytes,
@@ -8409,6 +8420,12 @@ again:
8409 btrfs_queue_work(wq, &ordered->work); 8420 btrfs_queue_work(wq, &ordered->work);
8410out_test: 8421out_test:
8411 /* 8422 /*
8423 * If btrfs_dec_test_ordered_pending does not find any ordered extent
8424 * in the range, we can exit.
8425 */
8426 if (ordered_offset == last_offset)
8427 return;
8428 /*
8412 * our bio might span multiple ordered extents. If we haven't 8429 * our bio might span multiple ordered extents. If we haven't
8413 * completed the accounting for the whole dio, go back and try again 8430 * completed the accounting for the whole dio, go back and try again
8414 */ 8431 */
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d6715c2bcdc4..6c7a49faf4e0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2773,9 +2773,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
2773 } 2773 }
2774 mutex_unlock(&fs_devices->device_list_mutex); 2774 mutex_unlock(&fs_devices->device_list_mutex);
2775 2775
2776 fi_args->nodesize = fs_info->super_copy->nodesize; 2776 fi_args->nodesize = fs_info->nodesize;
2777 fi_args->sectorsize = fs_info->super_copy->sectorsize; 2777 fi_args->sectorsize = fs_info->sectorsize;
2778 fi_args->clone_alignment = fs_info->super_copy->sectorsize; 2778 fi_args->clone_alignment = fs_info->sectorsize;
2779 2779
2780 if (copy_to_user(arg, fi_args, sizeof(*fi_args))) 2780 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2781 ret = -EFAULT; 2781 ret = -EFAULT;
@@ -3032,7 +3032,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
3032out: 3032out:
3033 if (ret) 3033 if (ret)
3034 btrfs_cmp_data_free(cmp); 3034 btrfs_cmp_data_free(cmp);
3035 return 0; 3035 return ret;
3036} 3036}
3037 3037
3038static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp) 3038static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
@@ -4061,6 +4061,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
4061 ret = PTR_ERR(new_root); 4061 ret = PTR_ERR(new_root);
4062 goto out; 4062 goto out;
4063 } 4063 }
4064 if (!is_fstree(new_root->objectid)) {
4065 ret = -ENOENT;
4066 goto out;
4067 }
4064 4068
4065 path = btrfs_alloc_path(); 4069 path = btrfs_alloc_path();
4066 if (!path) { 4070 if (!path) {
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5c8b61c86e61..e172d4843eae 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -807,7 +807,6 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
807 } 807 }
808 ret = 0; 808 ret = 0;
809out: 809out:
810 set_bit(BTRFS_FS_QUOTA_DISABLING, &root->fs_info->flags);
811 btrfs_free_path(path); 810 btrfs_free_path(path);
812 return ret; 811 return ret;
813} 812}
@@ -953,7 +952,6 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
953 if (!fs_info->quota_root) 952 if (!fs_info->quota_root)
954 goto out; 953 goto out;
955 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 954 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
956 set_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags);
957 btrfs_qgroup_wait_for_completion(fs_info, false); 955 btrfs_qgroup_wait_for_completion(fs_info, false);
958 spin_lock(&fs_info->qgroup_lock); 956 spin_lock(&fs_info->qgroup_lock);
959 quota_root = fs_info->quota_root; 957 quota_root = fs_info->quota_root;
@@ -1307,6 +1305,8 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
1307 } 1305 }
1308 } 1306 }
1309 ret = del_qgroup_item(trans, quota_root, qgroupid); 1307 ret = del_qgroup_item(trans, quota_root, qgroupid);
1308 if (ret && ret != -ENOENT)
1309 goto out;
1310 1310
1311 while (!list_empty(&qgroup->groups)) { 1311 while (!list_empty(&qgroup->groups)) {
1312 list = list_first_entry(&qgroup->groups, 1312 list = list_first_entry(&qgroup->groups,
@@ -2086,8 +2086,6 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
2086 2086
2087 if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags)) 2087 if (test_and_clear_bit(BTRFS_FS_QUOTA_ENABLING, &fs_info->flags))
2088 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); 2088 set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2089 if (test_and_clear_bit(BTRFS_FS_QUOTA_DISABLING, &fs_info->flags))
2090 clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
2091 2089
2092 spin_lock(&fs_info->qgroup_lock); 2090 spin_lock(&fs_info->qgroup_lock);
2093 while (!list_empty(&fs_info->dirty_qgroups)) { 2091 while (!list_empty(&fs_info->dirty_qgroups)) {
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 3a49a3c2fca4..9841faef08ea 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2400,11 +2400,11 @@ void free_reloc_roots(struct list_head *list)
2400 while (!list_empty(list)) { 2400 while (!list_empty(list)) {
2401 reloc_root = list_entry(list->next, struct btrfs_root, 2401 reloc_root = list_entry(list->next, struct btrfs_root,
2402 root_list); 2402 root_list);
2403 __del_reloc_root(reloc_root);
2403 free_extent_buffer(reloc_root->node); 2404 free_extent_buffer(reloc_root->node);
2404 free_extent_buffer(reloc_root->commit_root); 2405 free_extent_buffer(reloc_root->commit_root);
2405 reloc_root->node = NULL; 2406 reloc_root->node = NULL;
2406 reloc_root->commit_root = NULL; 2407 reloc_root->commit_root = NULL;
2407 __del_reloc_root(reloc_root);
2408 } 2408 }
2409} 2409}
2410 2410
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 32b043ef8ac9..8fd195cfe81b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2630,7 +2630,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)
2630 } else { 2630 } else {
2631 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2631 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2632 (int)(mode & S_IFMT)); 2632 (int)(mode & S_IFMT));
2633 ret = -ENOTSUPP; 2633 ret = -EOPNOTSUPP;
2634 goto out; 2634 goto out;
2635 } 2635 }
2636 2636
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 35a128acfbd1..161694b66038 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL 1135#ifdef CONFIG_BTRFS_FS_POSIX_ACL
1136 sb->s_flags |= MS_POSIXACL; 1136 sb->s_flags |= MS_POSIXACL;
1137#endif 1137#endif
1138 sb->s_flags |= MS_I_VERSION; 1138 sb->s_flags |= SB_I_VERSION;
1139 sb->s_iflags |= SB_I_CGROUPWB; 1139 sb->s_iflags |= SB_I_CGROUPWB;
1140 1140
1141 err = super_setup_bdi(sb); 1141 err = super_setup_bdi(sb);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index ad7f4bab640b..c800d067fcbf 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4181,6 +4181,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4181 struct extent_map *em, *n; 4181 struct extent_map *em, *n;
4182 struct list_head extents; 4182 struct list_head extents;
4183 struct extent_map_tree *tree = &inode->extent_tree; 4183 struct extent_map_tree *tree = &inode->extent_tree;
4184 u64 logged_start, logged_end;
4184 u64 test_gen; 4185 u64 test_gen;
4185 int ret = 0; 4186 int ret = 0;
4186 int num = 0; 4187 int num = 0;
@@ -4190,10 +4191,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4190 down_write(&inode->dio_sem); 4191 down_write(&inode->dio_sem);
4191 write_lock(&tree->lock); 4192 write_lock(&tree->lock);
4192 test_gen = root->fs_info->last_trans_committed; 4193 test_gen = root->fs_info->last_trans_committed;
4194 logged_start = start;
4195 logged_end = end;
4193 4196
4194 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4197 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4195 list_del_init(&em->list); 4198 list_del_init(&em->list);
4196
4197 /* 4199 /*
4198 * Just an arbitrary number, this can be really CPU intensive 4200 * Just an arbitrary number, this can be really CPU intensive
4199 * once we start getting a lot of extents, and really once we 4201 * once we start getting a lot of extents, and really once we
@@ -4208,6 +4210,12 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4208 4210
4209 if (em->generation <= test_gen) 4211 if (em->generation <= test_gen)
4210 continue; 4212 continue;
4213
4214 if (em->start < logged_start)
4215 logged_start = em->start;
4216 if ((em->start + em->len - 1) > logged_end)
4217 logged_end = em->start + em->len - 1;
4218
4211 /* Need a ref to keep it from getting evicted from cache */ 4219 /* Need a ref to keep it from getting evicted from cache */
4212 refcount_inc(&em->refs); 4220 refcount_inc(&em->refs);
4213 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4221 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -4216,7 +4224,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4216 } 4224 }
4217 4225
4218 list_sort(NULL, &extents, extent_cmp); 4226 list_sort(NULL, &extents, extent_cmp);
4219 btrfs_get_logged_extents(inode, logged_list, start, end); 4227 btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
4220 /* 4228 /*
4221 * Some ordered extents started by fsync might have completed 4229 * Some ordered extents started by fsync might have completed
4222 * before we could collect them into the list logged_list, which 4230 * before we could collect them into the list logged_list, which
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0e8f16c305df..b39737568c22 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6166,7 +6166,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6166 map_length = length; 6166 map_length = length;
6167 6167
6168 btrfs_bio_counter_inc_blocked(fs_info); 6168 btrfs_bio_counter_inc_blocked(fs_info);
6169 ret = __btrfs_map_block(fs_info, bio_op(bio), logical, 6169 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6170 &map_length, &bbio, mirror_num, 1); 6170 &map_length, &bbio, mirror_num, 1);
6171 if (ret) { 6171 if (ret) {
6172 btrfs_bio_counter_dec(fs_info); 6172 btrfs_bio_counter_dec(fs_info);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 157fe59fbabe..1978a8cb1cb1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1991,6 +1991,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
1991retry: 1991retry:
1992 spin_lock(&ci->i_ceph_lock); 1992 spin_lock(&ci->i_ceph_lock);
1993 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { 1993 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1994 spin_unlock(&ci->i_ceph_lock);
1994 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); 1995 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1995 goto out; 1996 goto out;
1996 } 1997 }
@@ -2008,8 +2009,10 @@ retry:
2008 mutex_lock(&session->s_mutex); 2009 mutex_lock(&session->s_mutex);
2009 goto retry; 2010 goto retry;
2010 } 2011 }
2011 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) 2012 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
2013 spin_unlock(&ci->i_ceph_lock);
2012 goto out; 2014 goto out;
2015 }
2013 2016
2014 flushing = __mark_caps_flushing(inode, session, true, 2017 flushing = __mark_caps_flushing(inode, session, true,
2015 &flush_tid, &oldest_flush_tid); 2018 &flush_tid, &oldest_flush_tid);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 84edfc60d87a..f23c820daaed 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -734,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
734 inode = req->r_inode; 734 inode = req->r_inode;
735 ihold(inode); 735 ihold(inode);
736 } else { 736 } else {
737 /* req->r_dentry is non-null for LSSNAP request. 737 /* req->r_dentry is non-null for LSSNAP request */
738 * fall-thru */ 738 rcu_read_lock();
739 WARN_ON_ONCE(!req->r_dentry); 739 inode = get_nonsnap_parent(req->r_dentry);
740 rcu_read_unlock();
741 dout("__choose_mds using snapdir's parent %p\n", inode);
740 } 742 }
741 } 743 } else if (req->r_dentry) {
742 if (!inode && req->r_dentry) {
743 /* ignore race with rename; old or new d_parent is okay */ 744 /* ignore race with rename; old or new d_parent is okay */
744 struct dentry *parent; 745 struct dentry *parent;
745 struct inode *dir; 746 struct inode *dir;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 1ffc8b426c1c..7fc0b850c352 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
374 realm->ino, realm, snapc, snapc->seq, 374 realm->ino, realm, snapc, snapc->seq,
375 (unsigned int) snapc->num_snaps); 375 (unsigned int) snapc->num_snaps);
376 376
377 if (realm->cached_context) { 377 ceph_put_snap_context(realm->cached_context);
378 ceph_put_snap_context(realm->cached_context);
379 /* queue realm for cap_snap creation */
380 list_add_tail(&realm->dirty_item, dirty_realms);
381 }
382 realm->cached_context = snapc; 378 realm->cached_context = snapc;
379 /* queue realm for cap_snap creation */
380 list_add_tail(&realm->dirty_item, dirty_realms);
383 return 0; 381 return 0;
384 382
385fail: 383fail:
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f7243617316c..d5b2e12b5d02 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -5,9 +5,14 @@ config CIFS
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_MD4 6 select CRYPTO_MD4
7 select CRYPTO_MD5 7 select CRYPTO_MD5
8 select CRYPTO_SHA256
9 select CRYPTO_CMAC
8 select CRYPTO_HMAC 10 select CRYPTO_HMAC
9 select CRYPTO_ARC4 11 select CRYPTO_ARC4
12 select CRYPTO_AEAD2
13 select CRYPTO_CCM
10 select CRYPTO_ECB 14 select CRYPTO_ECB
15 select CRYPTO_AES
11 select CRYPTO_DES 16 select CRYPTO_DES
12 help 17 help
13 This is the client VFS module for the SMB3 family of NAS protocols, 18 This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de5b2e1fcce5..e185b2853eab 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,7 +661,9 @@ struct TCP_Server_Info {
661#endif 661#endif
662 unsigned int max_read; 662 unsigned int max_read;
663 unsigned int max_write; 663 unsigned int max_write;
664 __u8 preauth_hash[512]; 664#ifdef CONFIG_CIFS_SMB311
665 __u8 preauth_sha_hash[64]; /* save initital negprot hash */
666#endif /* 3.1.1 */
665 struct delayed_work reconnect; /* reconnect workqueue job */ 667 struct delayed_work reconnect; /* reconnect workqueue job */
666 struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ 668 struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
667 unsigned long echo_interval; 669 unsigned long echo_interval;
@@ -849,7 +851,9 @@ struct cifs_ses {
849 __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; 851 __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
850 __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; 852 __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
851 __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; 853 __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
852 __u8 preauth_hash[512]; 854#ifdef CONFIG_CIFS_SMB311
855 __u8 preauth_sha_hash[64];
856#endif /* 3.1.1 */
853}; 857};
854 858
855static inline bool 859static inline bool
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e702d48bd023..81ba6e0d88d8 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
204 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 204 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
205 int i; 205 int i;
206 206
207 if (unlikely(direntry->d_name.len > 207 if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
208 direntry->d_name.len >
208 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) 209 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
209 return -ENAMETOOLONG; 210 return -ENAMETOOLONG;
210 211
@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
520 521
521 rc = check_name(direntry, tcon); 522 rc = check_name(direntry, tcon);
522 if (rc) 523 if (rc)
523 goto out_free_xid; 524 goto out;
524 525
525 server = tcon->ses->server; 526 server = tcon->ses->server;
526 527
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 7ca9808a0daa..62c88dfed57b 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
214 {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"}, 214 {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
215 {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, 215 {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
216 {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, 216 {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
217 {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"}, 217 {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
218 {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, 218 {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
219 {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, 219 {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
220 {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, 220 {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0dafdbae1f8c..bdb963d0ba32 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -522,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
522 struct cifs_open_parms oparms; 522 struct cifs_open_parms oparms;
523 struct cifs_fid fid; 523 struct cifs_fid fid;
524 struct smb2_file_full_ea_info *smb2_data; 524 struct smb2_file_full_ea_info *smb2_data;
525 int ea_buf_size = SMB2_MIN_EA_BUF;
525 526
526 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 527 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
527 if (!utf16_path) 528 if (!utf16_path)
@@ -541,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
541 return rc; 542 return rc;
542 } 543 }
543 544
544 smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL); 545 while (1) {
545 if (smb2_data == NULL) { 546 smb2_data = kzalloc(ea_buf_size, GFP_KERNEL);
546 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 547 if (smb2_data == NULL) {
547 return -ENOMEM; 548 SMB2_close(xid, tcon, fid.persistent_fid,
549 fid.volatile_fid);
550 return -ENOMEM;
551 }
552
553 rc = SMB2_query_eas(xid, tcon, fid.persistent_fid,
554 fid.volatile_fid,
555 ea_buf_size, smb2_data);
556
557 if (rc != -E2BIG)
558 break;
559
560 kfree(smb2_data);
561 ea_buf_size <<= 1;
562
563 if (ea_buf_size > SMB2_MAX_EA_BUF) {
564 cifs_dbg(VFS, "EA size is too large\n");
565 SMB2_close(xid, tcon, fid.persistent_fid,
566 fid.volatile_fid);
567 return -ENOMEM;
568 }
548 } 569 }
549 570
550 rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid,
551 smb2_data);
552 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 571 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
553 572
554 if (!rc) 573 if (!rc)
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6f0e6343c15e..5331631386a2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -648,7 +648,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
648{ 648{
649 int rc = 0; 649 int rc = 0;
650 struct validate_negotiate_info_req vneg_inbuf; 650 struct validate_negotiate_info_req vneg_inbuf;
651 struct validate_negotiate_info_rsp *pneg_rsp; 651 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
652 u32 rsplen; 652 u32 rsplen;
653 u32 inbuflen; /* max of 4 dialects */ 653 u32 inbuflen; /* max of 4 dialects */
654 654
@@ -727,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
727 rsplen); 727 rsplen);
728 728
729 /* relax check since Mac returns max bufsize allowed on ioctl */ 729 /* relax check since Mac returns max bufsize allowed on ioctl */
730 if (rsplen > CIFSMaxBufSize) 730 if ((rsplen > CIFSMaxBufSize)
731 return -EIO; 731 || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
732 goto err_rsp_free;
732 } 733 }
733 734
734 /* check validate negotiate info response matches what we got earlier */ 735 /* check validate negotiate info response matches what we got earlier */
@@ -747,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
747 748
748 /* validate negotiate successful */ 749 /* validate negotiate successful */
749 cifs_dbg(FYI, "validate negotiate info successful\n"); 750 cifs_dbg(FYI, "validate negotiate info successful\n");
751 kfree(pneg_rsp);
750 return 0; 752 return 0;
751 753
752vneg_out: 754vneg_out:
753 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); 755 cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
756err_rsp_free:
757 kfree(pneg_rsp);
754 return -EIO; 758 return -EIO;
755} 759}
756 760
@@ -1255,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1255 struct smb2_tree_connect_req *req; 1259 struct smb2_tree_connect_req *req;
1256 struct smb2_tree_connect_rsp *rsp = NULL; 1260 struct smb2_tree_connect_rsp *rsp = NULL;
1257 struct kvec iov[2]; 1261 struct kvec iov[2];
1258 struct kvec rsp_iov; 1262 struct kvec rsp_iov = { NULL, 0 };
1259 int rc = 0; 1263 int rc = 0;
1260 int resp_buftype; 1264 int resp_buftype;
1261 int unc_path_len; 1265 int unc_path_len;
@@ -1372,7 +1376,7 @@ tcon_exit:
1372 return rc; 1376 return rc;
1373 1377
1374tcon_error_exit: 1378tcon_error_exit:
1375 if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { 1379 if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
1376 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); 1380 cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
1377 } 1381 }
1378 goto tcon_exit; 1382 goto tcon_exit;
@@ -1975,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
1975 } else 1979 } else
1976 iov[0].iov_len = get_rfc1002_length(req) + 4; 1980 iov[0].iov_len = get_rfc1002_length(req) + 4;
1977 1981
1982 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
1983 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
1984 req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1978 1985
1979 rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); 1986 rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
1980 cifs_small_buf_release(req); 1987 cifs_small_buf_release(req);
@@ -2191,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
2191 req->PersistentFileId = persistent_fid; 2198 req->PersistentFileId = persistent_fid;
2192 req->VolatileFileId = volatile_fid; 2199 req->VolatileFileId = volatile_fid;
2193 req->AdditionalInformation = cpu_to_le32(additional_info); 2200 req->AdditionalInformation = cpu_to_le32(additional_info);
2194 /* 4 for rfc1002 length field and 1 for Buffer */ 2201
2195 req->InputBufferOffset = 2202 /*
2196 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); 2203 * We do not use the input buffer (do not send extra byte)
2204 */
2205 req->InputBufferOffset = 0;
2206 inc_rfc1001_len(req, -1);
2207
2197 req->OutputBufferLength = cpu_to_le32(output_len); 2208 req->OutputBufferLength = cpu_to_le32(output_len);
2198 2209
2199 iov[0].iov_base = (char *)req; 2210 iov[0].iov_base = (char *)req;
@@ -2233,12 +2244,12 @@ qinf_exit:
2233} 2244}
2234 2245
2235int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, 2246int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
2236 u64 persistent_fid, u64 volatile_fid, 2247 u64 persistent_fid, u64 volatile_fid,
2237 struct smb2_file_full_ea_info *data) 2248 int ea_buf_size, struct smb2_file_full_ea_info *data)
2238{ 2249{
2239 return query_info(xid, tcon, persistent_fid, volatile_fid, 2250 return query_info(xid, tcon, persistent_fid, volatile_fid,
2240 FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, 2251 FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
2241 SMB2_MAX_EA_BUF, 2252 ea_buf_size,
2242 sizeof(struct smb2_file_full_ea_info), 2253 sizeof(struct smb2_file_full_ea_info),
2243 (void **)&data, 2254 (void **)&data,
2244 NULL); 2255 NULL);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6c9653a130c8..c2ec934be968 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -832,7 +832,7 @@ struct smb2_flush_rsp {
832/* Channel field for read and write: exactly one of following flags can be set*/ 832/* Channel field for read and write: exactly one of following flags can be set*/
833#define SMB2_CHANNEL_NONE 0x00000000 833#define SMB2_CHANNEL_NONE 0x00000000
834#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ 834#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
835#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ 835#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
836 836
837/* SMB2 read request without RFC1001 length at the beginning */ 837/* SMB2 read request without RFC1001 length at the beginning */
838struct smb2_read_plain_req { 838struct smb2_read_plain_req {
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
1178 char FileName[0]; /* Name to be assigned to new link */ 1178 char FileName[0]; /* Name to be assigned to new link */
1179} __packed; /* level 11 Set */ 1179} __packed; /* level 11 Set */
1180 1180
1181#define SMB2_MAX_EA_BUF 2048 1181#define SMB2_MIN_EA_BUF 2048
1182#define SMB2_MAX_EA_BUF 65536
1182 1183
1183struct smb2_file_full_ea_info { /* encoding of response for level 15 */ 1184struct smb2_file_full_ea_info { /* encoding of response for level 15 */
1184 __le32 next_entry_offset; 1185 __le32 next_entry_offset;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 003217099ef3..e9ab5227e7a8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
134 u64 persistent_file_id, u64 volatile_file_id); 134 u64 persistent_file_id, u64 volatile_file_id);
135extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, 135extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
136 u64 persistent_file_id, u64 volatile_file_id, 136 u64 persistent_file_id, u64 volatile_file_id,
137 int ea_buf_size,
137 struct smb2_file_full_ea_info *data); 138 struct smb2_file_full_ea_info *data);
138extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, 139extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
139 u64 persistent_file_id, u64 volatile_file_id, 140 u64 persistent_file_id, u64 volatile_file_id,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 67367cf1f8cd..99493946e2f9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
390 return generate_smb3signingkey(ses, &triplet); 390 return generate_smb3signingkey(ses, &triplet);
391} 391}
392 392
393#ifdef CONFIG_CIFS_SMB311
393int 394int
394generate_smb311signingkey(struct cifs_ses *ses) 395generate_smb311signingkey(struct cifs_ses *ses)
395 396
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
398 struct derivation *d; 399 struct derivation *d;
399 400
400 d = &triplet.signing; 401 d = &triplet.signing;
401 d->label.iov_base = "SMB2AESCMAC"; 402 d->label.iov_base = "SMBSigningKey";
402 d->label.iov_len = 12; 403 d->label.iov_len = 14;
403 d->context.iov_base = "SmbSign"; 404 d->context.iov_base = ses->preauth_sha_hash;
404 d->context.iov_len = 8; 405 d->context.iov_len = 64;
405 406
406 d = &triplet.encryption; 407 d = &triplet.encryption;
407 d->label.iov_base = "SMB2AESCCM"; 408 d->label.iov_base = "SMBC2SCipherKey";
408 d->label.iov_len = 11; 409 d->label.iov_len = 16;
409 d->context.iov_base = "ServerIn "; 410 d->context.iov_base = ses->preauth_sha_hash;
410 d->context.iov_len = 10; 411 d->context.iov_len = 64;
411 412
412 d = &triplet.decryption; 413 d = &triplet.decryption;
413 d->label.iov_base = "SMB2AESCCM"; 414 d->label.iov_base = "SMBS2CCipherKey";
414 d->label.iov_len = 11; 415 d->label.iov_len = 16;
415 d->context.iov_base = "ServerOut"; 416 d->context.iov_base = ses->preauth_sha_hash;
416 d->context.iov_len = 10; 417 d->context.iov_len = 64;
417 418
418 return generate_smb3signingkey(ses, &triplet); 419 return generate_smb3signingkey(ses, &triplet);
419} 420}
421#endif /* 311 */
420 422
421int 423int
422smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) 424smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588c7ac3..8e704d12a1cf 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
109 goto out; 109 goto out;
110 } 110 }
111 ukp = user_key_payload_locked(keyring_key); 111 ukp = user_key_payload_locked(keyring_key);
112 if (!ukp) {
113 /* key was revoked before we acquired its semaphore */
114 res = -EKEYREVOKED;
115 goto out;
116 }
112 if (ukp->datalen != sizeof(struct fscrypt_key)) { 117 if (ukp->datalen != sizeof(struct fscrypt_key)) {
113 res = -EINVAL; 118 res = -EINVAL;
114 goto out; 119 goto out;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 5fa2211e49ae..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
45#define DIO_PAGES 64 45#define DIO_PAGES 64
46 46
47/* 47/*
48 * Flags for dio_complete()
49 */
50#define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */
51#define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */
52
53/*
48 * This code generally works in units of "dio_blocks". A dio_block is 54 * This code generally works in units of "dio_blocks". A dio_block is
49 * somewhere between the hard sector size and the filesystem block size. it 55 * somewhere between the hard sector size and the filesystem block size. it
50 * is determined on a per-invocation basis. When talking to the filesystem 56 * is determined on a per-invocation basis. When talking to the filesystem
@@ -225,10 +231,11 @@ static inline struct page *dio_get_page(struct dio *dio,
225 * filesystems can use it to hold additional state between get_block calls and 231 * filesystems can use it to hold additional state between get_block calls and
226 * dio_complete. 232 * dio_complete.
227 */ 233 */
228static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) 234static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
229{ 235{
230 loff_t offset = dio->iocb->ki_pos; 236 loff_t offset = dio->iocb->ki_pos;
231 ssize_t transferred = 0; 237 ssize_t transferred = 0;
238 int err;
232 239
233 /* 240 /*
234 * AIO submission can race with bio completion to get here while 241 * AIO submission can race with bio completion to get here while
@@ -259,18 +266,37 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
259 ret = transferred; 266 ret = transferred;
260 267
261 if (dio->end_io) { 268 if (dio->end_io) {
262 int err;
263
264 // XXX: ki_pos?? 269 // XXX: ki_pos??
265 err = dio->end_io(dio->iocb, offset, ret, dio->private); 270 err = dio->end_io(dio->iocb, offset, ret, dio->private);
266 if (err) 271 if (err)
267 ret = err; 272 ret = err;
268 } 273 }
269 274
275 /*
276 * Try again to invalidate clean pages which might have been cached by
277 * non-direct readahead, or faulted in by get_user_pages() if the source
278 * of the write was an mmap'ed region of the file we're writing. Either
279 * one is a pretty crazy thing to do, so we don't support it 100%. If
280 * this invalidation fails, tough, the write still worked...
281 *
282 * And this page cache invalidation has to be after dio->end_io(), as
283 * some filesystems convert unwritten extents to real allocations in
284 * end_io() when necessary, otherwise a racing buffer read would cache
285 * zeros from unwritten extents.
286 */
287 if (flags & DIO_COMPLETE_INVALIDATE &&
288 ret > 0 && dio->op == REQ_OP_WRITE &&
289 dio->inode->i_mapping->nrpages) {
290 err = invalidate_inode_pages2_range(dio->inode->i_mapping,
291 offset >> PAGE_SHIFT,
292 (offset + ret - 1) >> PAGE_SHIFT);
293 WARN_ON_ONCE(err);
294 }
295
270 if (!(dio->flags & DIO_SKIP_DIO_COUNT)) 296 if (!(dio->flags & DIO_SKIP_DIO_COUNT))
271 inode_dio_end(dio->inode); 297 inode_dio_end(dio->inode);
272 298
273 if (is_async) { 299 if (flags & DIO_COMPLETE_ASYNC) {
274 /* 300 /*
275 * generic_write_sync expects ki_pos to have been updated 301 * generic_write_sync expects ki_pos to have been updated
276 * already, but the submission path only does this for 302 * already, but the submission path only does this for
@@ -291,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
291{ 317{
292 struct dio *dio = container_of(work, struct dio, complete_work); 318 struct dio *dio = container_of(work, struct dio, complete_work);
293 319
294 dio_complete(dio, 0, true); 320 dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
295} 321}
296 322
297static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); 323static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -304,6 +330,7 @@ static void dio_bio_end_aio(struct bio *bio)
304 struct dio *dio = bio->bi_private; 330 struct dio *dio = bio->bi_private;
305 unsigned long remaining; 331 unsigned long remaining;
306 unsigned long flags; 332 unsigned long flags;
333 bool defer_completion = false;
307 334
308 /* cleanup the bio */ 335 /* cleanup the bio */
309 dio_bio_complete(dio, bio); 336 dio_bio_complete(dio, bio);
@@ -315,12 +342,24 @@ static void dio_bio_end_aio(struct bio *bio)
315 spin_unlock_irqrestore(&dio->bio_lock, flags); 342 spin_unlock_irqrestore(&dio->bio_lock, flags);
316 343
317 if (remaining == 0) { 344 if (remaining == 0) {
318 if (dio->result && dio->defer_completion) { 345 /*
346 * Defer completion when defer_completion is set or
347 * when the inode has pages mapped and this is AIO write.
348 * We need to invalidate those pages because there is a
349 * chance they contain stale data in the case buffered IO
350 * went in between AIO submission and completion into the
351 * same region.
352 */
353 if (dio->result)
354 defer_completion = dio->defer_completion ||
355 (dio->op == REQ_OP_WRITE &&
356 dio->inode->i_mapping->nrpages);
357 if (defer_completion) {
319 INIT_WORK(&dio->complete_work, dio_aio_complete_work); 358 INIT_WORK(&dio->complete_work, dio_aio_complete_work);
320 queue_work(dio->inode->i_sb->s_dio_done_wq, 359 queue_work(dio->inode->i_sb->s_dio_done_wq,
321 &dio->complete_work); 360 &dio->complete_work);
322 } else { 361 } else {
323 dio_complete(dio, 0, true); 362 dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
324 } 363 }
325 } 364 }
326} 365}
@@ -838,7 +877,8 @@ out:
838 */ 877 */
839 if (sdio->boundary) { 878 if (sdio->boundary) {
840 ret = dio_send_cur_page(dio, sdio, map_bh); 879 ret = dio_send_cur_page(dio, sdio, map_bh);
841 dio_bio_submit(dio, sdio); 880 if (sdio->bio)
881 dio_bio_submit(dio, sdio);
842 put_page(sdio->cur_page); 882 put_page(sdio->cur_page);
843 sdio->cur_page = NULL; 883 sdio->cur_page = NULL;
844 } 884 }
@@ -1210,10 +1250,19 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1210 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue 1250 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
1211 * so that we can call ->fsync. 1251 * so that we can call ->fsync.
1212 */ 1252 */
1213 if (dio->is_async && iov_iter_rw(iter) == WRITE && 1253 if (dio->is_async && iov_iter_rw(iter) == WRITE) {
1214 ((iocb->ki_filp->f_flags & O_DSYNC) || 1254 retval = 0;
1215 IS_SYNC(iocb->ki_filp->f_mapping->host))) { 1255 if ((iocb->ki_filp->f_flags & O_DSYNC) ||
1216 retval = dio_set_defer_completion(dio); 1256 IS_SYNC(iocb->ki_filp->f_mapping->host))
1257 retval = dio_set_defer_completion(dio);
1258 else if (!dio->inode->i_sb->s_dio_done_wq) {
1259 /*
1260 * In case of AIO write racing with buffered read we
1261 * need to defer completion. We can't decide this now,
1262 * however the workqueue needs to be initialized here.
1263 */
1264 retval = sb_init_dio_done_wq(dio->inode->i_sb);
1265 }
1217 if (retval) { 1266 if (retval) {
1218 /* 1267 /*
1219 * We grab i_mutex only for reads so we don't have 1268 * We grab i_mutex only for reads so we don't have
@@ -1322,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
1322 dio_await_completion(dio); 1371 dio_await_completion(dio);
1323 1372
1324 if (drop_refcount(dio) == 0) { 1373 if (drop_refcount(dio) == 0) {
1325 retval = dio_complete(dio, retval, false); 1374 retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
1326 } else 1375 } else
1327 BUG_ON(retval != -EIOCBQUEUED); 1376 BUG_ON(retval != -EIOCBQUEUED);
1328 1377
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 9c351bf757b2..3fbc0ff79699 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
84static inline struct ecryptfs_auth_tok * 84static inline struct ecryptfs_auth_tok *
85ecryptfs_get_encrypted_key_payload_data(struct key *key) 85ecryptfs_get_encrypted_key_payload_data(struct key *key)
86{ 86{
87 if (key->type == &key_type_encrypted) 87 struct encrypted_key_payload *payload;
88 return (struct ecryptfs_auth_tok *) 88
89 (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data); 89 if (key->type != &key_type_encrypted)
90 else
91 return NULL; 90 return NULL;
91
92 payload = key->payload.data[0];
93 if (!payload)
94 return ERR_PTR(-EKEYREVOKED);
95
96 return (struct ecryptfs_auth_tok *)payload->payload_data;
92} 97}
93 98
94static inline struct key *ecryptfs_get_encrypted_key(char *sig) 99static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
114ecryptfs_get_key_payload_data(struct key *key) 119ecryptfs_get_key_payload_data(struct key *key)
115{ 120{
116 struct ecryptfs_auth_tok *auth_tok; 121 struct ecryptfs_auth_tok *auth_tok;
122 struct user_key_payload *ukp;
117 123
118 auth_tok = ecryptfs_get_encrypted_key_payload_data(key); 124 auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
119 if (!auth_tok) 125 if (auth_tok)
120 return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
121 else
122 return auth_tok; 126 return auth_tok;
127
128 ukp = user_key_payload_locked(key);
129 if (!ukp)
130 return ERR_PTR(-EKEYREVOKED);
131
132 return (struct ecryptfs_auth_tok *)ukp->data;
123} 133}
124 134
125#define ECRYPTFS_MAX_KEYSET_SIZE 1024 135#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ out:
459 * @auth_tok_key: key containing the authentication token 459 * @auth_tok_key: key containing the authentication token
460 * @auth_tok: authentication token 460 * @auth_tok: authentication token
461 * 461 *
462 * Returns zero on valid auth tok; -EINVAL otherwise 462 * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
463 * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
463 */ 464 */
464static int 465static int
465ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, 466ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
468 int rc = 0; 469 int rc = 0;
469 470
470 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); 471 (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
472 if (IS_ERR(*auth_tok)) {
473 rc = PTR_ERR(*auth_tok);
474 *auth_tok = NULL;
475 goto out;
476 }
477
471 if (ecryptfs_verify_version((*auth_tok)->version)) { 478 if (ecryptfs_verify_version((*auth_tok)->version)) {
472 printk(KERN_ERR "Data structure version mismatch. Userspace " 479 printk(KERN_ERR "Data structure version mismatch. Userspace "
473 "tools must match eCryptfs kernel module with major " 480 "tools must match eCryptfs kernel module with major "
diff --git a/fs/exec.c b/fs/exec.c
index ac34d9724684..3e14ba25f678 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1410,7 +1410,7 @@ static void free_bprm(struct linux_binprm *bprm)
1410 kfree(bprm); 1410 kfree(bprm);
1411} 1411}
1412 1412
1413int bprm_change_interp(char *interp, struct linux_binprm *bprm) 1413int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1414{ 1414{
1415 /* If a binfmt changed the interp, free it first. */ 1415 /* If a binfmt changed the interp, free it first. */
1416 if (bprm->interp != bprm->filename) 1416 if (bprm->interp != bprm->filename)
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
1802 /* execve succeeded */ 1802 /* execve succeeded */
1803 current->fs->in_exec = 0; 1803 current->fs->in_exec = 0;
1804 current->in_execve = 0; 1804 current->in_execve = 0;
1805 membarrier_execve(current);
1805 acct_update_integrals(current); 1806 acct_update_integrals(current);
1806 task_numa_free(current); 1807 task_numa_free(current);
1807 free_bprm(bprm); 1808 free_bprm(bprm);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b104096fce9e..b0915b734a38 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; 1677 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1678 return 1; 1678 return 1;
1679 case Opt_i_version: 1679 case Opt_i_version:
1680 sb->s_flags |= MS_I_VERSION; 1680 sb->s_flags |= SB_I_VERSION;
1681 return 1; 1681 return 1;
1682 case Opt_lazytime: 1682 case Opt_lazytime:
1683 sb->s_flags |= MS_LAZYTIME; 1683 sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); 2060 SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) 2061 if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); 2062 SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
2063 if (sb->s_flags & MS_I_VERSION) 2063 if (sb->s_flags & SB_I_VERSION)
2064 SEQ_OPTS_PUTS("i_version"); 2064 SEQ_OPTS_PUTS("i_version");
2065 if (nodefs || sbi->s_stripe) 2065 if (nodefs || sbi->s_stripe)
2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); 2066 SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9a7c90386947..4b4a72f392be 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
2525bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); 2525bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
2526void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); 2526void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new);
2527void stop_discard_thread(struct f2fs_sb_info *sbi); 2527void stop_discard_thread(struct f2fs_sb_info *sbi);
2528void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); 2528void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount);
2529void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); 2529void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
2530void release_discard_addrs(struct f2fs_sb_info *sbi); 2530void release_discard_addrs(struct f2fs_sb_info *sbi);
2531int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 2531int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 621b9b3d320b..c695ff462ee6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi)
1210} 1210}
1211 1211
1212/* This comes from f2fs_put_super and f2fs_trim_fs */ 1212/* This comes from f2fs_put_super and f2fs_trim_fs */
1213void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) 1213void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount)
1214{ 1214{
1215 __issue_discard_cmd(sbi, false); 1215 __issue_discard_cmd(sbi, false);
1216 __drop_discard_cmd(sbi); 1216 __drop_discard_cmd(sbi);
1217 __wait_discard_cmd(sbi, false); 1217 __wait_discard_cmd(sbi, !umount);
1218} 1218}
1219 1219
1220static void mark_discard_range_all(struct f2fs_sb_info *sbi) 1220static void mark_discard_range_all(struct f2fs_sb_info *sbi)
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2244 } 2244 }
2245 /* It's time to issue all the filed discards */ 2245 /* It's time to issue all the filed discards */
2246 mark_discard_range_all(sbi); 2246 mark_discard_range_all(sbi);
2247 f2fs_wait_discard_bios(sbi); 2247 f2fs_wait_discard_bios(sbi, false);
2248out: 2248out:
2249 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); 2249 range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
2250 return err; 2250 return err;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 89f61eb3d167..933c3d529e65 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb)
801 } 801 }
802 802
803 /* be sure to wait for any on-going discard commands */ 803 /* be sure to wait for any on-going discard commands */
804 f2fs_wait_discard_bios(sbi); 804 f2fs_wait_discard_bios(sbi, true);
805 805
806 if (f2fs_discard_en(sbi) && !sbi->discard_blks) { 806 if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
807 struct cp_control cpc = { 807 struct cp_control cpc = {
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index b5ab06fabc60..0438d4cd91ef 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
331 rcu_read_lock(); 331 rcu_read_lock();
332 332
333 confkey = user_key_payload_rcu(key); 333 confkey = user_key_payload_rcu(key);
334 if (!confkey) {
335 /* key was revoked */
336 rcu_read_unlock();
337 key_put(key);
338 goto no_config;
339 }
340
334 buf = confkey->data; 341 buf = confkey->data;
335 342
336 for (len = confkey->datalen - 1; len >= 0; len--) { 343 for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 622081b97426..24967382a7b1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
1308 */ 1308 */
1309 over = !dir_emit(ctx, dirent->name, dirent->namelen, 1309 over = !dir_emit(ctx, dirent->name, dirent->namelen,
1310 dirent->ino, dirent->type); 1310 dirent->ino, dirent->type);
1311 ctx->pos = dirent->off; 1311 if (!over)
1312 ctx->pos = dirent->off;
1312 } 1313 }
1313 1314
1314 buf += reclen; 1315 buf += reclen;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 65c88379a3a1..94a745acaef8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1059 if (sb->s_flags & MS_MANDLOCK) 1059 if (sb->s_flags & MS_MANDLOCK)
1060 goto err; 1060 goto err;
1061 1061
1062 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1062 sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
1063 1063
1064 if (!parse_fuse_opt(data, &d, is_bdev)) 1064 if (!parse_fuse_opt(data, &d, is_bdev))
1065 goto err; 1065 goto err;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 98e845b7841b..11066d8647d2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1945,13 +1945,9 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1945{ 1945{
1946 struct gfs2_glock_iter *gi = seq->private; 1946 struct gfs2_glock_iter *gi = seq->private;
1947 loff_t n = *pos; 1947 loff_t n = *pos;
1948 int ret;
1949
1950 if (gi->last_pos <= *pos)
1951 n = (*pos - gi->last_pos);
1952 1948
1953 ret = rhashtable_walk_start(&gi->hti); 1949 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
1954 if (ret) 1950 if (rhashtable_walk_start(&gi->hti) != 0)
1955 return NULL; 1951 return NULL;
1956 1952
1957 do { 1953 do {
@@ -1959,6 +1955,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1959 } while (gi->gl && n--); 1955 } while (gi->gl && n--);
1960 1956
1961 gi->last_pos = *pos; 1957 gi->last_pos = *pos;
1958
1962 return gi->gl; 1959 return gi->gl;
1963} 1960}
1964 1961
@@ -1970,6 +1967,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1970 (*pos)++; 1967 (*pos)++;
1971 gi->last_pos = *pos; 1968 gi->last_pos = *pos;
1972 gfs2_glock_iter_next(gi); 1969 gfs2_glock_iter_next(gi);
1970
1973 return gi->gl; 1971 return gi->gl;
1974} 1972}
1975 1973
@@ -1980,6 +1978,7 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1980 1978
1981 gi->gl = NULL; 1979 gi->gl = NULL;
1982 rhashtable_walk_stop(&gi->hti); 1980 rhashtable_walk_stop(&gi->hti);
1981 rhashtable_walk_exit(&gi->hti);
1983} 1982}
1984 1983
1985static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 1984static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
@@ -2042,12 +2041,10 @@ static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2042 struct gfs2_glock_iter *gi = seq->private; 2041 struct gfs2_glock_iter *gi = seq->private;
2043 2042
2044 gi->sdp = inode->i_private; 2043 gi->sdp = inode->i_private;
2045 gi->last_pos = 0;
2046 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2044 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2047 if (seq->buf) 2045 if (seq->buf)
2048 seq->size = GFS2_SEQ_GOODSIZE; 2046 seq->size = GFS2_SEQ_GOODSIZE;
2049 gi->gl = NULL; 2047 gi->gl = NULL;
2050 rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2051 } 2048 }
2052 return ret; 2049 return ret;
2053} 2050}
@@ -2063,7 +2060,6 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
2063 struct gfs2_glock_iter *gi = seq->private; 2060 struct gfs2_glock_iter *gi = seq->private;
2064 2061
2065 gi->gl = NULL; 2062 gi->gl = NULL;
2066 rhashtable_walk_exit(&gi->hti);
2067 return seq_release_private(inode, file); 2063 return seq_release_private(inode, file);
2068} 2064}
2069 2065
diff --git a/fs/iomap.c b/fs/iomap.c
index 269b24a01f32..d4801f8dd4fd 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -713,6 +713,8 @@ struct iomap_dio {
713static ssize_t iomap_dio_complete(struct iomap_dio *dio) 713static ssize_t iomap_dio_complete(struct iomap_dio *dio)
714{ 714{
715 struct kiocb *iocb = dio->iocb; 715 struct kiocb *iocb = dio->iocb;
716 struct inode *inode = file_inode(iocb->ki_filp);
717 loff_t offset = iocb->ki_pos;
716 ssize_t ret; 718 ssize_t ret;
717 719
718 if (dio->end_io) { 720 if (dio->end_io) {
@@ -726,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
726 if (likely(!ret)) { 728 if (likely(!ret)) {
727 ret = dio->size; 729 ret = dio->size;
728 /* check for short read */ 730 /* check for short read */
729 if (iocb->ki_pos + ret > dio->i_size && 731 if (offset + ret > dio->i_size &&
730 !(dio->flags & IOMAP_DIO_WRITE)) 732 !(dio->flags & IOMAP_DIO_WRITE))
731 ret = dio->i_size - iocb->ki_pos; 733 ret = dio->i_size - offset;
732 iocb->ki_pos += ret; 734 iocb->ki_pos += ret;
733 } 735 }
734 736
737 /*
738 * Try again to invalidate clean pages which might have been cached by
739 * non-direct readahead, or faulted in by get_user_pages() if the source
740 * of the write was an mmap'ed region of the file we're writing. Either
741 * one is a pretty crazy thing to do, so we don't support it 100%. If
742 * this invalidation fails, tough, the write still worked...
743 *
744 * And this page cache invalidation has to be after dio->end_io(), as
745 * some filesystems convert unwritten extents to real allocations in
746 * end_io() when necessary, otherwise a racing buffer read would cache
747 * zeros from unwritten extents.
748 */
749 if (!dio->error &&
750 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
751 int err;
752 err = invalidate_inode_pages2_range(inode->i_mapping,
753 offset >> PAGE_SHIFT,
754 (offset + dio->size - 1) >> PAGE_SHIFT);
755 WARN_ON_ONCE(err);
756 }
757
735 inode_dio_end(file_inode(iocb->ki_filp)); 758 inode_dio_end(file_inode(iocb->ki_filp));
736 kfree(dio); 759 kfree(dio);
737 760
@@ -993,6 +1016,13 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
993 WARN_ON_ONCE(ret); 1016 WARN_ON_ONCE(ret);
994 ret = 0; 1017 ret = 0;
995 1018
1019 if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
1020 !inode->i_sb->s_dio_done_wq) {
1021 ret = sb_init_dio_done_wq(inode->i_sb);
1022 if (ret < 0)
1023 goto out_free_dio;
1024 }
1025
996 inode_dio_begin(inode); 1026 inode_dio_begin(inode);
997 1027
998 blk_start_plug(&plug); 1028 blk_start_plug(&plug);
@@ -1015,13 +1045,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1015 if (ret < 0) 1045 if (ret < 0)
1016 iomap_dio_set_error(dio, ret); 1046 iomap_dio_set_error(dio, ret);
1017 1047
1018 if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
1019 !inode->i_sb->s_dio_done_wq) {
1020 ret = sb_init_dio_done_wq(inode->i_sb);
1021 if (ret < 0)
1022 iomap_dio_set_error(dio, ret);
1023 }
1024
1025 if (!atomic_dec_and_test(&dio->ref)) { 1048 if (!atomic_dec_and_test(&dio->ref)) {
1026 if (!is_sync_kiocb(iocb)) 1049 if (!is_sync_kiocb(iocb))
1027 return -EIOCBQUEUED; 1050 return -EIOCBQUEUED;
@@ -1042,19 +1065,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1042 1065
1043 ret = iomap_dio_complete(dio); 1066 ret = iomap_dio_complete(dio);
1044 1067
1045 /*
1046 * Try again to invalidate clean pages which might have been cached by
1047 * non-direct readahead, or faulted in by get_user_pages() if the source
1048 * of the write was an mmap'ed region of the file we're writing. Either
1049 * one is a pretty crazy thing to do, so we don't support it 100%. If
1050 * this invalidation fails, tough, the write still worked...
1051 */
1052 if (iov_iter_rw(iter) == WRITE) {
1053 int err = invalidate_inode_pages2_range(mapping,
1054 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1055 WARN_ON_ONCE(err);
1056 }
1057
1058 return ret; 1068 return ret;
1059 1069
1060out_free_dio: 1070out_free_dio:
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index db692f554158..447a24d77b89 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -514,9 +514,11 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
514 if (sbi->s_fmode != ISOFS_INVALID_MODE) 514 if (sbi->s_fmode != ISOFS_INVALID_MODE)
515 seq_printf(m, ",fmode=%o", sbi->s_fmode); 515 seq_printf(m, ",fmode=%o", sbi->s_fmode);
516 516
517#ifdef CONFIG_JOLIET
517 if (sbi->s_nls_iocharset && 518 if (sbi->s_nls_iocharset &&
518 strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0) 519 strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
519 seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset); 520 seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
521#endif
520 return 0; 522 return 0;
521} 523}
522 524
diff --git a/fs/mpage.c b/fs/mpage.c
index 37bb77c1302c..c991faec70b9 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped)
468 try_to_free_buffers(page); 468 try_to_free_buffers(page);
469} 469}
470 470
471/*
472 * For situations where we want to clean all buffers attached to a page.
473 * We don't need to calculate how many buffers are attached to the page,
474 * we just need to specify a number larger than the maximum number of buffers.
475 */
476void clean_page_buffers(struct page *page)
477{
478 clean_buffers(page, ~0U);
479}
480
471static int __mpage_writepage(struct page *page, struct writeback_control *wbc, 481static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
472 void *data) 482 void *data)
473{ 483{
@@ -605,10 +615,8 @@ alloc_new:
605 if (bio == NULL) { 615 if (bio == NULL) {
606 if (first_unmapped == blocks_per_page) { 616 if (first_unmapped == blocks_per_page) {
607 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), 617 if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
608 page, wbc)) { 618 page, wbc))
609 clean_buffers(page, first_unmapped);
610 goto out; 619 goto out;
611 }
612 } 620 }
613 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), 621 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
614 BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); 622 BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
diff --git a/fs/namespace.c b/fs/namespace.c
index 54059b142d6b..d18deb4c410b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file)
468 468
469 /* File refers to upper, writable layer? */ 469 /* File refers to upper, writable layer? */
470 upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER); 470 upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
471 if (upperdentry && file_inode(file) == d_inode(upperdentry)) 471 if (upperdentry &&
472 (file_inode(file) == d_inode(upperdentry) ||
473 file_inode(file) == d_inode(dentry)))
472 return 0; 474 return 0;
473 475
474 /* Lower layer: can't write to real file, sorry... */ 476 /* Lower layer: can't write to real file, sorry... */
@@ -2823,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
2823 SB_MANDLOCK | 2825 SB_MANDLOCK |
2824 SB_DIRSYNC | 2826 SB_DIRSYNC |
2825 SB_SILENT | 2827 SB_SILENT |
2826 SB_POSIXACL); 2828 SB_POSIXACL |
2829 SB_I_VERSION);
2827 2830
2828 if (flags & MS_REMOUNT) 2831 if (flags & MS_REMOUNT)
2829 retval = do_remount(&path, flags, sb_flags, mnt_flags, 2832 retval = do_remount(&path, flags, sb_flags, mnt_flags,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index efebe6cf4378..22880ef6d8dd 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
218static void pnfs_init_server(struct nfs_server *server) 218static void pnfs_init_server(struct nfs_server *server)
219{ 219{
220 rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); 220 rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
221 rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
222} 221}
223 222
224#else 223#else
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void)
888 ida_init(&server->openowner_id); 887 ida_init(&server->openowner_id);
889 ida_init(&server->lockowner_id); 888 ida_init(&server->lockowner_id);
890 pnfs_init_server(server); 889 pnfs_init_server(server);
890 rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC");
891 891
892 return server; 892 return server;
893} 893}
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 44c638b7876c..508126eb49f9 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
745 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); 745 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
746 746
747 dprintk("--> %s\n", __func__); 747 dprintk("--> %s\n", __func__);
748 nfs4_fl_put_deviceid(fl->dsaddr); 748 if (fl->dsaddr != NULL)
749 nfs4_fl_put_deviceid(fl->dsaddr);
749 /* This assumes a single RW lseg */ 750 /* This assumes a single RW lseg */
750 if (lseg->pls_range.iomode == IOMODE_RW) { 751 if (lseg->pls_range.iomode == IOMODE_RW) {
751 struct nfs4_filelayout *flo; 752 struct nfs4_filelayout *flo;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index dd5d27da8c0c..30426c1a1bbd 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
274 ssize_t ret; 274 ssize_t ret;
275 275
276 ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); 276 ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
277 if (ret <= 0) 277 if (ret < 0)
278 return ERR_PTR(ret); 278 return ERR_PTR(ret);
279 279
280 rkey = request_key(&key_type_id_resolver, desc, ""); 280 rkey = request_key(&key_type_id_resolver, desc, "");
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6c61e2b99635..f90090e8c959 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
8399 lo = NFS_I(inode)->layout; 8399 lo = NFS_I(inode)->layout;
8400 /* If the open stateid was bad, then recover it. */ 8400 /* If the open stateid was bad, then recover it. */
8401 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 8401 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
8402 nfs4_stateid_match_other(&lgp->args.stateid, 8402 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
8403 &lgp->args.ctx->state->stateid)) {
8404 spin_unlock(&inode->i_lock); 8403 spin_unlock(&inode->i_lock);
8405 exception->state = lgp->args.ctx->state; 8404 exception->state = lgp->args.ctx->state;
8406 exception->stateid = &lgp->args.stateid; 8405 exception->stateid = &lgp->args.stateid;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 37c8af003275..14ed9791ec9c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr,
1842 * Assumes OPEN is the biggest non-idempotent compound. 1842 * Assumes OPEN is the biggest non-idempotent compound.
1843 * 2 is the verifier. 1843 * 2 is the verifier.
1844 */ 1844 */
1845 max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 1845 max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2)
1846 RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; 1846 * XDR_UNIT + RPC_MAX_AUTH_SIZE;
1847 1847
1848 encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); 1848 encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
1849 p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); 1849 p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 3c69db7d4905..8487486ec496 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u)
927 exp_put(u->secinfo.si_exp); 927 exp_put(u->secinfo.si_exp);
928} 928}
929 929
930static void
931nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
932{
933 if (u->secinfo_no_name.sin_exp)
934 exp_put(u->secinfo_no_name.sin_exp);
935}
936
930static __be32 937static __be32
931nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 938nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
932 union nfsd4_op_u *u) 939 union nfsd4_op_u *u)
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
2375 }, 2382 },
2376 [OP_SECINFO_NO_NAME] = { 2383 [OP_SECINFO_NO_NAME] = {
2377 .op_func = nfsd4_secinfo_no_name, 2384 .op_func = nfsd4_secinfo_no_name,
2378 .op_release = nfsd4_secinfo_release, 2385 .op_release = nfsd4_secinfo_no_name_release,
2379 .op_flags = OP_HANDLES_WRONGSEC, 2386 .op_flags = OP_HANDLES_WRONGSEC,
2380 .op_name = "OP_SECINFO_NO_NAME", 2387 .op_name = "OP_SECINFO_NO_NAME",
2381 .op_rsize_bop = nfsd4_secinfo_rsize, 2388 .op_rsize_bop = nfsd4_secinfo_rsize,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index aad97b30d5e6..c441f9387a1b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
561 c->tmpfile = true; 561 c->tmpfile = true;
562 err = ovl_copy_up_locked(c); 562 err = ovl_copy_up_locked(c);
563 } else { 563 } else {
564 err = -EIO; 564 err = ovl_lock_rename_workdir(c->workdir, c->destdir);
565 if (lock_rename(c->workdir, c->destdir) != NULL) { 565 if (!err) {
566 pr_err("overlayfs: failed to lock workdir+upperdir\n");
567 } else {
568 err = ovl_copy_up_locked(c); 566 err = ovl_copy_up_locked(c);
569 unlock_rename(c->workdir, c->destdir); 567 unlock_rename(c->workdir, c->destdir);
570 } 568 }
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 3309b1912241..cc961a3bd3bd 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -216,26 +216,6 @@ out_unlock:
216 return err; 216 return err;
217} 217}
218 218
219static int ovl_lock_rename_workdir(struct dentry *workdir,
220 struct dentry *upperdir)
221{
222 /* Workdir should not be the same as upperdir */
223 if (workdir == upperdir)
224 goto err;
225
226 /* Workdir should not be subdir of upperdir and vice versa */
227 if (lock_rename(workdir, upperdir) != NULL)
228 goto err_unlock;
229
230 return 0;
231
232err_unlock:
233 unlock_rename(workdir, upperdir);
234err:
235 pr_err("overlayfs: failed to lock workdir+upperdir\n");
236 return -EIO;
237}
238
239static struct dentry *ovl_clear_empty(struct dentry *dentry, 219static struct dentry *ovl_clear_empty(struct dentry *dentry,
240 struct list_head *list) 220 struct list_head *list)
241{ 221{
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a619addecafc..321511ed8c42 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
598 return true; 598 return true;
599} 599}
600 600
601struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry) 601struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
602 struct dentry *index)
602{ 603{
603 struct dentry *lowerdentry = ovl_dentry_lower(dentry); 604 struct dentry *lowerdentry = ovl_dentry_lower(dentry);
604 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; 605 struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
605 struct inode *inode; 606 struct inode *inode;
607 /* Already indexed or could be indexed on copy up? */
608 bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
609
610 if (WARN_ON(upperdentry && indexed && !lowerdentry))
611 return ERR_PTR(-EIO);
606 612
607 if (!realinode) 613 if (!realinode)
608 realinode = d_inode(lowerdentry); 614 realinode = d_inode(lowerdentry);
609 615
610 if (!S_ISDIR(realinode->i_mode) && 616 /*
611 (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) { 617 * Copy up origin (lower) may exist for non-indexed upper, but we must
612 struct inode *key = d_inode(lowerdentry ?: upperdentry); 618 * not use lower as hash key in that case.
619 * Hash inodes that are or could be indexed by origin inode and
620 * non-indexed upper inodes that could be hard linked by upper inode.
621 */
622 if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
623 struct inode *key = d_inode(indexed ? lowerdentry :
624 upperdentry);
613 unsigned int nlink; 625 unsigned int nlink;
614 626
615 inode = iget5_locked(dentry->d_sb, (unsigned long) key, 627 inode = iget5_locked(dentry->d_sb, (unsigned long) key,
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index c3addd1114f1..a12dc10bf726 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
405 * be treated as stale (i.e. after unlink of the overlay inode). 405 * be treated as stale (i.e. after unlink of the overlay inode).
406 * We don't know the verification rules for directory and whiteout 406 * We don't know the verification rules for directory and whiteout
407 * index entries, because they have not been implemented yet, so return 407 * index entries, because they have not been implemented yet, so return
408 * EROFS if those entries are found to avoid corrupting an index that 408 * EINVAL if those entries are found to abort the mount to avoid
409 * was created by a newer kernel. 409 * corrupting an index that was created by a newer kernel.
410 */ 410 */
411 err = -EROFS; 411 err = -EINVAL;
412 if (d_is_dir(index) || ovl_is_whiteout(index)) 412 if (d_is_dir(index) || ovl_is_whiteout(index))
413 goto fail; 413 goto fail;
414 414
415 err = -EINVAL;
416 if (index->d_name.len < sizeof(struct ovl_fh)*2) 415 if (index->d_name.len < sizeof(struct ovl_fh)*2)
417 goto fail; 416 goto fail;
418 417
@@ -506,6 +505,11 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
506 505
507 index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); 506 index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
508 if (IS_ERR(index)) { 507 if (IS_ERR(index)) {
508 err = PTR_ERR(index);
509 if (err == -ENOENT) {
510 index = NULL;
511 goto out;
512 }
509 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" 513 pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
510 "overlayfs: mount with '-o index=off' to disable inodes index.\n", 514 "overlayfs: mount with '-o index=off' to disable inodes index.\n",
511 d_inode(origin)->i_ino, name.len, name.name, 515 d_inode(origin)->i_ino, name.len, name.name,
@@ -515,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
515 519
516 inode = d_inode(index); 520 inode = d_inode(index);
517 if (d_is_negative(index)) { 521 if (d_is_negative(index)) {
518 if (upper && d_inode(origin)->i_nlink > 1) { 522 goto out_dput;
519 pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
520 d_inode(origin)->i_ino);
521 goto fail;
522 }
523
524 dput(index);
525 index = NULL;
526 } else if (upper && d_inode(upper) != inode) { 523 } else if (upper && d_inode(upper) != inode) {
527 pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n", 524 goto out_dput;
528 index, inode->i_ino, d_inode(upper)->i_ino);
529 goto fail;
530 } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) || 525 } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
531 ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) { 526 ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
532 /* 527 /*
@@ -546,6 +541,11 @@ out:
546 kfree(name.name); 541 kfree(name.name);
547 return index; 542 return index;
548 543
544out_dput:
545 dput(index);
546 index = NULL;
547 goto out;
548
549fail: 549fail:
550 dput(index); 550 dput(index);
551 index = ERR_PTR(-EIO); 551 index = ERR_PTR(-EIO);
@@ -634,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
634 } 634 }
635 635
636 if (d.redirect) { 636 if (d.redirect) {
637 err = -ENOMEM;
637 upperredirect = kstrdup(d.redirect, GFP_KERNEL); 638 upperredirect = kstrdup(d.redirect, GFP_KERNEL);
638 if (!upperredirect) 639 if (!upperredirect)
639 goto out_put_upper; 640 goto out_put_upper;
@@ -708,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
708 upperdentry = dget(index); 709 upperdentry = dget(index);
709 710
710 if (upperdentry || ctr) { 711 if (upperdentry || ctr) {
711 inode = ovl_get_inode(dentry, upperdentry); 712 inode = ovl_get_inode(dentry, upperdentry, index);
712 err = PTR_ERR(inode); 713 err = PTR_ERR(inode);
713 if (IS_ERR(inode)) 714 if (IS_ERR(inode))
714 goto out_free_oe; 715 goto out_free_oe;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index d4e8c1a08fb0..d9a0edd4e57e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry);
235void ovl_inuse_unlock(struct dentry *dentry); 235void ovl_inuse_unlock(struct dentry *dentry);
236int ovl_nlink_start(struct dentry *dentry, bool *locked); 236int ovl_nlink_start(struct dentry *dentry, bool *locked);
237void ovl_nlink_end(struct dentry *dentry, bool locked); 237void ovl_nlink_end(struct dentry *dentry, bool locked);
238int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
238 239
239static inline bool ovl_is_impuredir(struct dentry *dentry) 240static inline bool ovl_is_impuredir(struct dentry *dentry)
240{ 241{
@@ -285,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
285bool ovl_is_private_xattr(const char *name); 286bool ovl_is_private_xattr(const char *name);
286 287
287struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); 288struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
288struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry); 289struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
290 struct dentry *index);
289static inline void ovl_copyattr(struct inode *from, struct inode *to) 291static inline void ovl_copyattr(struct inode *from, struct inode *to)
290{ 292{
291 to->i_uid = from->i_uid; 293 to->i_uid = from->i_uid;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 878a750986dd..25d9b5adcd42 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -37,6 +37,9 @@ struct ovl_fs {
37 bool noxattr; 37 bool noxattr;
38 /* sb common to all layers */ 38 /* sb common to all layers */
39 struct super_block *same_sb; 39 struct super_block *same_sb;
40 /* Did we take the inuse lock? */
41 bool upperdir_locked;
42 bool workdir_locked;
40}; 43};
41 44
42/* private information held for every overlayfs dentry */ 45/* private information held for every overlayfs dentry */
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 62e9b22a2077..698b74dd750e 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
988 struct path *lowerstack, unsigned int numlower) 988 struct path *lowerstack, unsigned int numlower)
989{ 989{
990 int err; 990 int err;
991 struct dentry *index = NULL;
991 struct inode *dir = dentry->d_inode; 992 struct inode *dir = dentry->d_inode;
992 struct path path = { .mnt = mnt, .dentry = dentry }; 993 struct path path = { .mnt = mnt, .dentry = dentry };
993 LIST_HEAD(list); 994 LIST_HEAD(list);
@@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
1007 1008
1008 inode_lock_nested(dir, I_MUTEX_PARENT); 1009 inode_lock_nested(dir, I_MUTEX_PARENT);
1009 list_for_each_entry(p, &list, l_node) { 1010 list_for_each_entry(p, &list, l_node) {
1010 struct dentry *index;
1011
1012 if (p->name[0] == '.') { 1011 if (p->name[0] == '.') {
1013 if (p->len == 1) 1012 if (p->len == 1)
1014 continue; 1013 continue;
@@ -1018,18 +1017,20 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
1018 index = lookup_one_len(p->name, dentry, p->len); 1017 index = lookup_one_len(p->name, dentry, p->len);
1019 if (IS_ERR(index)) { 1018 if (IS_ERR(index)) {
1020 err = PTR_ERR(index); 1019 err = PTR_ERR(index);
1020 index = NULL;
1021 break; 1021 break;
1022 } 1022 }
1023 err = ovl_verify_index(index, lowerstack, numlower); 1023 err = ovl_verify_index(index, lowerstack, numlower);
1024 if (err) { 1024 /* Cleanup stale and orphan index entries */
1025 if (err == -EROFS) 1025 if (err && (err == -ESTALE || err == -ENOENT))
1026 break;
1027 err = ovl_cleanup(dir, index); 1026 err = ovl_cleanup(dir, index);
1028 if (err) 1027 if (err)
1029 break; 1028 break;
1030 } 1029
1031 dput(index); 1030 dput(index);
1031 index = NULL;
1032 } 1032 }
1033 dput(index);
1033 inode_unlock(dir); 1034 inode_unlock(dir);
1034out: 1035out:
1035 ovl_cache_free(&list); 1036 ovl_cache_free(&list);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index fd5ea4facc62..f5738e96a052 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
174{ 174{
175 struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL); 175 struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
176 176
177 if (!oi)
178 return NULL;
179
177 oi->cache = NULL; 180 oi->cache = NULL;
178 oi->redirect = NULL; 181 oi->redirect = NULL;
179 oi->version = 0; 182 oi->version = 0;
@@ -211,9 +214,10 @@ static void ovl_put_super(struct super_block *sb)
211 214
212 dput(ufs->indexdir); 215 dput(ufs->indexdir);
213 dput(ufs->workdir); 216 dput(ufs->workdir);
214 ovl_inuse_unlock(ufs->workbasedir); 217 if (ufs->workdir_locked)
218 ovl_inuse_unlock(ufs->workbasedir);
215 dput(ufs->workbasedir); 219 dput(ufs->workbasedir);
216 if (ufs->upper_mnt) 220 if (ufs->upper_mnt && ufs->upperdir_locked)
217 ovl_inuse_unlock(ufs->upper_mnt->mnt_root); 221 ovl_inuse_unlock(ufs->upper_mnt->mnt_root);
218 mntput(ufs->upper_mnt); 222 mntput(ufs->upper_mnt);
219 for (i = 0; i < ufs->numlower; i++) 223 for (i = 0; i < ufs->numlower; i++)
@@ -881,9 +885,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
881 goto out_put_upperpath; 885 goto out_put_upperpath;
882 886
883 err = -EBUSY; 887 err = -EBUSY;
884 if (!ovl_inuse_trylock(upperpath.dentry)) { 888 if (ovl_inuse_trylock(upperpath.dentry)) {
885 pr_err("overlayfs: upperdir is in-use by another mount\n"); 889 ufs->upperdir_locked = true;
890 } else if (ufs->config.index) {
891 pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
886 goto out_put_upperpath; 892 goto out_put_upperpath;
893 } else {
894 pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
887 } 895 }
888 896
889 err = ovl_mount_dir(ufs->config.workdir, &workpath); 897 err = ovl_mount_dir(ufs->config.workdir, &workpath);
@@ -901,9 +909,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
901 } 909 }
902 910
903 err = -EBUSY; 911 err = -EBUSY;
904 if (!ovl_inuse_trylock(workpath.dentry)) { 912 if (ovl_inuse_trylock(workpath.dentry)) {
905 pr_err("overlayfs: workdir is in-use by another mount\n"); 913 ufs->workdir_locked = true;
914 } else if (ufs->config.index) {
915 pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
906 goto out_put_workpath; 916 goto out_put_workpath;
917 } else {
918 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
907 } 919 }
908 920
909 ufs->workbasedir = workpath.dentry; 921 ufs->workbasedir = workpath.dentry;
@@ -1156,11 +1168,13 @@ out_put_lowerpath:
1156out_free_lowertmp: 1168out_free_lowertmp:
1157 kfree(lowertmp); 1169 kfree(lowertmp);
1158out_unlock_workdentry: 1170out_unlock_workdentry:
1159 ovl_inuse_unlock(workpath.dentry); 1171 if (ufs->workdir_locked)
1172 ovl_inuse_unlock(workpath.dentry);
1160out_put_workpath: 1173out_put_workpath:
1161 path_put(&workpath); 1174 path_put(&workpath);
1162out_unlock_upperdentry: 1175out_unlock_upperdentry:
1163 ovl_inuse_unlock(upperpath.dentry); 1176 if (ufs->upperdir_locked)
1177 ovl_inuse_unlock(upperpath.dentry);
1164out_put_upperpath: 1178out_put_upperpath:
1165 path_put(&upperpath); 1179 path_put(&upperpath);
1166out_free_config: 1180out_free_config:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 117794582f9f..b9b239fa5cfd 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry)
430 } 430 }
431} 431}
432 432
433/* Called must hold OVL_I(inode)->oi_lock */ 433/* Caller must hold OVL_I(inode)->lock */
434static void ovl_cleanup_index(struct dentry *dentry) 434static void ovl_cleanup_index(struct dentry *dentry)
435{ 435{
436 struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode; 436 struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode;
@@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry)
469 err = PTR_ERR(index); 469 err = PTR_ERR(index);
470 if (!IS_ERR(index)) 470 if (!IS_ERR(index))
471 err = ovl_cleanup(dir, index); 471 err = ovl_cleanup(dir, index);
472 else
473 index = NULL;
474
472 inode_unlock(dir); 475 inode_unlock(dir);
473 if (err) 476 if (err)
474 goto fail; 477 goto fail;
@@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked)
557 mutex_unlock(&OVL_I(d_inode(dentry))->lock); 560 mutex_unlock(&OVL_I(d_inode(dentry))->lock);
558 } 561 }
559} 562}
563
564int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
565{
566 /* Workdir should not be the same as upperdir */
567 if (workdir == upperdir)
568 goto err;
569
570 /* Workdir should not be subdir of upperdir and vice versa */
571 if (lock_rename(workdir, upperdir) != NULL)
572 goto err_unlock;
573
574 return 0;
575
576err_unlock:
577 unlock_rename(workdir, upperdir);
578err:
579 pr_err("overlayfs: failed to lock workdir+upperdir\n");
580 return -EIO;
581}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 525157ca25cb..77a8eacbe032 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -119,30 +119,25 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
119 * simple bit tests. 119 * simple bit tests.
120 */ 120 */
121static const char * const task_state_array[] = { 121static const char * const task_state_array[] = {
122 "R (running)", /* 0 */ 122
123 "S (sleeping)", /* 1 */ 123 /* states in TASK_REPORT: */
124 "D (disk sleep)", /* 2 */ 124 "R (running)", /* 0x00 */
125 "T (stopped)", /* 4 */ 125 "S (sleeping)", /* 0x01 */
126 "t (tracing stop)", /* 8 */ 126 "D (disk sleep)", /* 0x02 */
127 "X (dead)", /* 16 */ 127 "T (stopped)", /* 0x04 */
128 "Z (zombie)", /* 32 */ 128 "t (tracing stop)", /* 0x08 */
129 "X (dead)", /* 0x10 */
130 "Z (zombie)", /* 0x20 */
131 "P (parked)", /* 0x40 */
132
133 /* states beyond TASK_REPORT: */
134 "I (idle)", /* 0x80 */
129}; 135};
130 136
131static inline const char *get_task_state(struct task_struct *tsk) 137static inline const char *get_task_state(struct task_struct *tsk)
132{ 138{
133 unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT; 139 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array));
134 140 return task_state_array[__get_task_state(tsk)];
135 /*
136 * Parked tasks do not run; they sit in __kthread_parkme().
137 * Without this check, we would report them as running, which is
138 * clearly wrong, so we report them as sleeping instead.
139 */
140 if (tsk->state == TASK_PARKED)
141 state = TASK_INTERRUPTIBLE;
142
143 BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1);
144
145 return task_state_array[fls(state)];
146} 141}
147 142
148static inline int get_task_umask(struct task_struct *tsk) 143static inline int get_task_umask(struct task_struct *tsk)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8381db9db6d9..52ad15192e72 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1297 spin_lock(&dquot->dq_dqb_lock); 1297 spin_lock(&dquot->dq_dqb_lock);
1298 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || 1298 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1299 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1299 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1300 goto add; 1300 goto finish;
1301 1301
1302 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace 1302 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1303 + space + rsv_space; 1303 + space + rsv_space;
1304 1304
1305 if (flags & DQUOT_SPACE_NOFAIL)
1306 goto add;
1307
1308 if (dquot->dq_dqb.dqb_bhardlimit && 1305 if (dquot->dq_dqb.dqb_bhardlimit &&
1309 tspace > dquot->dq_dqb.dqb_bhardlimit && 1306 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1310 !ignore_hardlimit(dquot)) { 1307 !ignore_hardlimit(dquot)) {
1311 if (flags & DQUOT_SPACE_WARN) 1308 if (flags & DQUOT_SPACE_WARN)
1312 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); 1309 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1313 ret = -EDQUOT; 1310 ret = -EDQUOT;
1314 goto out; 1311 goto finish;
1315 } 1312 }
1316 1313
1317 if (dquot->dq_dqb.dqb_bsoftlimit && 1314 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1322 if (flags & DQUOT_SPACE_WARN) 1319 if (flags & DQUOT_SPACE_WARN)
1323 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); 1320 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1324 ret = -EDQUOT; 1321 ret = -EDQUOT;
1325 goto out; 1322 goto finish;
1326 } 1323 }
1327 1324
1328 if (dquot->dq_dqb.dqb_bsoftlimit && 1325 if (dquot->dq_dqb.dqb_bsoftlimit &&
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space,
1338 * be always printed 1335 * be always printed
1339 */ 1336 */
1340 ret = -EDQUOT; 1337 ret = -EDQUOT;
1341 goto out; 1338 goto finish;
1342 } 1339 }
1343 } 1340 }
1344add: 1341finish:
1345 dquot->dq_dqb.dqb_rsvspace += rsv_space; 1342 /*
1346 dquot->dq_dqb.dqb_curspace += space; 1343 * We have to be careful and go through warning generation & grace time
1347out: 1344 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1345 * only here...
1346 */
1347 if (flags & DQUOT_SPACE_NOFAIL)
1348 ret = 0;
1349 if (!ret) {
1350 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1351 dquot->dq_dqb.dqb_curspace += space;
1352 }
1348 spin_unlock(&dquot->dq_dqb_lock); 1353 spin_unlock(&dquot->dq_dqb_lock);
1349 return ret; 1354 return ret;
1350} 1355}
@@ -1980,7 +1985,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1980 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0, 1985 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, 0,
1981 &warn_to[cnt]); 1986 &warn_to[cnt]);
1982 if (ret) { 1987 if (ret) {
1988 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
1983 dquot_decr_inodes(transfer_to[cnt], inode_usage); 1989 dquot_decr_inodes(transfer_to[cnt], inode_usage);
1990 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
1984 goto over_quota; 1991 goto over_quota;
1985 } 1992 }
1986 } 1993 }
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index c0187cda2c1e..a73e5b34db41 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -328,12 +328,16 @@ static int v2_write_dquot(struct dquot *dquot)
328 if (!dquot->dq_off) { 328 if (!dquot->dq_off) {
329 alloc = true; 329 alloc = true;
330 down_write(&dqopt->dqio_sem); 330 down_write(&dqopt->dqio_sem);
331 } else {
332 down_read(&dqopt->dqio_sem);
331 } 333 }
332 ret = qtree_write_dquot( 334 ret = qtree_write_dquot(
333 sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, 335 sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv,
334 dquot); 336 dquot);
335 if (alloc) 337 if (alloc)
336 up_write(&dqopt->dqio_sem); 338 up_write(&dqopt->dqio_sem);
339 else
340 up_read(&dqopt->dqio_sem);
337 return ret; 341 return ret;
338} 342}
339 343
diff --git a/fs/read_write.c b/fs/read_write.c
index a2b9a47235c5..f0d4b16873e8 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
112 * In the generic case the entire file is data, so as long as 112 * In the generic case the entire file is data, so as long as
113 * offset isn't at the end of the file then the offset is data. 113 * offset isn't at the end of the file then the offset is data.
114 */ 114 */
115 if (offset >= eof) 115 if ((unsigned long long)offset >= eof)
116 return -ENXIO; 116 return -ENXIO;
117 break; 117 break;
118 case SEEK_HOLE: 118 case SEEK_HOLE:
@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
120 * There is a virtual hole at the end of the file, so as long as 120 * There is a virtual hole at the end of the file, so as long as
121 * offset isn't i_size or larger, return i_size. 121 * offset isn't i_size or larger, return i_size.
122 */ 122 */
123 if (offset >= eof) 123 if ((unsigned long long)offset >= eof)
124 return -ENXIO; 124 return -ENXIO;
125 offset = eof; 125 offset = eof;
126 break; 126 break;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ef4b48d1ea42..1c713fd5b3e6 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -588,6 +588,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
588 break; 588 break;
589 if (ACCESS_ONCE(ctx->released) || 589 if (ACCESS_ONCE(ctx->released) ||
590 fatal_signal_pending(current)) { 590 fatal_signal_pending(current)) {
591 /*
592 * &ewq->wq may be queued in fork_event, but
593 * __remove_wait_queue ignores the head
594 * parameter. It would be a problem if it
595 * didn't.
596 */
591 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); 597 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
592 if (ewq->msg.event == UFFD_EVENT_FORK) { 598 if (ewq->msg.event == UFFD_EVENT_FORK) {
593 struct userfaultfd_ctx *new; 599 struct userfaultfd_ctx *new;
@@ -1061,6 +1067,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1061 (unsigned long) 1067 (unsigned long)
1062 uwq->msg.arg.reserved.reserved1; 1068 uwq->msg.arg.reserved.reserved1;
1063 list_move(&uwq->wq.entry, &fork_event); 1069 list_move(&uwq->wq.entry, &fork_event);
1070 /*
1071 * fork_nctx can be freed as soon as
1072 * we drop the lock, unless we take a
1073 * reference on it.
1074 */
1075 userfaultfd_ctx_get(fork_nctx);
1064 spin_unlock(&ctx->event_wqh.lock); 1076 spin_unlock(&ctx->event_wqh.lock);
1065 ret = 0; 1077 ret = 0;
1066 break; 1078 break;
@@ -1091,19 +1103,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1091 1103
1092 if (!ret && msg->event == UFFD_EVENT_FORK) { 1104 if (!ret && msg->event == UFFD_EVENT_FORK) {
1093 ret = resolve_userfault_fork(ctx, fork_nctx, msg); 1105 ret = resolve_userfault_fork(ctx, fork_nctx, msg);
1106 spin_lock(&ctx->event_wqh.lock);
1107 if (!list_empty(&fork_event)) {
1108 /*
1109 * The fork thread didn't abort, so we can
1110 * drop the temporary refcount.
1111 */
1112 userfaultfd_ctx_put(fork_nctx);
1113
1114 uwq = list_first_entry(&fork_event,
1115 typeof(*uwq),
1116 wq.entry);
1117 /*
1118 * If fork_event list wasn't empty and in turn
1119 * the event wasn't already released by fork
1120 * (the event is allocated on fork kernel
1121 * stack), put the event back to its place in
1122 * the event_wq. fork_event head will be freed
1123 * as soon as we return so the event cannot
1124 * stay queued there no matter the current
1125 * "ret" value.
1126 */
1127 list_del(&uwq->wq.entry);
1128 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1094 1129
1095 if (!ret) { 1130 /*
1096 spin_lock(&ctx->event_wqh.lock); 1131 * Leave the event in the waitqueue and report
1097 if (!list_empty(&fork_event)) { 1132 * error to userland if we failed to resolve
1098 uwq = list_first_entry(&fork_event, 1133 * the userfault fork.
1099 typeof(*uwq), 1134 */
1100 wq.entry); 1135 if (likely(!ret))
1101 list_del(&uwq->wq.entry);
1102 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1103 userfaultfd_event_complete(ctx, uwq); 1136 userfaultfd_event_complete(ctx, uwq);
1104 } 1137 } else {
1105 spin_unlock(&ctx->event_wqh.lock); 1138 /*
1139 * Here the fork thread aborted and the
1140 * refcount from the fork thread on fork_nctx
1141 * has already been released. We still hold
1142 * the reference we took before releasing the
1143 * lock above. If resolve_userfault_fork
1144 * failed we've to drop it because the
1145 * fork_nctx has to be freed in such case. If
1146 * it succeeded we'll hold it because the new
1147 * uffd references it.
1148 */
1149 if (ret)
1150 userfaultfd_ctx_put(fork_nctx);
1106 } 1151 }
1152 spin_unlock(&ctx->event_wqh.lock);
1107 } 1153 }
1108 1154
1109 return ret; 1155 return ret;
diff --git a/fs/xattr.c b/fs/xattr.c
index 4424f7fecf14..61cd28ba25f3 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -250,7 +250,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
250 } 250 }
251 memcpy(value, buffer, len); 251 memcpy(value, buffer, len);
252out: 252out:
253 security_release_secctx(buffer, len); 253 kfree(buffer);
254out_noalloc: 254out_noalloc:
255 return len; 255 return len;
256} 256}
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index b008ff3250eb..df3e600835e8 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -156,7 +156,8 @@ __xfs_ag_resv_free(
156 trace_xfs_ag_resv_free(pag, type, 0); 156 trace_xfs_ag_resv_free(pag, type, 0);
157 157
158 resv = xfs_perag_resv(pag, type); 158 resv = xfs_perag_resv(pag, type);
159 pag->pag_mount->m_ag_max_usable += resv->ar_asked; 159 if (pag->pag_agno == 0)
160 pag->pag_mount->m_ag_max_usable += resv->ar_asked;
160 /* 161 /*
161 * AGFL blocks are always considered "free", so whatever 162 * AGFL blocks are always considered "free", so whatever
162 * was reserved at mount time must be given back at umount. 163 * was reserved at mount time must be given back at umount.
@@ -216,7 +217,14 @@ __xfs_ag_resv_init(
216 return error; 217 return error;
217 } 218 }
218 219
219 mp->m_ag_max_usable -= ask; 220 /*
221 * Reduce the maximum per-AG allocation length by however much we're
222 * trying to reserve for an AG. Since this is a filesystem-wide
223 * counter, we only make the adjustment for AG 0. This assumes that
224 * there aren't any AGs hungrier for per-AG reservation than AG 0.
225 */
226 if (pag->pag_agno == 0)
227 mp->m_ag_max_usable -= ask;
220 228
221 resv = xfs_perag_resv(pag, type); 229 resv = xfs_perag_resv(pag, type);
222 resv->ar_asked = ask; 230 resv->ar_asked = ask;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 744dcaec34cc..f965ce832bc0 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small(
1584 1584
1585 bp = xfs_btree_get_bufs(args->mp, args->tp, 1585 bp = xfs_btree_get_bufs(args->mp, args->tp,
1586 args->agno, fbno, 0); 1586 args->agno, fbno, 0);
1587 if (!bp) {
1588 error = -EFSCORRUPTED;
1589 goto error0;
1590 }
1587 xfs_trans_binval(args->tp, bp); 1591 xfs_trans_binval(args->tp, bp);
1588 } 1592 }
1589 args->len = 1; 1593 args->len = 1;
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist(
2141 if (error) 2145 if (error)
2142 goto out_agbp_relse; 2146 goto out_agbp_relse;
2143 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); 2147 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
2148 if (!bp) {
2149 error = -EFSCORRUPTED;
2150 goto out_agbp_relse;
2151 }
2144 xfs_trans_binval(tp, bp); 2152 xfs_trans_binval(tp, bp);
2145 } 2153 }
2146 2154
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 459f4b4f08fe..89263797cf32 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -49,7 +49,6 @@
49#include "xfs_rmap.h" 49#include "xfs_rmap.h"
50#include "xfs_ag_resv.h" 50#include "xfs_ag_resv.h"
51#include "xfs_refcount.h" 51#include "xfs_refcount.h"
52#include "xfs_rmap_btree.h"
53#include "xfs_icache.h" 52#include "xfs_icache.h"
54 53
55 54
@@ -192,12 +191,8 @@ xfs_bmap_worst_indlen(
192 int maxrecs; /* maximum record count at this level */ 191 int maxrecs; /* maximum record count at this level */
193 xfs_mount_t *mp; /* mount structure */ 192 xfs_mount_t *mp; /* mount structure */
194 xfs_filblks_t rval; /* return value */ 193 xfs_filblks_t rval; /* return value */
195 xfs_filblks_t orig_len;
196 194
197 mp = ip->i_mount; 195 mp = ip->i_mount;
198
199 /* Calculate the worst-case size of the bmbt. */
200 orig_len = len;
201 maxrecs = mp->m_bmap_dmxr[0]; 196 maxrecs = mp->m_bmap_dmxr[0];
202 for (level = 0, rval = 0; 197 for (level = 0, rval = 0;
203 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK); 198 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
@@ -205,20 +200,12 @@ xfs_bmap_worst_indlen(
205 len += maxrecs - 1; 200 len += maxrecs - 1;
206 do_div(len, maxrecs); 201 do_div(len, maxrecs);
207 rval += len; 202 rval += len;
208 if (len == 1) { 203 if (len == 1)
209 rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) - 204 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
210 level - 1; 205 level - 1;
211 break;
212 }
213 if (level == 0) 206 if (level == 0)
214 maxrecs = mp->m_bmap_dmxr[1]; 207 maxrecs = mp->m_bmap_dmxr[1];
215 } 208 }
216
217 /* Calculate the worst-case size of the rmapbt. */
218 if (xfs_sb_version_hasrmapbt(&mp->m_sb))
219 rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
220 mp->m_rmap_maxlevels;
221
222 return rval; 209 return rval;
223} 210}
224 211
@@ -1490,14 +1477,14 @@ xfs_bmap_isaeof(
1490 int is_empty; 1477 int is_empty;
1491 int error; 1478 int error;
1492 1479
1493 bma->aeof = 0; 1480 bma->aeof = false;
1494 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, 1481 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1495 &is_empty); 1482 &is_empty);
1496 if (error) 1483 if (error)
1497 return error; 1484 return error;
1498 1485
1499 if (is_empty) { 1486 if (is_empty) {
1500 bma->aeof = 1; 1487 bma->aeof = true;
1501 return 0; 1488 return 0;
1502 } 1489 }
1503 1490
@@ -3865,6 +3852,17 @@ xfs_trim_extent(
3865 } 3852 }
3866} 3853}
3867 3854
3855/* trim extent to within eof */
3856void
3857xfs_trim_extent_eof(
3858 struct xfs_bmbt_irec *irec,
3859 struct xfs_inode *ip)
3860
3861{
3862 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3863 i_size_read(VFS_I(ip))));
3864}
3865
3868/* 3866/*
3869 * Trim the returned map to the required bounds 3867 * Trim the returned map to the required bounds
3870 */ 3868 */
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 851982a5dfbc..502e0d8fb4ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
208 208
209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, 209void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
210 xfs_filblks_t len); 210 xfs_filblks_t len);
211void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
211int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); 212int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
212void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); 213void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
213void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, 214void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 988bb3f31446..dfd643909f85 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -1962,7 +1962,7 @@ xfs_difree_inobt(
1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) && 1962 if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
1963 rec.ir_free == XFS_INOBT_ALL_FREE && 1963 rec.ir_free == XFS_INOBT_ALL_FREE &&
1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { 1964 mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
1965 xic->deleted = 1; 1965 xic->deleted = true;
1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); 1966 xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec); 1967 xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
1968 1968
@@ -1989,7 +1989,7 @@ xfs_difree_inobt(
1989 1989
1990 xfs_difree_inode_chunk(mp, agno, &rec, dfops); 1990 xfs_difree_inode_chunk(mp, agno, &rec, dfops);
1991 } else { 1991 } else {
1992 xic->deleted = 0; 1992 xic->deleted = false;
1993 1993
1994 error = xfs_inobt_update(cur, &rec); 1994 error = xfs_inobt_update(cur, &rec);
1995 if (error) { 1995 if (error) {
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 8372e9bcd7b6..71de185735e0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format {
270 uint32_t ilf_fields; /* flags for fields logged */ 270 uint32_t ilf_fields; /* flags for fields logged */
271 uint16_t ilf_asize; /* size of attr d/ext/root */ 271 uint16_t ilf_asize; /* size of attr d/ext/root */
272 uint16_t ilf_dsize; /* size of data/ext/root */ 272 uint16_t ilf_dsize; /* size of data/ext/root */
273 uint32_t ilf_pad; /* pad for 64 bit boundary */
273 uint64_t ilf_ino; /* inode number */ 274 uint64_t ilf_ino; /* inode number */
274 union { 275 union {
275 uint32_t ilfu_rdev; /* rdev value for dev inode*/ 276 uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format {
280 int32_t ilf_boffset; /* off of inode in buffer */ 281 int32_t ilf_boffset; /* off of inode in buffer */
281} xfs_inode_log_format_t; 282} xfs_inode_log_format_t;
282 283
283typedef struct xfs_inode_log_format_32 { 284/*
284 uint16_t ilf_type; /* inode log item type */ 285 * Old 32 bit systems will log in this format without the 64 bit
285 uint16_t ilf_size; /* size of this item */ 286 * alignment padding. Recovery will detect this and convert it to the
286 uint32_t ilf_fields; /* flags for fields logged */ 287 * correct format.
287 uint16_t ilf_asize; /* size of attr d/ext/root */ 288 */
288 uint16_t ilf_dsize; /* size of data/ext/root */ 289struct xfs_inode_log_format_32 {
289 uint64_t ilf_ino; /* inode number */
290 union {
291 uint32_t ilfu_rdev; /* rdev value for dev inode*/
292 uuid_t ilfu_uuid; /* mount point value */
293 } ilf_u;
294 int64_t ilf_blkno; /* blkno of inode buffer */
295 int32_t ilf_len; /* len of inode buffer */
296 int32_t ilf_boffset; /* off of inode in buffer */
297} __attribute__((packed)) xfs_inode_log_format_32_t;
298
299typedef struct xfs_inode_log_format_64 {
300 uint16_t ilf_type; /* inode log item type */ 290 uint16_t ilf_type; /* inode log item type */
301 uint16_t ilf_size; /* size of this item */ 291 uint16_t ilf_size; /* size of this item */
302 uint32_t ilf_fields; /* flags for fields logged */ 292 uint32_t ilf_fields; /* flags for fields logged */
303 uint16_t ilf_asize; /* size of attr d/ext/root */ 293 uint16_t ilf_asize; /* size of attr d/ext/root */
304 uint16_t ilf_dsize; /* size of data/ext/root */ 294 uint16_t ilf_dsize; /* size of data/ext/root */
305 uint32_t ilf_pad; /* pad for 64 bit boundary */
306 uint64_t ilf_ino; /* inode number */ 295 uint64_t ilf_ino; /* inode number */
307 union { 296 union {
308 uint32_t ilfu_rdev; /* rdev value for dev inode*/ 297 uint32_t ilfu_rdev; /* rdev value for dev inode*/
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 {
311 int64_t ilf_blkno; /* blkno of inode buffer */ 300 int64_t ilf_blkno; /* blkno of inode buffer */
312 int32_t ilf_len; /* len of inode buffer */ 301 int32_t ilf_len; /* len of inode buffer */
313 int32_t ilf_boffset; /* off of inode in buffer */ 302 int32_t ilf_boffset; /* off of inode in buffer */
314} xfs_inode_log_format_64_t; 303} __attribute__((packed));
315 304
316 305
317/* 306/*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 7034e17535de..3354140de07e 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode)
247int 247int
248xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) 248xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
249{ 249{
250 umode_t mode;
251 bool set_mode = false;
250 int error = 0; 252 int error = 0;
251 253
252 if (!acl) 254 if (!acl)
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
257 return error; 259 return error;
258 260
259 if (type == ACL_TYPE_ACCESS) { 261 if (type == ACL_TYPE_ACCESS) {
260 umode_t mode;
261
262 error = posix_acl_update_mode(inode, &mode, &acl); 262 error = posix_acl_update_mode(inode, &mode, &acl);
263 if (error) 263 if (error)
264 return error; 264 return error;
265 error = xfs_set_mode(inode, mode); 265 set_mode = true;
266 if (error)
267 return error;
268 } 266 }
269 267
270 set_acl: 268 set_acl:
271 return __xfs_set_acl(inode, acl, type); 269 error = __xfs_set_acl(inode, acl, type);
270 if (error)
271 return error;
272
273 /*
274 * We set the mode after successfully updating the ACL xattr because the
275 * xattr update can fail at ENOSPC and we don't want to change the mode
276 * if the ACL update hasn't been applied.
277 */
278 if (set_mode)
279 error = xfs_set_mode(inode, mode);
280
281 return error;
272} 282}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 29172609f2a3..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -343,7 +343,8 @@ xfs_end_io(
343 error = xfs_reflink_end_cow(ip, offset, size); 343 error = xfs_reflink_end_cow(ip, offset, size);
344 break; 344 break;
345 case XFS_IO_UNWRITTEN: 345 case XFS_IO_UNWRITTEN:
346 error = xfs_iomap_write_unwritten(ip, offset, size); 346 /* writeback should never update isize */
347 error = xfs_iomap_write_unwritten(ip, offset, size, false);
347 break; 348 break;
348 default: 349 default:
349 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans); 350 ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
@@ -445,6 +446,19 @@ xfs_imap_valid(
445{ 446{
446 offset >>= inode->i_blkbits; 447 offset >>= inode->i_blkbits;
447 448
449 /*
450 * We have to make sure the cached mapping is within EOF to protect
451 * against eofblocks trimming on file release leaving us with a stale
452 * mapping. Otherwise, a page for a subsequent file extending buffered
453 * write could get picked up by this writeback cycle and written to the
454 * wrong blocks.
455 *
456 * Note that what we really want here is a generic mapping invalidation
457 * mechanism to protect us from arbitrary extent modifying contexts, not
458 * just eofblocks.
459 */
460 xfs_trim_extent_eof(imap, XFS_I(inode));
461
448 return offset >= imap->br_startoff && 462 return offset >= imap->br_startoff &&
449 offset < imap->br_startoff + imap->br_blockcount; 463 offset < imap->br_startoff + imap->br_blockcount;
450} 464}
@@ -734,6 +748,14 @@ xfs_vm_invalidatepage(
734{ 748{
735 trace_xfs_invalidatepage(page->mapping->host, page, offset, 749 trace_xfs_invalidatepage(page->mapping->host, page, offset,
736 length); 750 length);
751
752 /*
753 * If we are invalidating the entire page, clear the dirty state from it
754 * so that we can check for attempts to release dirty cached pages in
755 * xfs_vm_releasepage().
756 */
757 if (offset == 0 && length >= PAGE_SIZE)
758 cancel_dirty_page(page);
737 block_invalidatepage(page, offset, length); 759 block_invalidatepage(page, offset, length);
738} 760}
739 761
@@ -1189,25 +1211,27 @@ xfs_vm_releasepage(
1189 * mm accommodates an old ext3 case where clean pages might not have had 1211 * mm accommodates an old ext3 case where clean pages might not have had
1190 * the dirty bit cleared. Thus, it can send actual dirty pages to 1212 * the dirty bit cleared. Thus, it can send actual dirty pages to
1191 * ->releasepage() via shrink_active_list(). Conversely, 1213 * ->releasepage() via shrink_active_list(). Conversely,
1192 * block_invalidatepage() can send pages that are still marked dirty 1214 * block_invalidatepage() can send pages that are still marked dirty but
1193 * but otherwise have invalidated buffers. 1215 * otherwise have invalidated buffers.
1194 * 1216 *
1195 * We want to release the latter to avoid unnecessary buildup of the 1217 * We want to release the latter to avoid unnecessary buildup of the
1196 * LRU, skip the former and warn if we've left any lingering 1218 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
1197 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc 1219 * that are entirely invalidated and need to be released. Hence the
1198 * or unwritten buffers and warn if the page is not dirty. Otherwise 1220 * only time we should get dirty pages here is through
1199 * try to release the buffers. 1221 * shrink_active_list() and so we can simply skip those now.
1222 *
1223 * warn if we've left any lingering delalloc/unwritten buffers on clean
1224 * or invalidated pages we are about to release.
1200 */ 1225 */
1226 if (PageDirty(page))
1227 return 0;
1228
1201 xfs_count_page_state(page, &delalloc, &unwritten); 1229 xfs_count_page_state(page, &delalloc, &unwritten);
1202 1230
1203 if (delalloc) { 1231 if (WARN_ON_ONCE(delalloc))
1204 WARN_ON_ONCE(!PageDirty(page));
1205 return 0; 1232 return 0;
1206 } 1233 if (WARN_ON_ONCE(unwritten))
1207 if (unwritten) {
1208 WARN_ON_ONCE(!PageDirty(page));
1209 return 0; 1234 return 0;
1210 }
1211 1235
1212 return try_to_free_buffers(page); 1236 return try_to_free_buffers(page);
1213} 1237}
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index ebd66b19fbfc..e3a950ed35a8 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive(
302 &bp, XFS_ATTR_FORK); 302 &bp, XFS_ATTR_FORK);
303 if (error) 303 if (error)
304 return error; 304 return error;
305 node = bp->b_addr;
306 btree = dp->d_ops->node_tree_p(node);
305 child_fsb = be32_to_cpu(btree[i + 1].before); 307 child_fsb = be32_to_cpu(btree[i + 1].before);
306 xfs_trans_brelse(*trans, bp); 308 xfs_trans_brelse(*trans, bp);
307 } 309 }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index cd9a5400ba4f..6503cfa44262 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -84,6 +84,7 @@ xfs_zero_extent(
84 GFP_NOFS, 0); 84 GFP_NOFS, 0);
85} 85}
86 86
87#ifdef CONFIG_XFS_RT
87int 88int
88xfs_bmap_rtalloc( 89xfs_bmap_rtalloc(
89 struct xfs_bmalloca *ap) /* bmap alloc argument struct */ 90 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc(
190 } 191 }
191 return 0; 192 return 0;
192} 193}
194#endif /* CONFIG_XFS_RT */
193 195
194/* 196/*
195 * Check if the endoff is outside the last extent. If so the caller will grow 197 * Check if the endoff is outside the last extent. If so the caller will grow
@@ -1459,7 +1461,19 @@ xfs_shift_file_space(
1459 return error; 1461 return error;
1460 1462
1461 /* 1463 /*
1462 * The extent shiting code works on extent granularity. So, if 1464 * Clean out anything hanging around in the cow fork now that
1465 * we've flushed all the dirty data out to disk to avoid having
1466 * CoW extents at the wrong offsets.
1467 */
1468 if (xfs_is_reflink_inode(ip)) {
1469 error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
1470 true);
1471 if (error)
1472 return error;
1473 }
1474
1475 /*
1476 * The extent shifting code works on extent granularity. So, if
1463 * stop_fsb is not the starting block of extent, we need to split 1477 * stop_fsb is not the starting block of extent, we need to split
1464 * the extent at stop_fsb. 1478 * the extent at stop_fsb.
1465 */ 1479 */
@@ -2110,11 +2124,31 @@ xfs_swap_extents(
2110 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK; 2124 ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
2111 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; 2125 tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
2112 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK; 2126 tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
2127 }
2128
2129 /* Swap the cow forks. */
2130 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
2131 xfs_extnum_t extnum;
2132
2133 ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2134 ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
2135
2136 extnum = ip->i_cnextents;
2137 ip->i_cnextents = tip->i_cnextents;
2138 tip->i_cnextents = extnum;
2139
2113 cowfp = ip->i_cowfp; 2140 cowfp = ip->i_cowfp;
2114 ip->i_cowfp = tip->i_cowfp; 2141 ip->i_cowfp = tip->i_cowfp;
2115 tip->i_cowfp = cowfp; 2142 tip->i_cowfp = cowfp;
2116 xfs_inode_set_cowblocks_tag(ip); 2143
2117 xfs_inode_set_cowblocks_tag(tip); 2144 if (ip->i_cowfp && ip->i_cnextents)
2145 xfs_inode_set_cowblocks_tag(ip);
2146 else
2147 xfs_inode_clear_cowblocks_tag(ip);
2148 if (tip->i_cowfp && tip->i_cnextents)
2149 xfs_inode_set_cowblocks_tag(tip);
2150 else
2151 xfs_inode_clear_cowblocks_tag(tip);
2118 } 2152 }
2119 2153
2120 xfs_trans_log_inode(tp, ip, src_log_flags); 2154 xfs_trans_log_inode(tp, ip, src_log_flags);
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 0eaa81dc49be..7d330b3c77c3 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -28,7 +28,20 @@ struct xfs_mount;
28struct xfs_trans; 28struct xfs_trans;
29struct xfs_bmalloca; 29struct xfs_bmalloca;
30 30
31#ifdef CONFIG_XFS_RT
31int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 32int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
33#else /* !CONFIG_XFS_RT */
34/*
35 * Attempts to allocate RT extents when RT is disable indicates corruption and
36 * should trigger a shutdown.
37 */
38static inline int
39xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
40{
41 return -EFSCORRUPTED;
42}
43#endif /* CONFIG_XFS_RT */
44
32int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 45int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
33 int whichfork, int *eof); 46 int whichfork, int *eof);
34int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, 47int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index da14658da310..2f97c12ca75e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1258,8 +1258,6 @@ xfs_buf_ioapply_map(
1258 int size; 1258 int size;
1259 int offset; 1259 int offset;
1260 1260
1261 total_nr_pages = bp->b_page_count;
1262
1263 /* skip the pages in the buffer before the start offset */ 1261 /* skip the pages in the buffer before the start offset */
1264 page_index = 0; 1262 page_index = 0;
1265 offset = *buf_offset; 1263 offset = *buf_offset;
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index bd786a9ac2c3..eaf86f55b7f2 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -347,7 +347,7 @@ xfs_verifier_error(
347{ 347{
348 struct xfs_mount *mp = bp->b_target->bt_mount; 348 struct xfs_mount *mp = bp->b_target->bt_mount;
349 349
350 xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx", 350 xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
351 bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", 351 bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
352 __return_address, bp->b_ops->name, bp->b_bn); 352 __return_address, bp->b_ops->name, bp->b_bn);
353 353
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index ebdd0bd2b261..6526ef0e2a23 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -58,7 +58,7 @@ xfs_zero_range(
58 xfs_off_t count, 58 xfs_off_t count,
59 bool *did_zero) 59 bool *did_zero)
60{ 60{
61 return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops); 61 return iomap_zero_range(VFS_I(ip), pos, count, did_zero, &xfs_iomap_ops);
62} 62}
63 63
64int 64int
@@ -237,11 +237,13 @@ xfs_file_dax_read(
237 if (!count) 237 if (!count)
238 return 0; /* skip atime */ 238 return 0; /* skip atime */
239 239
240 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { 240 if (iocb->ki_flags & IOCB_NOWAIT) {
241 if (iocb->ki_flags & IOCB_NOWAIT) 241 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
242 return -EAGAIN; 242 return -EAGAIN;
243 } else {
243 xfs_ilock(ip, XFS_IOLOCK_SHARED); 244 xfs_ilock(ip, XFS_IOLOCK_SHARED);
244 } 245 }
246
245 ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); 247 ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
246 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 248 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
247 249
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read(
259 261
260 trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); 262 trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
261 263
262 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { 264 if (iocb->ki_flags & IOCB_NOWAIT) {
263 if (iocb->ki_flags & IOCB_NOWAIT) 265 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
264 return -EAGAIN; 266 return -EAGAIN;
267 } else {
265 xfs_ilock(ip, XFS_IOLOCK_SHARED); 268 xfs_ilock(ip, XFS_IOLOCK_SHARED);
266 } 269 }
267 ret = generic_file_read_iter(iocb, to); 270 ret = generic_file_read_iter(iocb, to);
@@ -377,8 +380,6 @@ restart:
377 */ 380 */
378 spin_lock(&ip->i_flags_lock); 381 spin_lock(&ip->i_flags_lock);
379 if (iocb->ki_pos > i_size_read(inode)) { 382 if (iocb->ki_pos > i_size_read(inode)) {
380 bool zero = false;
381
382 spin_unlock(&ip->i_flags_lock); 383 spin_unlock(&ip->i_flags_lock);
383 if (!drained_dio) { 384 if (!drained_dio) {
384 if (*iolock == XFS_IOLOCK_SHARED) { 385 if (*iolock == XFS_IOLOCK_SHARED) {
@@ -399,7 +400,7 @@ restart:
399 drained_dio = true; 400 drained_dio = true;
400 goto restart; 401 goto restart;
401 } 402 }
402 error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero); 403 error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), NULL);
403 if (error) 404 if (error)
404 return error; 405 return error;
405 } else 406 } else
@@ -436,7 +437,6 @@ xfs_dio_write_end_io(
436 struct inode *inode = file_inode(iocb->ki_filp); 437 struct inode *inode = file_inode(iocb->ki_filp);
437 struct xfs_inode *ip = XFS_I(inode); 438 struct xfs_inode *ip = XFS_I(inode);
438 loff_t offset = iocb->ki_pos; 439 loff_t offset = iocb->ki_pos;
439 bool update_size = false;
440 int error = 0; 440 int error = 0;
441 441
442 trace_xfs_end_io_direct_write(ip, offset, size); 442 trace_xfs_end_io_direct_write(ip, offset, size);
@@ -447,6 +447,21 @@ xfs_dio_write_end_io(
447 if (size <= 0) 447 if (size <= 0)
448 return size; 448 return size;
449 449
450 if (flags & IOMAP_DIO_COW) {
451 error = xfs_reflink_end_cow(ip, offset, size);
452 if (error)
453 return error;
454 }
455
456 /*
457 * Unwritten conversion updates the in-core isize after extent
458 * conversion but before updating the on-disk size. Updating isize any
459 * earlier allows a racing dio read to find unwritten extents before
460 * they are converted.
461 */
462 if (flags & IOMAP_DIO_UNWRITTEN)
463 return xfs_iomap_write_unwritten(ip, offset, size, true);
464
450 /* 465 /*
451 * We need to update the in-core inode size here so that we don't end up 466 * We need to update the in-core inode size here so that we don't end up
452 * with the on-disk inode size being outside the in-core inode size. We 467 * with the on-disk inode size being outside the in-core inode size. We
@@ -461,20 +476,11 @@ xfs_dio_write_end_io(
461 spin_lock(&ip->i_flags_lock); 476 spin_lock(&ip->i_flags_lock);
462 if (offset + size > i_size_read(inode)) { 477 if (offset + size > i_size_read(inode)) {
463 i_size_write(inode, offset + size); 478 i_size_write(inode, offset + size);
464 update_size = true; 479 spin_unlock(&ip->i_flags_lock);
465 }
466 spin_unlock(&ip->i_flags_lock);
467
468 if (flags & IOMAP_DIO_COW) {
469 error = xfs_reflink_end_cow(ip, offset, size);
470 if (error)
471 return error;
472 }
473
474 if (flags & IOMAP_DIO_UNWRITTEN)
475 error = xfs_iomap_write_unwritten(ip, offset, size);
476 else if (update_size)
477 error = xfs_setfilesize(ip, offset, size); 480 error = xfs_setfilesize(ip, offset, size);
481 } else {
482 spin_unlock(&ip->i_flags_lock);
483 }
478 484
479 return error; 485 return error;
480} 486}
@@ -549,9 +555,10 @@ xfs_file_dio_aio_write(
549 iolock = XFS_IOLOCK_SHARED; 555 iolock = XFS_IOLOCK_SHARED;
550 } 556 }
551 557
552 if (!xfs_ilock_nowait(ip, iolock)) { 558 if (iocb->ki_flags & IOCB_NOWAIT) {
553 if (iocb->ki_flags & IOCB_NOWAIT) 559 if (!xfs_ilock_nowait(ip, iolock))
554 return -EAGAIN; 560 return -EAGAIN;
561 } else {
555 xfs_ilock(ip, iolock); 562 xfs_ilock(ip, iolock);
556 } 563 }
557 564
@@ -603,9 +610,10 @@ xfs_file_dax_write(
603 size_t count; 610 size_t count;
604 loff_t pos; 611 loff_t pos;
605 612
606 if (!xfs_ilock_nowait(ip, iolock)) { 613 if (iocb->ki_flags & IOCB_NOWAIT) {
607 if (iocb->ki_flags & IOCB_NOWAIT) 614 if (!xfs_ilock_nowait(ip, iolock))
608 return -EAGAIN; 615 return -EAGAIN;
616 } else {
609 xfs_ilock(ip, iolock); 617 xfs_ilock(ip, iolock);
610 } 618 }
611 619
@@ -761,7 +769,7 @@ xfs_file_fallocate(
761 enum xfs_prealloc_flags flags = 0; 769 enum xfs_prealloc_flags flags = 0;
762 uint iolock = XFS_IOLOCK_EXCL; 770 uint iolock = XFS_IOLOCK_EXCL;
763 loff_t new_size = 0; 771 loff_t new_size = 0;
764 bool do_file_insert = 0; 772 bool do_file_insert = false;
765 773
766 if (!S_ISREG(inode->i_mode)) 774 if (!S_ISREG(inode->i_mode))
767 return -EINVAL; 775 return -EINVAL;
@@ -822,7 +830,7 @@ xfs_file_fallocate(
822 error = -EINVAL; 830 error = -EINVAL;
823 goto out_unlock; 831 goto out_unlock;
824 } 832 }
825 do_file_insert = 1; 833 do_file_insert = true;
826 } else { 834 } else {
827 flags |= XFS_PREALLOC_SET; 835 flags |= XFS_PREALLOC_SET;
828 836
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 814ed729881d..43cfc07996a4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr); 367 return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
368} 368}
369 369
370/* Transform a rtbitmap "record" into a fsmap */
371STATIC int
372xfs_getfsmap_rtdev_rtbitmap_helper(
373 struct xfs_trans *tp,
374 struct xfs_rtalloc_rec *rec,
375 void *priv)
376{
377 struct xfs_mount *mp = tp->t_mountp;
378 struct xfs_getfsmap_info *info = priv;
379 struct xfs_rmap_irec irec;
380 xfs_daddr_t rec_daddr;
381
382 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
383
384 irec.rm_startblock = rec->ar_startblock;
385 irec.rm_blockcount = rec->ar_blockcount;
386 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
387 irec.rm_offset = 0;
388 irec.rm_flags = 0;
389
390 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
391}
392
393/* Transform a bnobt irec into a fsmap */ 370/* Transform a bnobt irec into a fsmap */
394STATIC int 371STATIC int
395xfs_getfsmap_datadev_bnobt_helper( 372xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
475 return xfs_getfsmap_helper(tp, info, &rmap, 0); 452 return xfs_getfsmap_helper(tp, info, &rmap, 0);
476} 453}
477 454
455#ifdef CONFIG_XFS_RT
456/* Transform a rtbitmap "record" into a fsmap */
457STATIC int
458xfs_getfsmap_rtdev_rtbitmap_helper(
459 struct xfs_trans *tp,
460 struct xfs_rtalloc_rec *rec,
461 void *priv)
462{
463 struct xfs_mount *mp = tp->t_mountp;
464 struct xfs_getfsmap_info *info = priv;
465 struct xfs_rmap_irec irec;
466 xfs_daddr_t rec_daddr;
467
468 rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
469
470 irec.rm_startblock = rec->ar_startblock;
471 irec.rm_blockcount = rec->ar_blockcount;
472 irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
473 irec.rm_offset = 0;
474 irec.rm_flags = 0;
475
476 return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
477}
478
478/* Execute a getfsmap query against the realtime device. */ 479/* Execute a getfsmap query against the realtime device. */
479STATIC int 480STATIC int
480__xfs_getfsmap_rtdev( 481__xfs_getfsmap_rtdev(
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap(
561 return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, 562 return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query,
562 info); 563 info);
563} 564}
565#endif /* CONFIG_XFS_RT */
564 566
565/* Execute a getfsmap query against the regular data device. */ 567/* Execute a getfsmap query against the regular data device. */
566STATIC int 568STATIC int
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys(
795 return false; 797 return false;
796} 798}
797 799
800/*
801 * There are only two devices if we didn't configure RT devices at build time.
802 */
803#ifdef CONFIG_XFS_RT
798#define XFS_GETFSMAP_DEVS 3 804#define XFS_GETFSMAP_DEVS 3
805#else
806#define XFS_GETFSMAP_DEVS 2
807#endif /* CONFIG_XFS_RT */
808
799/* 809/*
800 * Get filesystem's extents as described in head, and format for 810 * Get filesystem's extents as described in head, and format for
801 * output. Calls formatter to fill the user's buffer until all 811 * output. Calls formatter to fill the user's buffer until all
@@ -853,10 +863,12 @@ xfs_getfsmap(
853 handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); 863 handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev);
854 handlers[1].fn = xfs_getfsmap_logdev; 864 handlers[1].fn = xfs_getfsmap_logdev;
855 } 865 }
866#ifdef CONFIG_XFS_RT
856 if (mp->m_rtdev_targp) { 867 if (mp->m_rtdev_targp) {
857 handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); 868 handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev);
858 handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; 869 handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap;
859 } 870 }
871#endif /* CONFIG_XFS_RT */
860 872
861 xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), 873 xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev),
862 xfs_getfsmap_dev_compare); 874 xfs_getfsmap_dev_compare);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5599dda4727a..4ec5b7f45401 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1624,10 +1624,12 @@ xfs_itruncate_extents(
1624 goto out; 1624 goto out;
1625 1625
1626 /* 1626 /*
1627 * Clear the reflink flag if we truncated everything. 1627 * Clear the reflink flag if there are no data fork blocks and
1628 * there are no extents staged in the cow fork.
1628 */ 1629 */
1629 if (ip->i_d.di_nblocks == 0 && xfs_is_reflink_inode(ip)) { 1630 if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
1630 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; 1631 if (ip->i_d.di_nblocks == 0)
1632 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1631 xfs_inode_clear_cowblocks_tag(ip); 1633 xfs_inode_clear_cowblocks_tag(ip);
1632 } 1634 }
1633 1635
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6d0f74ec31e8..9bbc2d7cc8cb 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode(
364 to->di_dmstate = from->di_dmstate; 364 to->di_dmstate = from->di_dmstate;
365 to->di_flags = from->di_flags; 365 to->di_flags = from->di_flags;
366 366
367 /* log a dummy value to ensure log structure is fully initialised */
368 to->di_next_unlinked = NULLAGINO;
369
367 if (from->di_version == 3) { 370 if (from->di_version == 3) {
368 to->di_changecount = inode->i_version; 371 to->di_changecount = inode->i_version;
369 to->di_crtime.t_sec = from->di_crtime.t_sec; 372 to->di_crtime.t_sec = from->di_crtime.t_sec;
@@ -404,6 +407,11 @@ xfs_inode_item_format_core(
404 * the second with the on-disk inode structure, and a possible third and/or 407 * the second with the on-disk inode structure, and a possible third and/or
405 * fourth with the inode data/extents/b-tree root and inode attributes 408 * fourth with the inode data/extents/b-tree root and inode attributes
406 * data/extents/b-tree root. 409 * data/extents/b-tree root.
410 *
411 * Note: Always use the 64 bit inode log format structure so we don't
412 * leave an uninitialised hole in the format item on 64 bit systems. Log
413 * recovery on 32 bit systems handles this just fine, so there's no reason
 414 * for not using and initialising the properly padded structure all the time.
407 */ 415 */
408STATIC void 416STATIC void
409xfs_inode_item_format( 417xfs_inode_item_format(
@@ -412,8 +420,8 @@ xfs_inode_item_format(
412{ 420{
413 struct xfs_inode_log_item *iip = INODE_ITEM(lip); 421 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
414 struct xfs_inode *ip = iip->ili_inode; 422 struct xfs_inode *ip = iip->ili_inode;
415 struct xfs_inode_log_format *ilf;
416 struct xfs_log_iovec *vecp = NULL; 423 struct xfs_log_iovec *vecp = NULL;
424 struct xfs_inode_log_format *ilf;
417 425
418 ASSERT(ip->i_d.di_version > 1); 426 ASSERT(ip->i_d.di_version > 1);
419 427
@@ -425,7 +433,17 @@ xfs_inode_item_format(
425 ilf->ilf_boffset = ip->i_imap.im_boffset; 433 ilf->ilf_boffset = ip->i_imap.im_boffset;
426 ilf->ilf_fields = XFS_ILOG_CORE; 434 ilf->ilf_fields = XFS_ILOG_CORE;
427 ilf->ilf_size = 2; /* format + core */ 435 ilf->ilf_size = 2; /* format + core */
428 xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); 436
437 /*
438 * make sure we don't leak uninitialised data into the log in the case
439 * when we don't log every field in the inode.
440 */
441 ilf->ilf_dsize = 0;
442 ilf->ilf_asize = 0;
443 ilf->ilf_pad = 0;
444 uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null);
445
446 xlog_finish_iovec(lv, vecp, sizeof(*ilf));
429 447
430 xfs_inode_item_format_core(ip, lv, &vecp); 448 xfs_inode_item_format_core(ip, lv, &vecp);
431 xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); 449 xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
@@ -745,7 +763,7 @@ xfs_iflush_done(
745 */ 763 */
746 iip = INODE_ITEM(blip); 764 iip = INODE_ITEM(blip);
747 if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) || 765 if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
748 lip->li_flags & XFS_LI_FAILED) 766 (blip->li_flags & XFS_LI_FAILED))
749 need_ail++; 767 need_ail++;
750 768
751 blip = next; 769 blip = next;
@@ -855,44 +873,29 @@ xfs_istale_done(
855} 873}
856 874
857/* 875/*
858 * convert an xfs_inode_log_format struct from either 32 or 64 bit versions 876 * convert an xfs_inode_log_format struct from the old 32 bit version
859 * (which can have different field alignments) to the native version 877 * (which can have different field alignments) to the native 64 bit version
860 */ 878 */
861int 879int
862xfs_inode_item_format_convert( 880xfs_inode_item_format_convert(
863 xfs_log_iovec_t *buf, 881 struct xfs_log_iovec *buf,
864 xfs_inode_log_format_t *in_f) 882 struct xfs_inode_log_format *in_f)
865{ 883{
866 if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { 884 struct xfs_inode_log_format_32 *in_f32 = buf->i_addr;
867 xfs_inode_log_format_32_t *in_f32 = buf->i_addr; 885
868 886 if (buf->i_len != sizeof(*in_f32))
869 in_f->ilf_type = in_f32->ilf_type; 887 return -EFSCORRUPTED;
870 in_f->ilf_size = in_f32->ilf_size; 888
871 in_f->ilf_fields = in_f32->ilf_fields; 889 in_f->ilf_type = in_f32->ilf_type;
872 in_f->ilf_asize = in_f32->ilf_asize; 890 in_f->ilf_size = in_f32->ilf_size;
873 in_f->ilf_dsize = in_f32->ilf_dsize; 891 in_f->ilf_fields = in_f32->ilf_fields;
874 in_f->ilf_ino = in_f32->ilf_ino; 892 in_f->ilf_asize = in_f32->ilf_asize;
875 /* copy biggest field of ilf_u */ 893 in_f->ilf_dsize = in_f32->ilf_dsize;
876 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); 894 in_f->ilf_ino = in_f32->ilf_ino;
877 in_f->ilf_blkno = in_f32->ilf_blkno; 895 /* copy biggest field of ilf_u */
878 in_f->ilf_len = in_f32->ilf_len; 896 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
879 in_f->ilf_boffset = in_f32->ilf_boffset; 897 in_f->ilf_blkno = in_f32->ilf_blkno;
880 return 0; 898 in_f->ilf_len = in_f32->ilf_len;
881 } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ 899 in_f->ilf_boffset = in_f32->ilf_boffset;
882 xfs_inode_log_format_64_t *in_f64 = buf->i_addr; 900 return 0;
883
884 in_f->ilf_type = in_f64->ilf_type;
885 in_f->ilf_size = in_f64->ilf_size;
886 in_f->ilf_fields = in_f64->ilf_fields;
887 in_f->ilf_asize = in_f64->ilf_asize;
888 in_f->ilf_dsize = in_f64->ilf_dsize;
889 in_f->ilf_ino = in_f64->ilf_ino;
890 /* copy biggest field of ilf_u */
891 uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
892 in_f->ilf_blkno = in_f64->ilf_blkno;
893 in_f->ilf_len = in_f64->ilf_len;
894 in_f->ilf_boffset = in_f64->ilf_boffset;
895 return 0;
896 }
897 return -EFSCORRUPTED;
898} 901}
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 5049e8ab6e30..aa75389be8cf 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1088,6 +1088,7 @@ xfs_ioctl_setattr_dax_invalidate(
1088 int *join_flags) 1088 int *join_flags)
1089{ 1089{
1090 struct inode *inode = VFS_I(ip); 1090 struct inode *inode = VFS_I(ip);
1091 struct super_block *sb = inode->i_sb;
1091 int error; 1092 int error;
1092 1093
1093 *join_flags = 0; 1094 *join_flags = 0;
@@ -1100,7 +1101,7 @@ xfs_ioctl_setattr_dax_invalidate(
1100 if (fa->fsx_xflags & FS_XFLAG_DAX) { 1101 if (fa->fsx_xflags & FS_XFLAG_DAX) {
1101 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) 1102 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
1102 return -EINVAL; 1103 return -EINVAL;
1103 if (ip->i_mount->m_sb.sb_blocksize != PAGE_SIZE) 1104 if (bdev_dax_supported(sb, sb->s_blocksize) < 0)
1104 return -EINVAL; 1105 return -EINVAL;
1105 } 1106 }
1106 1107
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index a1909bc064e9..f179bdf1644d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -829,7 +829,8 @@ int
829xfs_iomap_write_unwritten( 829xfs_iomap_write_unwritten(
830 xfs_inode_t *ip, 830 xfs_inode_t *ip,
831 xfs_off_t offset, 831 xfs_off_t offset,
832 xfs_off_t count) 832 xfs_off_t count,
833 bool update_isize)
833{ 834{
834 xfs_mount_t *mp = ip->i_mount; 835 xfs_mount_t *mp = ip->i_mount;
835 xfs_fileoff_t offset_fsb; 836 xfs_fileoff_t offset_fsb;
@@ -840,6 +841,7 @@ xfs_iomap_write_unwritten(
840 xfs_trans_t *tp; 841 xfs_trans_t *tp;
841 xfs_bmbt_irec_t imap; 842 xfs_bmbt_irec_t imap;
842 struct xfs_defer_ops dfops; 843 struct xfs_defer_ops dfops;
844 struct inode *inode = VFS_I(ip);
843 xfs_fsize_t i_size; 845 xfs_fsize_t i_size;
844 uint resblks; 846 uint resblks;
845 int error; 847 int error;
@@ -899,7 +901,8 @@ xfs_iomap_write_unwritten(
899 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb); 901 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
900 if (i_size > offset + count) 902 if (i_size > offset + count)
901 i_size = offset + count; 903 i_size = offset + count;
902 904 if (update_isize && i_size > i_size_read(inode))
905 i_size_write(inode, i_size);
903 i_size = xfs_new_eof(ip, i_size); 906 i_size = xfs_new_eof(ip, i_size);
904 if (i_size) { 907 if (i_size) {
905 ip->i_d.di_size = i_size; 908 ip->i_d.di_size = i_size;
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 00db3ecea084..ee535065c5d0 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -27,7 +27,7 @@ int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
27 struct xfs_bmbt_irec *, int); 27 struct xfs_bmbt_irec *, int);
28int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t, 28int xfs_iomap_write_allocate(struct xfs_inode *, int, xfs_off_t,
29 struct xfs_bmbt_irec *); 29 struct xfs_bmbt_irec *);
30int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t); 30int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
31 31
32void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, 32void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
33 struct xfs_bmbt_irec *); 33 struct xfs_bmbt_irec *);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c5107c7bc4bf..dc95a49d62e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2515,7 +2515,7 @@ next_lv:
2515 if (lv) 2515 if (lv)
2516 vecp = lv->lv_iovecp; 2516 vecp = lv->lv_iovecp;
2517 } 2517 }
2518 if (record_cnt == 0 && ordered == false) { 2518 if (record_cnt == 0 && !ordered) {
2519 if (!lv) 2519 if (!lv)
2520 return 0; 2520 return 0;
2521 break; 2521 break;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ea7d4b4e50d0..e9727d0a541a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -704,7 +704,7 @@ xfs_mountfs(
704 xfs_set_maxicount(mp); 704 xfs_set_maxicount(mp);
705 705
706 /* enable fail_at_unmount as default */ 706 /* enable fail_at_unmount as default */
707 mp->m_fail_unmount = 1; 707 mp->m_fail_unmount = true;
708 708
709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); 709 error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
710 if (error) 710 if (error)
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 0c381d71b242..0492436a053f 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void)
134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); 134 XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28);
135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); 135 XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8);
136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); 136 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52);
137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); 137 XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56);
138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); 138 XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20);
139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); 139 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16);
140} 140}
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 2f2dc3c09ad0..4246876df7b7 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -274,7 +274,7 @@ xfs_fs_commit_blocks(
274 (end - 1) >> PAGE_SHIFT); 274 (end - 1) >> PAGE_SHIFT);
275 WARN_ON_ONCE(error); 275 WARN_ON_ONCE(error);
276 276
277 error = xfs_iomap_write_unwritten(ip, start, length); 277 error = xfs_iomap_write_unwritten(ip, start, length, false);
278 if (error) 278 if (error)
279 goto out_drop_iolock; 279 goto out_drop_iolock;
280 } 280 }
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 3246815c24d6..37e603bf1591 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -736,7 +736,13 @@ xfs_reflink_end_cow(
736 /* If there is a hole at end_fsb - 1 go to the previous extent */ 736 /* If there is a hole at end_fsb - 1 go to the previous extent */
737 if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) || 737 if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
738 got.br_startoff > end_fsb) { 738 got.br_startoff > end_fsb) {
739 ASSERT(idx > 0); 739 /*
740 * In case of racing, overlapping AIO writes no COW extents
741 * might be left by the time I/O completes for the loser of
742 * the race. In that case we are done.
743 */
744 if (idx <= 0)
745 goto out_cancel;
740 xfs_iext_get_extent(ifp, --idx, &got); 746 xfs_iext_get_extent(ifp, --idx, &got);
741 } 747 }
742 748
@@ -809,6 +815,7 @@ next_extent:
809 815
810out_defer: 816out_defer:
811 xfs_defer_cancel(&dfops); 817 xfs_defer_cancel(&dfops);
818out_cancel:
812 xfs_trans_cancel(tp); 819 xfs_trans_cancel(tp);
813 xfs_iunlock(ip, XFS_ILOCK_EXCL); 820 xfs_iunlock(ip, XFS_ILOCK_EXCL);
814out: 821out:
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c996f4ae4a5f..f663022353c0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
1637 1637
1638 /* version 5 superblocks support inode version counters. */ 1638 /* version 5 superblocks support inode version counters. */
1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) 1639 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1640 sb->s_flags |= MS_I_VERSION; 1640 sb->s_flags |= SB_I_VERSION;
1641 1641
1642 if (mp->m_flags & XFS_MOUNT_DAX) { 1642 if (mp->m_flags & XFS_MOUNT_DAX) {
1643 xfs_warn(mp, 1643 xfs_warn(mp,
@@ -1654,6 +1654,16 @@ xfs_fs_fill_super(
1654 "DAX and reflink have not been tested together!"); 1654 "DAX and reflink have not been tested together!");
1655 } 1655 }
1656 1656
1657 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1658 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1659
1660 if (!blk_queue_discard(q)) {
1661 xfs_warn(mp, "mounting with \"discard\" option, but "
1662 "the device does not support discard");
1663 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1664 }
1665 }
1666
1657 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) { 1667 if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
1658 if (mp->m_sb.sb_rblocks) { 1668 if (mp->m_sb.sb_rblocks) {
1659 xfs_alert(mp, 1669 xfs_alert(mp,
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 0504ef8f3aa3..976f8ac26665 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -115,15 +115,35 @@ do { \
115 (__ret); \ 115 (__ret); \
116}) 116})
117 117
118#define this_cpu_generic_read(pcp) \ 118#define __this_cpu_generic_read_nopreempt(pcp) \
119({ \ 119({ \
120 typeof(pcp) __ret; \ 120 typeof(pcp) __ret; \
121 preempt_disable_notrace(); \ 121 preempt_disable_notrace(); \
122 __ret = raw_cpu_generic_read(pcp); \ 122 __ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
123 preempt_enable_notrace(); \ 123 preempt_enable_notrace(); \
124 __ret; \ 124 __ret; \
125}) 125})
126 126
127#define __this_cpu_generic_read_noirq(pcp) \
128({ \
129 typeof(pcp) __ret; \
130 unsigned long __flags; \
131 raw_local_irq_save(__flags); \
132 __ret = raw_cpu_generic_read(pcp); \
133 raw_local_irq_restore(__flags); \
134 __ret; \
135})
136
137#define this_cpu_generic_read(pcp) \
138({ \
139 typeof(pcp) __ret; \
140 if (__native_word(pcp)) \
141 __ret = __this_cpu_generic_read_nopreempt(pcp); \
142 else \
143 __ret = __this_cpu_generic_read_noirq(pcp); \
144 __ret; \
145})
146
127#define this_cpu_generic_to_op(pcp, val, op) \ 147#define this_cpu_generic_to_op(pcp, val, op) \
128do { \ 148do { \
129 unsigned long __flags; \ 149 unsigned long __flags; \
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h
new file mode 100644
index 000000000000..e1a643e4bc91
--- /dev/null
+++ b/include/dt-bindings/reset/snps,hsdk-reset.h
@@ -0,0 +1,17 @@
1/**
2 * This header provides index for the HSDK reset controller.
3 */
4#ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
5#define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK
6
7#define HSDK_APB_RESET 0
8#define HSDK_AXI_RESET 1
9#define HSDK_ETH_RESET 2
10#define HSDK_USB_RESET 3
11#define HSDK_SDIO_RESET 4
12#define HSDK_HDMI_RESET 5
13#define HSDK_GFX_RESET 6
14#define HSDK_DMAC_RESET 7
15#define HSDK_EBI_RESET 8
16
17#endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/
diff --git a/include/dt-bindings/reset/snps,hsdk-v1-reset.h b/include/dt-bindings/reset/snps,hsdk-v1-reset.h
deleted file mode 100644
index d898c89b7123..000000000000
--- a/include/dt-bindings/reset/snps,hsdk-v1-reset.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/**
2 * This header provides index for the HSDK v1 reset controller.
3 */
4#ifndef _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
5#define _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1
6
7#define HSDK_V1_APB_RESET 0
8#define HSDK_V1_AXI_RESET 1
9#define HSDK_V1_ETH_RESET 2
10#define HSDK_V1_USB_RESET 3
11#define HSDK_V1_SDIO_RESET 4
12#define HSDK_V1_HDMI_RESET 5
13#define HSDK_V1_GFX_RESET 6
14#define HSDK_V1_DMAC_RESET 7
15#define HSDK_V1_EBI_RESET 8
16
17#endif /*_DT_BINDINGS_RESET_CONTROLLER_HSDK_V1*/
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fb44d6180ca0..18d05b5491f3 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
131 int executable_stack); 131 int executable_stack);
132extern int transfer_args_to_stack(struct linux_binprm *bprm, 132extern int transfer_args_to_stack(struct linux_binprm *bprm,
133 unsigned long *sp_location); 133 unsigned long *sp_location);
134extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); 134extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm);
135extern int copy_strings_kernel(int argc, const char *const *argv, 135extern int copy_strings_kernel(int argc, const char *const *argv,
136 struct linux_binprm *bprm); 136 struct linux_binprm *bprm);
137extern int prepare_bprm_creds(struct linux_binprm *bprm); 137extern int prepare_bprm_creds(struct linux_binprm *bprm);
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 8b9d6fff002d..f2deb71958b2 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -92,7 +92,7 @@
92/** 92/**
93 * FIELD_GET() - extract a bitfield element 93 * FIELD_GET() - extract a bitfield element
94 * @_mask: shifted mask defining the field's length and position 94 * @_mask: shifted mask defining the field's length and position
95 * @_reg: 32bit value of entire bitfield 95 * @_reg: value of entire bitfield
96 * 96 *
97 * FIELD_GET() extracts the field specified by @_mask from the 97 * FIELD_GET() extracts the field specified by @_mask from the
98 * bitfield passed in as @_reg by masking and shifting it down. 98 * bitfield passed in as @_reg by masking and shifting it down.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 460294bb0fa5..02fa42d24b52 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,6 +551,7 @@ struct request_queue {
551 int node; 551 int node;
552#ifdef CONFIG_BLK_DEV_IO_TRACE 552#ifdef CONFIG_BLK_DEV_IO_TRACE
553 struct blk_trace *blk_trace; 553 struct blk_trace *blk_trace;
554 struct mutex blk_trace_mutex;
554#endif 555#endif
555 /* 556 /*
556 * for flush operations 557 * for flush operations
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8390859e79e7..f1af7d63d678 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
368{ 368{
369} 369}
370 370
371static inline int bpf_obj_get_user(const char __user *pathname)
372{
373 return -EOPNOTSUPP;
374}
375
371static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, 376static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
372 u32 key) 377 u32 key)
373{ 378{
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index c8dae555eccf..446b24cac67d 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *,
232 loff_t, unsigned, unsigned, 232 loff_t, unsigned, unsigned,
233 struct page *, void *); 233 struct page *, void *);
234void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); 234void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
235void clean_page_buffers(struct page *page);
235int cont_write_begin(struct file *, struct address_space *, loff_t, 236int cont_write_begin(struct file *, struct address_space *, loff_t,
236 unsigned, unsigned, struct page **, void **, 237 unsigned, unsigned, struct page **, void **,
237 get_block_t *, loff_t *); 238 get_block_t *, loff_t *);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index f24bfb2b9a2d..6d508767e144 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -3,8 +3,27 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6/*
7 * CPU-up CPU-down
8 *
9 * BP AP BP AP
10 *
11 * OFFLINE OFFLINE
12 * | ^
13 * v |
14 * BRINGUP_CPU->AP_OFFLINE BRINGUP_CPU <- AP_IDLE_DEAD (idle thread/play_dead)
15 * | AP_OFFLINE
16 * v (IRQ-off) ,---------------^
17 * AP_ONLNE | (stop_machine)
18 * | TEARDOWN_CPU <- AP_ONLINE_IDLE
19 * | ^
20 * v |
21 * AP_ACTIVE AP_ACTIVE
22 */
23
6enum cpuhp_state { 24enum cpuhp_state {
7 CPUHP_OFFLINE, 25 CPUHP_INVALID = -1,
26 CPUHP_OFFLINE = 0,
8 CPUHP_CREATE_THREADS, 27 CPUHP_CREATE_THREADS,
9 CPUHP_PERF_PREPARE, 28 CPUHP_PERF_PREPARE,
10 CPUHP_PERF_X86_PREPARE, 29 CPUHP_PERF_X86_PREPARE,
diff --git a/include/linux/device.h b/include/linux/device.h
index 1d2607923a24..66fe271c2544 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -307,8 +307,6 @@ struct driver_attribute {
307 size_t count); 307 size_t count);
308}; 308};
309 309
310#define DRIVER_ATTR(_name, _mode, _show, _store) \
311 struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
312#define DRIVER_ATTR_RW(_name) \ 310#define DRIVER_ATTR_RW(_name) \
313 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) 311 struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
314#define DRIVER_ATTR_RO(_name) \ 312#define DRIVER_ATTR_RO(_name) \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index d29e58fde364..818a0b26249e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
728void bpf_warn_invalid_xdp_action(u32 act); 728void bpf_warn_invalid_xdp_action(u32 act);
729void bpf_warn_invalid_xdp_redirect(u32 ifindex); 729void bpf_warn_invalid_xdp_redirect(u32 ifindex);
730 730
731struct sock *do_sk_redirect_map(void); 731struct sock *do_sk_redirect_map(struct sk_buff *skb);
732 732
733#ifdef CONFIG_BPF_JIT 733#ifdef CONFIG_BPF_JIT
734extern int bpf_jit_enable; 734extern int bpf_jit_enable;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 339e73742e73..13dab191a23e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -403,7 +403,7 @@ struct address_space {
403 unsigned long flags; /* error bits */ 403 unsigned long flags; /* error bits */
404 spinlock_t private_lock; /* for use by the address_space */ 404 spinlock_t private_lock; /* for use by the address_space */
405 gfp_t gfp_mask; /* implicit gfp mask for allocations */ 405 gfp_t gfp_mask; /* implicit gfp mask for allocations */
406 struct list_head private_list; /* ditto */ 406 struct list_head private_list; /* for use by the address_space */
407 void *private_data; /* ditto */ 407 void *private_data; /* ditto */
408 errseq_t wb_err; 408 errseq_t wb_err;
409} __attribute__((aligned(sizeof(long)))) __randomize_layout; 409} __attribute__((aligned(sizeof(long)))) __randomize_layout;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index c458d7b7ad19..6431087816ba 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1403 const int *srv_version, int srv_vercnt, 1403 const int *srv_version, int srv_vercnt,
1404 int *nego_fw_version, int *nego_srv_version); 1404 int *nego_fw_version, int *nego_srv_version);
1405 1405
1406void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); 1406void hv_process_channel_removal(u32 relid);
1407 1407
1408void vmbus_setevent(struct vmbus_channel *channel); 1408void vmbus_setevent(struct vmbus_channel *channel);
1409/* 1409/*
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 4837157da0dc..9ae41cdd0d4c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
73int tap_get_minor(dev_t major, struct tap_dev *tap); 73int tap_get_minor(dev_t major, struct tap_dev *tap);
74void tap_free_minor(dev_t major, struct tap_dev *tap); 74void tap_free_minor(dev_t major, struct tap_dev *tap);
75int tap_queue_resize(struct tap_dev *tap); 75int tap_queue_resize(struct tap_dev *tap);
76int tap_create_cdev(struct cdev *tap_cdev, 76int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
77 dev_t *tap_major, const char *device_name); 77 const char *device_name, struct module *module);
78void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); 78void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
79 79
80#endif /*_LINUX_IF_TAP_H_*/ 80#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 5ba430cc9a87..1fc7abd28b0b 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
111int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg, 111int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
112 unsigned int size, unsigned int *val); 112 unsigned int size, unsigned int *val);
113 113
114int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
115 unsigned int reset_length);
116
114int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, 117int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
115 const struct iio_chan_spec *chan, int *val); 118 const struct iio_chan_spec *chan, int *val);
116int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta, 119int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
diff --git a/include/linux/input.h b/include/linux/input.h
index fb5e23c7ed98..7c7516eb7d76 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -234,6 +234,10 @@ struct input_dev {
234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" 234#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
235#endif 235#endif
236 236
237#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
238#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
239#endif
240
237#define INPUT_DEVICE_ID_MATCH_DEVICE \ 241#define INPUT_DEVICE_ID_MATCH_DEVICE \
238 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) 242 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
239#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ 243#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
469int input_set_keycode(struct input_dev *dev, 473int input_set_keycode(struct input_dev *dev,
470 const struct input_keymap_entry *ke); 474 const struct input_keymap_entry *ke);
471 475
476bool input_match_device_id(const struct input_dev *dev,
477 const struct input_device_id *id);
478
472void input_enable_softrepeat(struct input_dev *dev, int delay, int period); 479void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
473 480
474extern struct class input_class; 481extern struct class input_class;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a7f2ac689d29..41b8c5757859 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -167,11 +167,11 @@ struct iommu_resv_region {
167 * @map: map a physically contiguous memory region to an iommu domain 167 * @map: map a physically contiguous memory region to an iommu domain
168 * @unmap: unmap a physically contiguous memory region from an iommu domain 168 * @unmap: unmap a physically contiguous memory region from an iommu domain
169 * @map_sg: map a scatter-gather list of physically contiguous memory chunks 169 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
170 * to an iommu domain
170 * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain 171 * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
171 * @tlb_range_add: Add a given iova range to the flush queue for this domain 172 * @tlb_range_add: Add a given iova range to the flush queue for this domain
172 * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush 173 * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
173 * queue 174 * queue
174 * to an iommu domain
175 * @iova_to_phys: translate iova to physical address 175 * @iova_to_phys: translate iova to physical address
176 * @add_device: add device to iommu grouping 176 * @add_device: add device to iommu grouping
177 * @remove_device: remove device from iommu grouping 177 * @remove_device: remove device from iommu grouping
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d4728bf6a537..5ad10948ea95 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
1009void irq_gc_unmask_enable_reg(struct irq_data *d); 1009void irq_gc_unmask_enable_reg(struct irq_data *d);
1010void irq_gc_ack_set_bit(struct irq_data *d); 1010void irq_gc_ack_set_bit(struct irq_data *d);
1011void irq_gc_ack_clr_bit(struct irq_data *d); 1011void irq_gc_ack_clr_bit(struct irq_data *d);
1012void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); 1012void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
1013void irq_gc_eoi(struct irq_data *d); 1013void irq_gc_eoi(struct irq_data *d);
1014int irq_gc_set_wake(struct irq_data *d, unsigned int on); 1014int irq_gc_set_wake(struct irq_data *d, unsigned int on);
1015 1015
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1ea576c8126f..14b74f22d43c 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -372,6 +372,8 @@
372#define GITS_BASER_ENTRY_SIZE_SHIFT (48) 372#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
373#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) 373#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
374#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) 374#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
375#define GITS_BASER_PHYS_52_to_48(phys) \
376 (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
375#define GITS_BASER_SHAREABILITY_SHIFT (10) 377#define GITS_BASER_SHAREABILITY_SHIFT (10)
376#define GITS_BASER_InnerShareable \ 378#define GITS_BASER_InnerShareable \
377 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) 379 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0ad4c3044cf9..91189bb0c818 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -44,6 +44,12 @@
44 44
45#define STACK_MAGIC 0xdeadbeef 45#define STACK_MAGIC 0xdeadbeef
46 46
47/**
48 * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
49 * @x: value to repeat
50 *
51 * NOTE: @x is not checked for > 0xff; larger values produce odd results.
52 */
47#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) 53#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
48 54
49/* @a is a power of 2 value */ 55/* @a is a power of 2 value */
@@ -57,6 +63,10 @@
57#define READ 0 63#define READ 0
58#define WRITE 1 64#define WRITE 1
59 65
66/**
67 * ARRAY_SIZE - get the number of elements in array @arr
68 * @arr: array to be sized
69 */
60#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) 70#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
61 71
62#define u64_to_user_ptr(x) ( \ 72#define u64_to_user_ptr(x) ( \
@@ -76,7 +86,15 @@
76#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) 86#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
77#define round_down(x, y) ((x) & ~__round_mask(x, y)) 87#define round_down(x, y) ((x) & ~__round_mask(x, y))
78 88
89/**
90 * FIELD_SIZEOF - get the size of a struct's field
91 * @t: the target struct
92 * @f: the target struct's field
93 * Return: the size of @f in the struct definition without having a
94 * declared instance of @t.
95 */
79#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 96#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
97
80#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP 98#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
81 99
82#define DIV_ROUND_DOWN_ULL(ll, d) \ 100#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -107,7 +125,7 @@
107/* 125/*
108 * Divide positive or negative dividend by positive or negative divisor 126 * Divide positive or negative dividend by positive or negative divisor
109 * and round to closest integer. Result is undefined for negative 127 * and round to closest integer. Result is undefined for negative
110 * divisors if he dividend variable type is unsigned and for negative 128 * divisors if the dividend variable type is unsigned and for negative
111 * dividends if the divisor variable type is unsigned. 129 * dividends if the divisor variable type is unsigned.
112 */ 130 */
113#define DIV_ROUND_CLOSEST(x, divisor)( \ 131#define DIV_ROUND_CLOSEST(x, divisor)( \
@@ -247,13 +265,13 @@ extern int _cond_resched(void);
247 * @ep_ro: right open interval endpoint 265 * @ep_ro: right open interval endpoint
248 * 266 *
249 * Perform a "reciprocal multiplication" in order to "scale" a value into 267 * Perform a "reciprocal multiplication" in order to "scale" a value into
250 * range [0, ep_ro), where the upper interval endpoint is right-open. 268 * range [0, @ep_ro), where the upper interval endpoint is right-open.
251 * This is useful, e.g. for accessing a index of an array containing 269 * This is useful, e.g. for accessing a index of an array containing
252 * ep_ro elements, for example. Think of it as sort of modulus, only that 270 * @ep_ro elements, for example. Think of it as sort of modulus, only that
253 * the result isn't that of modulo. ;) Note that if initial input is a 271 * the result isn't that of modulo. ;) Note that if initial input is a
254 * small value, then result will return 0. 272 * small value, then result will return 0.
255 * 273 *
256 * Return: a result based on val in interval [0, ep_ro). 274 * Return: a result based on @val in interval [0, @ep_ro).
257 */ 275 */
258static inline u32 reciprocal_scale(u32 val, u32 ep_ro) 276static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
259{ 277{
@@ -618,8 +636,8 @@ do { \
618 * trace_printk - printf formatting in the ftrace buffer 636 * trace_printk - printf formatting in the ftrace buffer
619 * @fmt: the printf format for printing 637 * @fmt: the printf format for printing
620 * 638 *
621 * Note: __trace_printk is an internal function for trace_printk and 639 * Note: __trace_printk is an internal function for trace_printk() and
622 * the @ip is passed in via the trace_printk macro. 640 * the @ip is passed in via the trace_printk() macro.
623 * 641 *
624 * This function allows a kernel developer to debug fast path sections 642 * This function allows a kernel developer to debug fast path sections
625 * that printk is not appropriate for. By scattering in various 643 * that printk is not appropriate for. By scattering in various
@@ -629,7 +647,7 @@ do { \
629 * This is intended as a debugging tool for the developer only. 647 * This is intended as a debugging tool for the developer only.
630 * Please refrain from leaving trace_printks scattered around in 648 * Please refrain from leaving trace_printks scattered around in
631 * your code. (Extra memory is used for special buffers that are 649 * your code. (Extra memory is used for special buffers that are
632 * allocated when trace_printk() is used) 650 * allocated when trace_printk() is used.)
633 * 651 *
634 * A little optization trick is done here. If there's only one 652 * A little optization trick is done here. If there's only one
635 * argument, there's no need to scan the string for printf formats. 653 * argument, there's no need to scan the string for printf formats.
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
681 * the @ip is passed in via the trace_puts macro. 699 * the @ip is passed in via the trace_puts macro.
682 * 700 *
683 * This is similar to trace_printk() but is made for those really fast 701 * This is similar to trace_printk() but is made for those really fast
684 * paths that a developer wants the least amount of "Heisenbug" affects, 702 * paths that a developer wants the least amount of "Heisenbug" effects,
685 * where the processing of the print format is still too much. 703 * where the processing of the print format is still too much.
686 * 704 *
687 * This function allows a kernel developer to debug fast path sections 705 * This function allows a kernel developer to debug fast path sections
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
692 * This is intended as a debugging tool for the developer only. 710 * This is intended as a debugging tool for the developer only.
693 * Please refrain from leaving trace_puts scattered around in 711 * Please refrain from leaving trace_puts scattered around in
694 * your code. (Extra memory is used for special buffers that are 712 * your code. (Extra memory is used for special buffers that are
695 * allocated when trace_puts() is used) 713 * allocated when trace_puts() is used.)
696 * 714 *
697 * Returns: 0 if nothing was written, positive # if string was. 715 * Returns: 0 if nothing was written, positive # if string was.
698 * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) 716 * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
771 t2 min2 = (y); \ 789 t2 min2 = (y); \
772 (void) (&min1 == &min2); \ 790 (void) (&min1 == &min2); \
773 min1 < min2 ? min1 : min2; }) 791 min1 < min2 ? min1 : min2; })
792
793/**
794 * min - return minimum of two values of the same or compatible types
795 * @x: first value
796 * @y: second value
797 */
774#define min(x, y) \ 798#define min(x, y) \
775 __min(typeof(x), typeof(y), \ 799 __min(typeof(x), typeof(y), \
776 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 800 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
781 t2 max2 = (y); \ 805 t2 max2 = (y); \
782 (void) (&max1 == &max2); \ 806 (void) (&max1 == &max2); \
783 max1 > max2 ? max1 : max2; }) 807 max1 > max2 ? max1 : max2; })
808
809/**
810 * max - return maximum of two values of the same or compatible types
811 * @x: first value
812 * @y: second value
813 */
784#define max(x, y) \ 814#define max(x, y) \
785 __max(typeof(x), typeof(y), \ 815 __max(typeof(x), typeof(y), \
786 __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ 816 __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \
787 x, y) 817 x, y)
788 818
819/**
820 * min3 - return minimum of three values
821 * @x: first value
822 * @y: second value
823 * @z: third value
824 */
789#define min3(x, y, z) min((typeof(x))min(x, y), z) 825#define min3(x, y, z) min((typeof(x))min(x, y), z)
826
827/**
828 * max3 - return maximum of three values
829 * @x: first value
830 * @y: second value
831 * @z: third value
832 */
790#define max3(x, y, z) max((typeof(x))max(x, y), z) 833#define max3(x, y, z) max((typeof(x))max(x, y), z)
791 834
792/** 835/**
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
805 * @lo: lowest allowable value 848 * @lo: lowest allowable value
806 * @hi: highest allowable value 849 * @hi: highest allowable value
807 * 850 *
808 * This macro does strict typechecking of lo/hi to make sure they are of the 851 * This macro does strict typechecking of @lo/@hi to make sure they are of the
809 * same type as val. See the unnecessary pointer comparisons. 852 * same type as @val. See the unnecessary pointer comparisons.
810 */ 853 */
811#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) 854#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
812 855
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
816 * 859 *
817 * Or not use min/max/clamp at all, of course. 860 * Or not use min/max/clamp at all, of course.
818 */ 861 */
862
863/**
864 * min_t - return minimum of two values, using the specified type
865 * @type: data type to use
866 * @x: first value
867 * @y: second value
868 */
819#define min_t(type, x, y) \ 869#define min_t(type, x, y) \
820 __min(type, type, \ 870 __min(type, type, \
821 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 871 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
822 x, y) 872 x, y)
823 873
874/**
875 * max_t - return maximum of two values, using the specified type
876 * @type: data type to use
877 * @x: first value
878 * @y: second value
879 */
824#define max_t(type, x, y) \ 880#define max_t(type, x, y) \
825 __max(type, type, \ 881 __max(type, type, \
826 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ 882 __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
834 * @hi: maximum allowable value 890 * @hi: maximum allowable value
835 * 891 *
836 * This macro does no typechecking and uses temporary variables of type 892 * This macro does no typechecking and uses temporary variables of type
837 * 'type' to make all the comparisons. 893 * @type to make all the comparisons.
838 */ 894 */
839#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) 895#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
840 896
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
845 * @hi: maximum allowable value 901 * @hi: maximum allowable value
846 * 902 *
847 * This macro does no typechecking and uses temporary variables of whatever 903 * This macro does no typechecking and uses temporary variables of whatever
848 * type the input argument 'val' is. This is useful when val is an unsigned 904 * type the input argument @val is. This is useful when @val is an unsigned
849 * type and min and max are literals that will otherwise be assigned a signed 905 * type and @lo and @hi are literals that will otherwise be assigned a signed
850 * integer type. 906 * integer type.
851 */ 907 */
852#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) 908#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
853 909
854 910
855/* 911/**
856 * swap - swap value of @a and @b 912 * swap - swap values of @a and @b
913 * @a: first value
914 * @b: second value
857 */ 915 */
858#define swap(a, b) \ 916#define swap(a, b) \
859 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) 917 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
diff --git a/include/linux/key.h b/include/linux/key.h
index 044114185120..8a15cabe928d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -138,6 +138,11 @@ struct key_restriction {
138 struct key_type *keytype; 138 struct key_type *keytype;
139}; 139};
140 140
141enum key_state {
142 KEY_IS_UNINSTANTIATED,
143 KEY_IS_POSITIVE, /* Positively instantiated */
144};
145
141/*****************************************************************************/ 146/*****************************************************************************/
142/* 147/*
143 * authentication token / access credential / keyring 148 * authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
169 * - may not match RCU dereferenced payload 174 * - may not match RCU dereferenced payload
170 * - payload should contain own length 175 * - payload should contain own length
171 */ 176 */
177 short state; /* Key state (+) or rejection error (-) */
172 178
173#ifdef KEY_DEBUGGING 179#ifdef KEY_DEBUGGING
174 unsigned magic; 180 unsigned magic;
@@ -176,17 +182,16 @@ struct key {
176#endif 182#endif
177 183
178 unsigned long flags; /* status flags (change with bitops) */ 184 unsigned long flags; /* status flags (change with bitops) */
179#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ 185#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
180#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ 186#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
181#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ 187#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
182#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ 188#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
183#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ 189#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
184#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ 190#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
185#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ 191#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
186#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ 192#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
187#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ 193#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
188#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ 194#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
189#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
190 195
191 /* the key type and key description string 196 /* the key type and key description string
192 * - the desc is used to match a key against search criteria 197 * - the desc is used to match a key against search criteria
@@ -212,7 +217,6 @@ struct key {
212 struct list_head name_link; 217 struct list_head name_link;
213 struct assoc_array keys; 218 struct assoc_array keys;
214 }; 219 };
215 int reject_error;
216 }; 220 };
217 221
218 /* This is set on a keyring to restrict the addition of a link to a key 222 /* This is set on a keyring to restrict the addition of a link to a key
@@ -243,6 +247,7 @@ extern struct key *key_alloc(struct key_type *type,
243#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */ 247#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
244#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */ 248#define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
245#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */ 249#define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
250#define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
246 251
247extern void key_revoke(struct key *key); 252extern void key_revoke(struct key *key);
248extern void key_invalidate(struct key *key); 253extern void key_invalidate(struct key *key);
@@ -351,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
351#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ 356#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
352#define KEY_NEED_ALL 0x3f /* All the above permissions */ 357#define KEY_NEED_ALL 0x3f /* All the above permissions */
353 358
359static inline short key_read_state(const struct key *key)
360{
361 /* Barrier versus mark_key_instantiated(). */
362 return smp_load_acquire(&key->state);
363}
364
354/** 365/**
355 * key_is_instantiated - Determine if a key has been positively instantiated 366 * key_is_positive - Determine if a key has been positively instantiated
356 * @key: The key to check. 367 * @key: The key to check.
357 * 368 *
358 * Return true if the specified key has been positively instantiated, false 369 * Return true if the specified key has been positively instantiated, false
359 * otherwise. 370 * otherwise.
360 */ 371 */
361static inline bool key_is_instantiated(const struct key *key) 372static inline bool key_is_positive(const struct key *key)
373{
374 return key_read_state(key) == KEY_IS_POSITIVE;
375}
376
377static inline bool key_is_negative(const struct key *key)
362{ 378{
363 return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 379 return key_read_state(key) < 0;
364 !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
365} 380}
366 381
367#define dereference_key_rcu(KEY) \ 382#define dereference_key_rcu(KEY) \
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
31 struct mbus_dram_window { 31 struct mbus_dram_window {
32 u8 cs_index; 32 u8 cs_index;
33 u8 mbus_attr; 33 u8 mbus_attr;
34 u32 base; 34 u64 base;
35 u32 size; 35 u64 size;
36 } cs[4]; 36 } cs[4];
37}; 37};
38 38
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index eaf4ad209c8f..e32dbc4934db 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -980,7 +980,6 @@ enum mlx5_cap_type {
980 MLX5_CAP_RESERVED, 980 MLX5_CAP_RESERVED,
981 MLX5_CAP_VECTOR_CALC, 981 MLX5_CAP_VECTOR_CALC,
982 MLX5_CAP_QOS, 982 MLX5_CAP_QOS,
983 MLX5_CAP_FPGA,
984 /* NUM OF CAP Types */ 983 /* NUM OF CAP Types */
985 MLX5_CAP_NUM 984 MLX5_CAP_NUM
986}; 985};
@@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups {
1110 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) 1109 MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
1111 1110
1112#define MLX5_CAP_FPGA(mdev, cap) \ 1111#define MLX5_CAP_FPGA(mdev, cap) \
1113 MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) 1112 MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
1114 1113
1115#define MLX5_CAP64_FPGA(mdev, cap) \ 1114#define MLX5_CAP64_FPGA(mdev, cap) \
1116 MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) 1115 MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
1117 1116
1118enum { 1117enum {
1119 MLX5_CMD_STAT_OK = 0x0, 1118 MLX5_CMD_STAT_OK = 0x0,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 02ff700e4f30..401c8972cc3a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -774,6 +774,7 @@ struct mlx5_core_dev {
774 u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; 774 u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
775 u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; 775 u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
776 u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; 776 u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
777 u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
777 } caps; 778 } caps;
778 phys_addr_t iseg_base; 779 phys_addr_t iseg_base;
779 struct mlx5_init_seg __iomem *iseg; 780 struct mlx5_init_seg __iomem *iseg;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a528b35a022e..69772347f866 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
327 u8 reserved_at_80[0x18]; 327 u8 reserved_at_80[0x18];
328 u8 log_max_destination[0x8]; 328 u8 log_max_destination[0x8];
329 329
330 u8 reserved_at_a0[0x18]; 330 u8 log_max_flow_counter[0x8];
331 u8 reserved_at_a8[0x10];
331 u8 log_max_flow[0x8]; 332 u8 log_max_flow[0x8];
332 333
333 u8 reserved_at_c0[0x40]; 334 u8 reserved_at_c0[0x40];
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..c59af8ab753a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
157int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, 157int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
158 u8 prio, u8 *tc); 158 u8 prio, u8 *tc);
159int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); 159int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
160int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
161 u8 tc, u8 *tc_group);
160int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); 162int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
161int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, 163int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
162 u8 tc, u8 *bw_pct); 164 u8 tc, u8 *bw_pct);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f8c10d336e42..065d99deb847 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp);
240 240
241#if defined(CONFIG_X86_INTEL_MPX) 241#if defined(CONFIG_X86_INTEL_MPX)
242/* MPX specific bounds table or bounds directory */ 242/* MPX specific bounds table or bounds directory */
243# define VM_MPX VM_HIGH_ARCH_BIT_4 243# define VM_MPX VM_HIGH_ARCH_4
244#else 244#else
245# define VM_MPX VM_NONE 245# define VM_MPX VM_NONE
246#endif 246#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..1861ea8dba77 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,6 +445,9 @@ struct mm_struct {
445 unsigned long flags; /* Must use atomic bitops to access the bits */ 445 unsigned long flags; /* Must use atomic bitops to access the bits */
446 446
447 struct core_state *core_state; /* coredumping support */ 447 struct core_state *core_state; /* coredumping support */
448#ifdef CONFIG_MEMBARRIER
449 atomic_t membarrier_state;
450#endif
448#ifdef CONFIG_AIO 451#ifdef CONFIG_AIO
449 spinlock_t ioctx_lock; 452 spinlock_t ioctx_lock;
450 struct kioctx_table __rcu *ioctx_table; 453 struct kioctx_table __rcu *ioctx_table;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index f3f2d07feb2a..9a43763a68ad 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -316,7 +316,7 @@ struct mmc_host {
316#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ 316#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
317#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ 317#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
318#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ 318#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
319#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */ 319/* (1 << 21) is free for reuse */
320#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ 320#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
321#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ 321#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
322#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ 322#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 7b2e31b1745a..6866e8126982 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void);
400 400
401#else /* CONFIG_MMU_NOTIFIER */ 401#else /* CONFIG_MMU_NOTIFIER */
402 402
403static inline int mm_has_notifiers(struct mm_struct *mm)
404{
405 return 0;
406}
407
403static inline void mmu_notifier_release(struct mm_struct *mm) 408static inline void mmu_notifier_release(struct mm_struct *mm)
404{ 409{
405} 410}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 356a814e7c8e..c8f89417740b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
1094#error Allocator MAX_ORDER exceeds SECTION_SIZE 1094#error Allocator MAX_ORDER exceeds SECTION_SIZE
1095#endif 1095#endif
1096 1096
1097#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) 1097static inline unsigned long pfn_to_section_nr(unsigned long pfn)
1098#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) 1098{
1099 return pfn >> PFN_SECTION_SHIFT;
1100}
1101static inline unsigned long section_nr_to_pfn(unsigned long sec)
1102{
1103 return sec << PFN_SECTION_SHIFT;
1104}
1099 1105
1100#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) 1106#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
1101#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) 1107#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 694cebb50f72..2657f9f51536 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
293#define INPUT_DEVICE_ID_SND_MAX 0x07 293#define INPUT_DEVICE_ID_SND_MAX 0x07
294#define INPUT_DEVICE_ID_FF_MAX 0x7f 294#define INPUT_DEVICE_ID_FF_MAX 0x7f
295#define INPUT_DEVICE_ID_SW_MAX 0x0f 295#define INPUT_DEVICE_ID_SW_MAX 0x0f
296#define INPUT_DEVICE_ID_PROP_MAX 0x1f
296 297
297#define INPUT_DEVICE_ID_MATCH_BUS 1 298#define INPUT_DEVICE_ID_MATCH_BUS 1
298#define INPUT_DEVICE_ID_MATCH_VENDOR 2 299#define INPUT_DEVICE_ID_MATCH_VENDOR 2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
308#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 309#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
309#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 310#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
310#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 311#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
312#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000
311 313
312struct input_device_id { 314struct input_device_id {
313 315
@@ -327,6 +329,7 @@ struct input_device_id {
327 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; 329 kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
328 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; 330 kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
329 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; 331 kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
332 kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
330 333
331 kernel_ulong_t driver_info; 334 kernel_ulong_t driver_info;
332}; 335};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f535779d9dc1..2eaac7d75af4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
3694 unsigned char name_assign_type, 3694 unsigned char name_assign_type,
3695 void (*setup)(struct net_device *), 3695 void (*setup)(struct net_device *),
3696 unsigned int txqs, unsigned int rxqs); 3696 unsigned int txqs, unsigned int rxqs);
3697int dev_get_valid_name(struct net *net, struct net_device *dev,
3698 const char *name);
3699
3697#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ 3700#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
3698 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) 3701 alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
3699 3702
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2c2a5514b0df..528b24c78308 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -108,9 +108,10 @@ struct ebt_table {
108 108
109#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ 109#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
110 ~(__alignof__(struct _xt_align)-1)) 110 ~(__alignof__(struct _xt_align)-1))
111extern struct ebt_table *ebt_register_table(struct net *net, 111extern int ebt_register_table(struct net *net,
112 const struct ebt_table *table, 112 const struct ebt_table *table,
113 const struct nf_hook_ops *); 113 const struct nf_hook_ops *ops,
114 struct ebt_table **res);
114extern void ebt_unregister_table(struct net *net, struct ebt_table *table, 115extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
115 const struct nf_hook_ops *); 116 const struct nf_hook_ops *);
116extern unsigned int ebt_do_table(struct sk_buff *skb, 117extern unsigned int ebt_do_table(struct sk_buff *skb,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a36abe2da13e..27e249ed7c5c 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -12,11 +12,31 @@
12 12
13#ifdef CONFIG_LOCKUP_DETECTOR 13#ifdef CONFIG_LOCKUP_DETECTOR
14void lockup_detector_init(void); 14void lockup_detector_init(void);
15void lockup_detector_soft_poweroff(void);
16void lockup_detector_cleanup(void);
17bool is_hardlockup(void);
18
19extern int watchdog_user_enabled;
20extern int nmi_watchdog_user_enabled;
21extern int soft_watchdog_user_enabled;
22extern int watchdog_thresh;
23extern unsigned long watchdog_enabled;
24
25extern struct cpumask watchdog_cpumask;
26extern unsigned long *watchdog_cpumask_bits;
27#ifdef CONFIG_SMP
28extern int sysctl_softlockup_all_cpu_backtrace;
29extern int sysctl_hardlockup_all_cpu_backtrace;
15#else 30#else
16static inline void lockup_detector_init(void) 31#define sysctl_softlockup_all_cpu_backtrace 0
17{ 32#define sysctl_hardlockup_all_cpu_backtrace 0
18} 33#endif /* !CONFIG_SMP */
19#endif 34
35#else /* CONFIG_LOCKUP_DETECTOR */
36static inline void lockup_detector_init(void) { }
37static inline void lockup_detector_soft_poweroff(void) { }
38static inline void lockup_detector_cleanup(void) { }
39#endif /* !CONFIG_LOCKUP_DETECTOR */
20 40
21#ifdef CONFIG_SOFTLOCKUP_DETECTOR 41#ifdef CONFIG_SOFTLOCKUP_DETECTOR
22extern void touch_softlockup_watchdog_sched(void); 42extern void touch_softlockup_watchdog_sched(void);
@@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void);
24extern void touch_softlockup_watchdog_sync(void); 44extern void touch_softlockup_watchdog_sync(void);
25extern void touch_all_softlockup_watchdogs(void); 45extern void touch_all_softlockup_watchdogs(void);
26extern unsigned int softlockup_panic; 46extern unsigned int softlockup_panic;
27extern int soft_watchdog_enabled;
28extern atomic_t watchdog_park_in_progress;
29#else 47#else
30static inline void touch_softlockup_watchdog_sched(void) 48static inline void touch_softlockup_watchdog_sched(void) { }
31{ 49static inline void touch_softlockup_watchdog(void) { }
32} 50static inline void touch_softlockup_watchdog_sync(void) { }
33static inline void touch_softlockup_watchdog(void) 51static inline void touch_all_softlockup_watchdogs(void) { }
34{
35}
36static inline void touch_softlockup_watchdog_sync(void)
37{
38}
39static inline void touch_all_softlockup_watchdogs(void)
40{
41}
42#endif 52#endif
43 53
44#ifdef CONFIG_DETECT_HUNG_TASK 54#ifdef CONFIG_DETECT_HUNG_TASK
45void reset_hung_task_detector(void); 55void reset_hung_task_detector(void);
46#else 56#else
47static inline void reset_hung_task_detector(void) 57static inline void reset_hung_task_detector(void) { }
48{
49}
50#endif 58#endif
51 59
52/* 60/*
@@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void)
54 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - 62 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
55 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. 63 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
56 * 64 *
57 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' 65 * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
58 * are variables that are only used as an 'interface' between the parameters 66 * 'soft_watchdog_user_enabled' are variables that are only used as an
59 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The 67 * 'interface' between the parameters in /proc/sys/kernel and the internal
60 * 'watchdog_thresh' variable is handled differently because its value is not 68 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
61 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' 69 * handled differently because its value is not boolean, and the lockup
62 * is equal zero. 70 * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
63 */ 71 */
64#define NMI_WATCHDOG_ENABLED_BIT 0 72#define NMI_WATCHDOG_ENABLED_BIT 0
65#define SOFT_WATCHDOG_ENABLED_BIT 1 73#define SOFT_WATCHDOG_ENABLED_BIT 1
@@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic;
73static inline void hardlockup_detector_disable(void) {} 81static inline void hardlockup_detector_disable(void) {}
74#endif 82#endif
75 83
84#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
85# define NMI_WATCHDOG_SYSCTL_PERM 0644
86#else
87# define NMI_WATCHDOG_SYSCTL_PERM 0444
88#endif
89
76#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) 90#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
77extern void arch_touch_nmi_watchdog(void); 91extern void arch_touch_nmi_watchdog(void);
92extern void hardlockup_detector_perf_stop(void);
93extern void hardlockup_detector_perf_restart(void);
94extern void hardlockup_detector_perf_disable(void);
95extern void hardlockup_detector_perf_enable(void);
96extern void hardlockup_detector_perf_cleanup(void);
97extern int hardlockup_detector_perf_init(void);
78#else 98#else
79#if !defined(CONFIG_HAVE_NMI_WATCHDOG) 99static inline void hardlockup_detector_perf_stop(void) { }
100static inline void hardlockup_detector_perf_restart(void) { }
101static inline void hardlockup_detector_perf_disable(void) { }
102static inline void hardlockup_detector_perf_enable(void) { }
103static inline void hardlockup_detector_perf_cleanup(void) { }
104# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
105static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
80static inline void arch_touch_nmi_watchdog(void) {} 106static inline void arch_touch_nmi_watchdog(void) {}
107# else
108static inline int hardlockup_detector_perf_init(void) { return 0; }
109# endif
81#endif 110#endif
82#endif 111
112void watchdog_nmi_stop(void);
113void watchdog_nmi_start(void);
114int watchdog_nmi_probe(void);
83 115
84/** 116/**
85 * touch_nmi_watchdog - restart NMI watchdog timeout. 117 * touch_nmi_watchdog - restart NMI watchdog timeout.
86 * 118 *
87 * If the architecture supports the NMI watchdog, touch_nmi_watchdog() 119 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
88 * may be used to reset the timeout - for code which intentionally 120 * may be used to reset the timeout - for code which intentionally
89 * disables interrupts for a long time. This call is stateless. 121 * disables interrupts for a long time. This call is stateless.
@@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
153u64 hw_nmi_get_sample_period(int watchdog_thresh); 185u64 hw_nmi_get_sample_period(int watchdog_thresh);
154#endif 186#endif
155 187
156#ifdef CONFIG_LOCKUP_DETECTOR
157extern int nmi_watchdog_enabled;
158extern int watchdog_user_enabled;
159extern int watchdog_thresh;
160extern unsigned long watchdog_enabled;
161extern struct cpumask watchdog_cpumask;
162extern unsigned long *watchdog_cpumask_bits;
163extern int __read_mostly watchdog_suspended;
164#ifdef CONFIG_SMP
165extern int sysctl_softlockup_all_cpu_backtrace;
166extern int sysctl_hardlockup_all_cpu_backtrace;
167#else
168#define sysctl_softlockup_all_cpu_backtrace 0
169#define sysctl_hardlockup_all_cpu_backtrace 0
170#endif
171
172#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ 188#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
173 defined(CONFIG_HARDLOCKUP_DETECTOR) 189 defined(CONFIG_HARDLOCKUP_DETECTOR)
174void watchdog_update_hrtimer_threshold(u64 period); 190void watchdog_update_hrtimer_threshold(u64 period);
@@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period);
176static inline void watchdog_update_hrtimer_threshold(u64 period) { } 192static inline void watchdog_update_hrtimer_threshold(u64 period) { }
177#endif 193#endif
178 194
179extern bool is_hardlockup(void);
180struct ctl_table; 195struct ctl_table;
181extern int proc_watchdog(struct ctl_table *, int , 196extern int proc_watchdog(struct ctl_table *, int ,
182 void __user *, size_t *, loff_t *); 197 void __user *, size_t *, loff_t *);
@@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int ,
188 void __user *, size_t *, loff_t *); 203 void __user *, size_t *, loff_t *);
189extern int proc_watchdog_cpumask(struct ctl_table *, int, 204extern int proc_watchdog_cpumask(struct ctl_table *, int,
190 void __user *, size_t *, loff_t *); 205 void __user *, size_t *, loff_t *);
191extern int lockup_detector_suspend(void);
192extern void lockup_detector_resume(void);
193#else
194static inline int lockup_detector_suspend(void)
195{
196 return 0;
197}
198
199static inline void lockup_detector_resume(void)
200{
201}
202#endif
203 206
204#ifdef CONFIG_HAVE_ACPI_APEI_NMI 207#ifdef CONFIG_HAVE_ACPI_APEI_NMI
205#include <asm/nmi.h> 208#include <asm/nmi.h>
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 9c5cb4480806..a726f96010d5 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,11 +346,6 @@ struct nvme_fc_remote_port {
346 * indicating an FC transport Aborted status. 346 * indicating an FC transport Aborted status.
347 * Entrypoint is Mandatory. 347 * Entrypoint is Mandatory.
348 * 348 *
349 * @defer_rcv: Called by the transport to signal the LLDD that it has
350 * begun processing of a previously received NVME CMD IU. The LLDD
351 * is now free to re-use the rcv buffer associated with the
352 * nvmefc_tgt_fcp_req.
353 *
354 * @max_hw_queues: indicates the maximum number of hw queues the LLDD 349 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
355 * supports for cpu affinitization. 350 * supports for cpu affinitization.
356 * Value is Mandatory. Must be at least 1. 351 * Value is Mandatory. Must be at least 1.
@@ -806,11 +801,19 @@ struct nvmet_fc_target_port {
806 * outstanding operation (if there was one) to complete, then will 801 * outstanding operation (if there was one) to complete, then will
807 * call the fcp_req_release() callback to return the command's 802 * call the fcp_req_release() callback to return the command's
808 * exchange context back to the LLDD. 803 * exchange context back to the LLDD.
804 * Entrypoint is Mandatory.
809 * 805 *
810 * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req 806 * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
811 * to the LLDD after all operations on the fcp operation are complete. 807 * to the LLDD after all operations on the fcp operation are complete.
812 * This may be due to the command completing or upon completion of 808 * This may be due to the command completing or upon completion of
813 * abort cleanup. 809 * abort cleanup.
810 * Entrypoint is Mandatory.
811 *
812 * @defer_rcv: Called by the transport to signal the LLDD that it has
813 * begun processing of a previously received NVME CMD IU. The LLDD
814 * is now free to re-use the rcv buffer associated with the
815 * nvmefc_tgt_fcp_req.
816 * Entrypoint is Optional.
814 * 817 *
815 * @max_hw_queues: indicates the maximum number of hw queues the LLDD 818 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
816 * supports for cpu affinitization. 819 * supports for cpu affinitization.
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 87723c86f136..9310ce77d8e1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -471,12 +471,14 @@ enum nvme_opcode {
471 * 471 *
472 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block 472 * @NVME_SGL_FMT_ADDRESS: absolute address of the data block
473 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block 473 * @NVME_SGL_FMT_OFFSET: relative offset of the in-capsule data block
474 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
474 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation 475 * @NVME_SGL_FMT_INVALIDATE: RDMA transport specific remote invalidation
475 * request subtype 476 * request subtype
476 */ 477 */
477enum { 478enum {
478 NVME_SGL_FMT_ADDRESS = 0x00, 479 NVME_SGL_FMT_ADDRESS = 0x00,
479 NVME_SGL_FMT_OFFSET = 0x01, 480 NVME_SGL_FMT_OFFSET = 0x01,
481 NVME_SGL_FMT_TRANSPORT_A = 0x0A,
480 NVME_SGL_FMT_INVALIDATE = 0x0f, 482 NVME_SGL_FMT_INVALIDATE = 0x0f,
481}; 483};
482 484
@@ -490,12 +492,16 @@ enum {
490 * 492 *
491 * For struct nvme_keyed_sgl_desc: 493 * For struct nvme_keyed_sgl_desc:
492 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor 494 * @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
495 *
496 * Transport-specific SGL types:
497 * @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data block descriptor
493 */ 498 */
494enum { 499enum {
495 NVME_SGL_FMT_DATA_DESC = 0x00, 500 NVME_SGL_FMT_DATA_DESC = 0x00,
496 NVME_SGL_FMT_SEG_DESC = 0x02, 501 NVME_SGL_FMT_SEG_DESC = 0x02,
497 NVME_SGL_FMT_LAST_SEG_DESC = 0x03, 502 NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
498 NVME_KEY_SGL_FMT_DATA_DESC = 0x04, 503 NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
504 NVME_TRANSPORT_SGL_DATA_DESC = 0x05,
499}; 505};
500 506
501struct nvme_sgl_desc { 507struct nvme_sgl_desc {
@@ -1127,19 +1133,6 @@ enum {
1127 NVME_SC_UNWRITTEN_BLOCK = 0x287, 1133 NVME_SC_UNWRITTEN_BLOCK = 0x287,
1128 1134
1129 NVME_SC_DNR = 0x4000, 1135 NVME_SC_DNR = 0x4000,
1130
1131
1132 /*
1133 * FC Transport-specific error status values for NVME commands
1134 *
1135 * Transport-specific status code values must be in the range 0xB0..0xBF
1136 */
1137
1138 /* Generic FC failure - catchall */
1139 NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,
1140
1141 /* I/O failure due to FC ABTS'd */
1142 NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
1143}; 1136};
1144 1137
1145struct nvme_completion { 1138struct nvme_completion {
diff --git a/include/linux/of.h b/include/linux/of.h
index cfc34117fc92..b240ed69dc96 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
734 return NULL; 734 return NULL;
735} 735}
736 736
737static inline int of_n_addr_cells(struct device_node *np)
738{
739 return 0;
740
741}
742static inline int of_n_size_cells(struct device_node *np)
743{
744 return 0;
745}
746
737static inline int of_property_read_u64(const struct device_node *np, 747static inline int of_property_read_u64(const struct device_node *np,
738 const char *propname, u64 *out_value) 748 const char *propname, u64 *out_value)
739{ 749{
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f68c58a93dd0..f4f8ee5a7362 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1685,6 +1685,8 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1685 1685
1686#define dev_is_pci(d) (false) 1686#define dev_is_pci(d) (false)
1687#define dev_is_pf(d) (false) 1687#define dev_is_pf(d) (false)
1688static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
1689{ return false; }
1688#endif /* CONFIG_PCI */ 1690#endif /* CONFIG_PCI */
1689 1691
1690/* Include architecture-dependent settings and functions */ 1692/* Include architecture-dependent settings and functions */
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..2bea1d5e9930 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
276#define list_entry_rcu(ptr, type, member) \ 276#define list_entry_rcu(ptr, type, member) \
277 container_of(lockless_dereference(ptr), type, member) 277 container_of(lockless_dereference(ptr), type, member)
278 278
279/** 279/*
280 * Where are list_empty_rcu() and list_first_entry_rcu()? 280 * Where are list_empty_rcu() and list_first_entry_rcu()?
281 * 281 *
282 * Implementing those functions following their counterparts list_empty() and 282 * Implementing those functions following their counterparts list_empty() and
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de50d8a4cf41..1a9f70d44af9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
523 * Return the value of the specified RCU-protected pointer, but omit 523 * Return the value of the specified RCU-protected pointer, but omit
524 * both the smp_read_barrier_depends() and the READ_ONCE(). This 524 * both the smp_read_barrier_depends() and the READ_ONCE(). This
525 * is useful in cases where update-side locks prevent the value of the 525 * is useful in cases where update-side locks prevent the value of the
526 * pointer from changing. Please note that this primitive does -not- 526 * pointer from changing. Please note that this primitive does *not*
527 * prevent the compiler from repeating this reference or combining it 527 * prevent the compiler from repeating this reference or combining it
528 * with other references, so it should not be used without protection 528 * with other references, so it should not be used without protection
529 * of appropriate locks. 529 * of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
568 * is handed off from RCU to some other synchronization mechanism, for 568 * is handed off from RCU to some other synchronization mechanism, for
569 * example, reference counting or locking. In C11, it would map to 569 * example, reference counting or locking. In C11, it would map to
570 * kill_dependency(). It could be used as follows: 570 * kill_dependency(). It could be used as follows:
571 * 571 * ``
572 * rcu_read_lock(); 572 * rcu_read_lock();
573 * p = rcu_dereference(gp); 573 * p = rcu_dereference(gp);
574 * long_lived = is_long_lived(p); 574 * long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
579 * p = rcu_pointer_handoff(p); 579 * p = rcu_pointer_handoff(p);
580 * } 580 * }
581 * rcu_read_unlock(); 581 * rcu_read_unlock();
582 *``
582 */ 583 */
583#define rcu_pointer_handoff(p) (p) 584#define rcu_pointer_handoff(p) (p)
584 585
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
778 779
779/** 780/**
780 * RCU_INIT_POINTER() - initialize an RCU protected pointer 781 * RCU_INIT_POINTER() - initialize an RCU protected pointer
782 * @p: The pointer to be initialized.
783 * @v: The value to initialize the pointer to.
781 * 784 *
782 * Initialize an RCU-protected pointer in special cases where readers 785 * Initialize an RCU-protected pointer in special cases where readers
783 * do not need ordering constraints on the CPU or the compiler. These 786 * do not need ordering constraints on the CPU or the compiler. These
784 * special cases are: 787 * special cases are:
785 * 788 *
786 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- 789 * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
787 * 2. The caller has taken whatever steps are required to prevent 790 * 2. The caller has taken whatever steps are required to prevent
788 * RCU readers from concurrently accessing this pointer -or- 791 * RCU readers from concurrently accessing this pointer *or*
789 * 3. The referenced data structure has already been exposed to 792 * 3. The referenced data structure has already been exposed to
790 * readers either at compile time or via rcu_assign_pointer() -and- 793 * readers either at compile time or via rcu_assign_pointer() *and*
791 * a. You have not made -any- reader-visible changes to 794 *
792 * this structure since then -or- 795 * a. You have not made *any* reader-visible changes to
796 * this structure since then *or*
793 * b. It is OK for readers accessing this structure from its 797 * b. It is OK for readers accessing this structure from its
794 * new location to see the old state of the structure. (For 798 * new location to see the old state of the structure. (For
795 * example, the changes were to statistical counters or to 799 * example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
805 * by a single external-to-structure RCU-protected pointer, then you may 809 * by a single external-to-structure RCU-protected pointer, then you may
806 * use RCU_INIT_POINTER() to initialize the internal RCU-protected 810 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
807 * pointers, but you must use rcu_assign_pointer() to initialize the 811 * pointers, but you must use rcu_assign_pointer() to initialize the
808 * external-to-structure pointer -after- you have completely initialized 812 * external-to-structure pointer *after* you have completely initialized
809 * the reader-accessible portions of the linked structure. 813 * the reader-accessible portions of the linked structure.
810 * 814 *
811 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no 815 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
819 823
820/** 824/**
821 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer 825 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
826 * @p: The pointer to be initialized.
827 * @v: The value to initialize the pointer to.
822 * 828 *
823 * GCC-style initialization for an RCU-protected pointer in a structure field. 829 * GCC-style initialization for an RCU-protected pointer in a structure field.
824 */ 830 */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 92fb8dd5a9e4..26a7df4e558c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -65,25 +65,23 @@ struct task_group;
65 */ 65 */
66 66
67/* Used in tsk->state: */ 67/* Used in tsk->state: */
68#define TASK_RUNNING 0 68#define TASK_RUNNING 0x0000
69#define TASK_INTERRUPTIBLE 1 69#define TASK_INTERRUPTIBLE 0x0001
70#define TASK_UNINTERRUPTIBLE 2 70#define TASK_UNINTERRUPTIBLE 0x0002
71#define __TASK_STOPPED 4 71#define __TASK_STOPPED 0x0004
72#define __TASK_TRACED 8 72#define __TASK_TRACED 0x0008
73/* Used in tsk->exit_state: */ 73/* Used in tsk->exit_state: */
74#define EXIT_DEAD 16 74#define EXIT_DEAD 0x0010
75#define EXIT_ZOMBIE 32 75#define EXIT_ZOMBIE 0x0020
76#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 76#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
77/* Used in tsk->state again: */ 77/* Used in tsk->state again: */
78#define TASK_DEAD 64 78#define TASK_PARKED 0x0040
79#define TASK_WAKEKILL 128 79#define TASK_DEAD 0x0080
80#define TASK_WAKING 256 80#define TASK_WAKEKILL 0x0100
81#define TASK_PARKED 512 81#define TASK_WAKING 0x0200
82#define TASK_NOLOAD 1024 82#define TASK_NOLOAD 0x0400
83#define TASK_NEW 2048 83#define TASK_NEW 0x0800
84#define TASK_STATE_MAX 4096 84#define TASK_STATE_MAX 0x1000
85
86#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
87 85
88/* Convenience macros for the sake of set_current_state: */ 86/* Convenience macros for the sake of set_current_state: */
89#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 87#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -99,7 +97,8 @@ struct task_group;
99/* get_task_state(): */ 97/* get_task_state(): */
100#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ 98#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
101 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ 99 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
102 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) 100 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
101 TASK_PARKED)
103 102
104#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) 103#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
105 104
@@ -1243,17 +1242,34 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1243 return task_pgrp_nr_ns(tsk, &init_pid_ns); 1242 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1244} 1243}
1245 1244
1246static inline char task_state_to_char(struct task_struct *task) 1245#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1246#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1247
1248static inline unsigned int __get_task_state(struct task_struct *tsk)
1249{
1250 unsigned int tsk_state = READ_ONCE(tsk->state);
1251 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1252
1253 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1254
1255 if (tsk_state == TASK_IDLE)
1256 state = TASK_REPORT_IDLE;
1257
1258 return fls(state);
1259}
1260
1261static inline char __task_state_to_char(unsigned int state)
1247{ 1262{
1248 const char stat_nam[] = TASK_STATE_TO_CHAR_STR; 1263 static const char state_char[] = "RSDTtXZPI";
1249 unsigned long state = task->state;
1250 1264
1251 state = state ? __ffs(state) + 1 : 0; 1265 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1252 1266
1253 /* Make sure the string lines up properly with the number of task states: */ 1267 return state_char[state];
1254 BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); 1268}
1255 1269
1256 return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'; 1270static inline char task_state_to_char(struct task_struct *tsk)
1271{
1272 return __task_state_to_char(__get_task_state(tsk));
1257} 1273}
1258 1274
1259/** 1275/**
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 3a19c253bdb1..ab9bf7b73954 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
84 84
85/* mmput gets rid of the mappings and all user-space */ 85/* mmput gets rid of the mappings and all user-space */
86extern void mmput(struct mm_struct *); 86extern void mmput(struct mm_struct *);
87#ifdef CONFIG_MMU
88/* same as above but performs the slow path from the async context. Can
89 * be called from the atomic context as well
90 */
91void mmput_async(struct mm_struct *);
92#endif
87 93
88/* Grab a reference to a task's mm, if it is not already going away */ 94/* Grab a reference to a task's mm, if it is not already going away */
89extern struct mm_struct *get_task_mm(struct task_struct *task); 95extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -205,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
205 current->flags = (current->flags & ~PF_MEMALLOC) | flags; 211 current->flags = (current->flags & ~PF_MEMALLOC) | flags;
206} 212}
207 213
214#ifdef CONFIG_MEMBARRIER
215enum {
216 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
217 MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
218};
219
220static inline void membarrier_execve(struct task_struct *t)
221{
222 atomic_set(&t->mm->membarrier_state, 0);
223}
224#else
225static inline void membarrier_execve(struct task_struct *t)
226{
227}
228#endif
229
208#endif /* _LINUX_SCHED_MM_H */ 230#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index d7b6dab956ec..7d065abc7a47 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -71,14 +71,6 @@ struct sched_domain_shared {
71 atomic_t ref; 71 atomic_t ref;
72 atomic_t nr_busy_cpus; 72 atomic_t nr_busy_cpus;
73 int has_idle_cores; 73 int has_idle_cores;
74
75 /*
76 * Some variables from the most recent sd_lb_stats for this domain,
77 * used by wake_affine().
78 */
79 unsigned long nr_running;
80 unsigned long load;
81 unsigned long capacity;
82}; 74};
83 75
84struct sched_domain { 76struct sched_domain {
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 82b171e1aa0b..da803dfc7a39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -231,7 +231,7 @@ struct sctp_datahdr {
231 __be32 tsn; 231 __be32 tsn;
232 __be16 stream; 232 __be16 stream;
233 __be16 ssn; 233 __be16 ssn;
234 __be32 ppid; 234 __u32 ppid;
235 __u8 payload[0]; 235 __u8 payload[0];
236}; 236};
237 237
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
716 716
717struct sctp_strreset_outreq { 717struct sctp_strreset_outreq {
718 struct sctp_paramhdr param_hdr; 718 struct sctp_paramhdr param_hdr;
719 __u32 request_seq; 719 __be32 request_seq;
720 __u32 response_seq; 720 __be32 response_seq;
721 __u32 send_reset_at_tsn; 721 __be32 send_reset_at_tsn;
722 __u16 list_of_streams[0]; 722 __be16 list_of_streams[0];
723}; 723};
724 724
725struct sctp_strreset_inreq { 725struct sctp_strreset_inreq {
726 struct sctp_paramhdr param_hdr; 726 struct sctp_paramhdr param_hdr;
727 __u32 request_seq; 727 __be32 request_seq;
728 __u16 list_of_streams[0]; 728 __be16 list_of_streams[0];
729}; 729};
730 730
731struct sctp_strreset_tsnreq { 731struct sctp_strreset_tsnreq {
732 struct sctp_paramhdr param_hdr; 732 struct sctp_paramhdr param_hdr;
733 __u32 request_seq; 733 __be32 request_seq;
734}; 734};
735 735
736struct sctp_strreset_addstrm { 736struct sctp_strreset_addstrm {
737 struct sctp_paramhdr param_hdr; 737 struct sctp_paramhdr param_hdr;
738 __u32 request_seq; 738 __be32 request_seq;
739 __u16 number_of_streams; 739 __be16 number_of_streams;
740 __u16 reserved; 740 __be16 reserved;
741}; 741};
742 742
743enum { 743enum {
@@ -752,16 +752,16 @@ enum {
752 752
753struct sctp_strreset_resp { 753struct sctp_strreset_resp {
754 struct sctp_paramhdr param_hdr; 754 struct sctp_paramhdr param_hdr;
755 __u32 response_seq; 755 __be32 response_seq;
756 __u32 result; 756 __be32 result;
757}; 757};
758 758
759struct sctp_strreset_resptsn { 759struct sctp_strreset_resptsn {
760 struct sctp_paramhdr param_hdr; 760 struct sctp_paramhdr param_hdr;
761 __u32 response_seq; 761 __be32 response_seq;
762 __u32 result; 762 __be32 result;
763 __u32 senders_next_tsn; 763 __be32 senders_next_tsn;
764 __u32 receivers_next_tsn; 764 __be32 receivers_next_tsn;
765}; 765};
766 766
767#endif /* __LINUX_SCTP_H__ */ 767#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index 12910cf19869..c149aa7bedf3 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
55} 55}
56 56
57void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); 57void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
58int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, 58void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
59 const struct cpumask *); 59 const struct cpumask *);
60 60
61#endif 61#endif
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 39af9bc0f653..62be8966e837 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
78 78
79/** 79/**
80 * srcu_read_lock_held - might we be in SRCU read-side critical section? 80 * srcu_read_lock_held - might we be in SRCU read-side critical section?
81 * @sp: The srcu_struct structure to check
81 * 82 *
82 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU 83 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
83 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, 84 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 73e97a08d3d0..cf30f5022472 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -9,13 +9,16 @@
9/* 9/*
10 * Simple wait queues 10 * Simple wait queues
11 * 11 *
12 * While these are very similar to the other/complex wait queues (wait.h) the 12 * While these are very similar to regular wait queues (wait.h) the most
13 * most important difference is that the simple waitqueue allows for 13 * important difference is that the simple waitqueue allows for deterministic
14 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold 14 * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
15 * times.
16 * 15 *
17 * In order to make this so, we had to drop a fair number of features of the 16 * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
18 * other waitqueue code; notably: 17 * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
18 * priority task a chance to run.
19 *
20 * Secondly, we had to drop a fair number of features of the other waitqueue
21 * code; notably:
19 * 22 *
20 * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; 23 * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
21 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right 24 * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -24,12 +27,14 @@
24 * - the exclusive mode; because this requires preserving the list order 27 * - the exclusive mode; because this requires preserving the list order
25 * and this is hard. 28 * and this is hard.
26 * 29 *
27 * - custom wake functions; because you cannot give any guarantees about 30 * - custom wake callback functions; because you cannot give any guarantees
28 * random code. 31 * about random code. This also allows swait to be used in RT, such that
29 * 32 * raw spinlock can be used for the swait queue head.
30 * As a side effect of this; the data structures are slimmer.
31 * 33 *
32 * One would recommend using this wait queue where possible. 34 * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
35 * For all the above, note that simple wait queues should _only_ be used under
36 * very specific realtime constraints -- it is best to stick with the regular
37 * wait queues in most cases.
33 */ 38 */
34 39
35struct task_struct; 40struct task_struct;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 905d769d8ddc..5f7eeab990fe 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -42,7 +42,7 @@ enum {
42#define THREAD_ALIGN THREAD_SIZE 42#define THREAD_ALIGN THREAD_SIZE
43#endif 43#endif
44 44
45#ifdef CONFIG_DEBUG_STACK_USAGE 45#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
46# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ 46# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
47 __GFP_ZERO) 47 __GFP_ZERO)
48#else 48#else
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e6789b8757d5..6383c528b148 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -168,6 +168,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
168#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \ 168#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
169 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED) 169 __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
170 170
171#define TIMER_DATA_TYPE unsigned long
172#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
173
174static inline void timer_setup(struct timer_list *timer,
175 void (*callback)(struct timer_list *),
176 unsigned int flags)
177{
178 __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
179 (TIMER_DATA_TYPE)timer, flags);
180}
181
182#define from_timer(var, callback_timer, timer_fieldname) \
183 container_of(callback_timer, typeof(*var), timer_fieldname)
184
171/** 185/**
172 * timer_pending - is a timer pending? 186 * timer_pending - is a timer pending?
173 * @timer: the timer in question 187 * @timer: the timer in question
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 4e6131cd3f43..ac1a2317941e 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq,
146 fq_flow_get_default_t get_default_func) 146 fq_flow_get_default_t get_default_func)
147{ 147{
148 struct fq_flow *flow; 148 struct fq_flow *flow;
149 bool oom;
149 150
150 lockdep_assert_held(&fq->lock); 151 lockdep_assert_held(&fq->lock);
151 152
@@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq,
167 } 168 }
168 169
169 __skb_queue_tail(&flow->queue, skb); 170 __skb_queue_tail(&flow->queue, skb);
170 171 oom = (fq->memory_usage > fq->memory_limit);
171 if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) { 172 while (fq->backlog > fq->limit || oom) {
172 flow = list_first_entry_or_null(&fq->backlogs, 173 flow = list_first_entry_or_null(&fq->backlogs,
173 struct fq_flow, 174 struct fq_flow,
174 backlogchain); 175 backlogchain);
@@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq,
183 184
184 flow->tin->overlimit++; 185 flow->tin->overlimit++;
185 fq->overlimit++; 186 fq->overlimit++;
186 if (fq->memory_usage > fq->memory_limit) 187 if (oom) {
187 fq->overmemory++; 188 fq->overmemory++;
189 oom = (fq->memory_usage > fq->memory_limit);
190 }
188 } 191 }
189} 192}
190 193
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053dfc78..db8162dd8c0b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -96,7 +96,7 @@ struct inet_request_sock {
96 kmemcheck_bitfield_end(flags); 96 kmemcheck_bitfield_end(flags);
97 u32 ir_mark; 97 u32 ir_mark;
98 union { 98 union {
99 struct ip_options_rcu *opt; 99 struct ip_options_rcu __rcu *ireq_opt;
100#if IS_ENABLED(CONFIG_IPV6) 100#if IS_ENABLED(CONFIG_IPV6)
101 struct { 101 struct {
102 struct ipv6_txoptions *ipv6_opt; 102 struct ipv6_txoptions *ipv6_opt;
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
132 return sk->sk_bound_dev_if; 132 return sk->sk_bound_dev_if;
133} 133}
134 134
135static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
136{
137 return rcu_dereference_check(ireq->ireq_opt,
138 refcount_read(&ireq->req.rsk_refcnt) > 0);
139}
140
135struct inet_cork { 141struct inet_cork {
136 unsigned int flags; 142 unsigned int flags;
137 __be32 addr; 143 __be32 addr;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e51cf5f81597..14c289393071 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
773 */ 773 */
774static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) 774static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
775{ 775{
776 return nla_put(skb, attrtype, sizeof(u8), &value); 776 /* temporary variables to work around GCC PR81715 with asan-stack=1 */
777 u8 tmp = value;
778
779 return nla_put(skb, attrtype, sizeof(u8), &tmp);
777} 780}
778 781
779/** 782/**
@@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
784 */ 787 */
785static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) 788static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
786{ 789{
787 return nla_put(skb, attrtype, sizeof(u16), &value); 790 u16 tmp = value;
791
792 return nla_put(skb, attrtype, sizeof(u16), &tmp);
788} 793}
789 794
790/** 795/**
@@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
795 */ 800 */
796static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) 801static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
797{ 802{
798 return nla_put(skb, attrtype, sizeof(__be16), &value); 803 __be16 tmp = value;
804
805 return nla_put(skb, attrtype, sizeof(__be16), &tmp);
799} 806}
800 807
801/** 808/**
@@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
806 */ 813 */
807static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) 814static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
808{ 815{
809 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); 816 __be16 tmp = value;
817
818 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
810} 819}
811 820
812/** 821/**
@@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
817 */ 826 */
818static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) 827static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
819{ 828{
820 return nla_put(skb, attrtype, sizeof(__le16), &value); 829 __le16 tmp = value;
830
831 return nla_put(skb, attrtype, sizeof(__le16), &tmp);
821} 832}
822 833
823/** 834/**
@@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
828 */ 839 */
829static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) 840static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
830{ 841{
831 return nla_put(skb, attrtype, sizeof(u32), &value); 842 u32 tmp = value;
843
844 return nla_put(skb, attrtype, sizeof(u32), &tmp);
832} 845}
833 846
834/** 847/**
@@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
839 */ 852 */
840static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) 853static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
841{ 854{
842 return nla_put(skb, attrtype, sizeof(__be32), &value); 855 __be32 tmp = value;
856
857 return nla_put(skb, attrtype, sizeof(__be32), &tmp);
843} 858}
844 859
845/** 860/**
@@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
850 */ 865 */
851static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) 866static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
852{ 867{
853 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); 868 __be32 tmp = value;
869
870 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
854} 871}
855 872
856/** 873/**
@@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
861 */ 878 */
862static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) 879static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
863{ 880{
864 return nla_put(skb, attrtype, sizeof(__le32), &value); 881 __le32 tmp = value;
882
883 return nla_put(skb, attrtype, sizeof(__le32), &tmp);
865} 884}
866 885
867/** 886/**
@@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
874static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, 893static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
875 u64 value, int padattr) 894 u64 value, int padattr)
876{ 895{
877 return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr); 896 u64 tmp = value;
897
898 return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr);
878} 899}
879 900
880/** 901/**
@@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype,
887static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, 908static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
888 int padattr) 909 int padattr)
889{ 910{
890 return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr); 911 __be64 tmp = value;
912
913 return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr);
891} 914}
892 915
893/** 916/**
@@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value,
900static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, 923static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
901 int padattr) 924 int padattr)
902{ 925{
903 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value, 926 __be64 tmp = value;
927
928 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp,
904 padattr); 929 padattr);
905} 930}
906 931
@@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value,
914static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, 939static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
915 int padattr) 940 int padattr)
916{ 941{
917 return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr); 942 __le64 tmp = value;
943
944 return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr);
918} 945}
919 946
920/** 947/**
@@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value,
925 */ 952 */
926static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) 953static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
927{ 954{
928 return nla_put(skb, attrtype, sizeof(s8), &value); 955 s8 tmp = value;
956
957 return nla_put(skb, attrtype, sizeof(s8), &tmp);
929} 958}
930 959
931/** 960/**
@@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
936 */ 965 */
937static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) 966static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
938{ 967{
939 return nla_put(skb, attrtype, sizeof(s16), &value); 968 s16 tmp = value;
969
970 return nla_put(skb, attrtype, sizeof(s16), &tmp);
940} 971}
941 972
942/** 973/**
@@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
947 */ 978 */
948static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) 979static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
949{ 980{
950 return nla_put(skb, attrtype, sizeof(s32), &value); 981 s32 tmp = value;
982
983 return nla_put(skb, attrtype, sizeof(s32), &tmp);
951} 984}
952 985
953/** 986/**
@@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
960static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, 993static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value,
961 int padattr) 994 int padattr)
962{ 995{
963 return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr); 996 s64 tmp = value;
997
998 return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr);
964} 999}
965 1000
966/** 1001/**
@@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
1010static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, 1045static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
1011 __be32 addr) 1046 __be32 addr)
1012{ 1047{
1013 return nla_put_be32(skb, attrtype, addr); 1048 __be32 tmp = addr;
1049
1050 return nla_put_be32(skb, attrtype, tmp);
1014} 1051}
1015 1052
1016/** 1053/**
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e80edd8879ef..3009547f3c66 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -2,6 +2,7 @@
2#define __NET_PKT_CLS_H 2#define __NET_PKT_CLS_H
3 3
4#include <linux/pkt_cls.h> 4#include <linux/pkt_cls.h>
5#include <linux/workqueue.h>
5#include <net/sch_generic.h> 6#include <net/sch_generic.h>
6#include <net/act_api.h> 7#include <net/act_api.h>
7 8
@@ -17,6 +18,8 @@ struct tcf_walker {
17int register_tcf_proto_ops(struct tcf_proto_ops *ops); 18int register_tcf_proto_ops(struct tcf_proto_ops *ops);
18int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); 19int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
19 20
21bool tcf_queue_work(struct work_struct *work);
22
20#ifdef CONFIG_NET_CLS 23#ifdef CONFIG_NET_CLS
21struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 24struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
22 bool create); 25 bool create);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 65ba335b0e7e..4fc75f7ae23b 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -39,8 +39,8 @@
39 39
40/* This is used to register protocols. */ 40/* This is used to register protocols. */
41struct net_protocol { 41struct net_protocol {
42 void (*early_demux)(struct sk_buff *skb); 42 int (*early_demux)(struct sk_buff *skb);
43 void (*early_demux_handler)(struct sk_buff *skb); 43 int (*early_demux_handler)(struct sk_buff *skb);
44 int (*handler)(struct sk_buff *skb); 44 int (*handler)(struct sk_buff *skb);
45 void (*err_handler)(struct sk_buff *skb, u32 info); 45 void (*err_handler)(struct sk_buff *skb, u32 info);
46 unsigned int no_policy:1, 46 unsigned int no_policy:1,
diff --git a/include/net/route.h b/include/net/route.h
index 57dfc6850d37..d538e6db1afe 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
175 fl4->fl4_gre_key = gre_key; 175 fl4->fl4_gre_key = gre_key;
176 return ip_route_output_key(net, fl4); 176 return ip_route_output_key(net, fl4);
177} 177}
178 178int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
179 u8 tos, struct net_device *dev,
180 struct in_device *in_dev, u32 *itag);
179int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, 181int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
180 u8 tos, struct net_device *devin); 182 u8 tos, struct net_device *devin);
181int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, 183int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 135f5a2dd931..0dec8a23be57 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
10#include <linux/dynamic_queue_limits.h> 10#include <linux/dynamic_queue_limits.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/refcount.h> 12#include <linux/refcount.h>
13#include <linux/workqueue.h>
13#include <net/gen_stats.h> 14#include <net/gen_stats.h>
14#include <net/rtnetlink.h> 15#include <net/rtnetlink.h>
15 16
@@ -271,6 +272,7 @@ struct tcf_chain {
271 272
272struct tcf_block { 273struct tcf_block {
273 struct list_head chain_list; 274 struct list_head chain_list;
275 struct work_struct work;
274}; 276};
275 277
276static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 278static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2db3d3a9ce1d..88233cf8b8d4 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
261 struct sctp_fwdtsn_skip *skiplist); 261 struct sctp_fwdtsn_skip *skiplist);
262struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); 262struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
263struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc, 263struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
264 __u16 stream_num, __u16 *stream_list, 264 __u16 stream_num, __be16 *stream_list,
265 bool out, bool in); 265 bool out, bool in);
266struct sctp_chunk *sctp_make_strreset_tsnreq( 266struct sctp_chunk *sctp_make_strreset_tsnreq(
267 const struct sctp_association *asoc); 267 const struct sctp_association *asoc);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index b8c86ec1a8f5..231dc42f1da6 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
130 130
131struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( 131struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
132 const struct sctp_association *asoc, __u16 flags, 132 const struct sctp_association *asoc, __u16 flags,
133 __u16 stream_num, __u16 *stream_list, gfp_t gfp); 133 __u16 stream_num, __be16 *stream_list, gfp_t gfp);
134 134
135struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( 135struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
136 const struct sctp_association *asoc, __u16 flags, 136 const struct sctp_association *asoc, __u16 flags,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 7dc131d62ad5..d96b59f45eba 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -74,10 +74,9 @@ struct strparser {
74 u32 unrecov_intr : 1; 74 u32 unrecov_intr : 1;
75 75
76 struct sk_buff **skb_nextp; 76 struct sk_buff **skb_nextp;
77 struct timer_list msg_timer;
78 struct sk_buff *skb_head; 77 struct sk_buff *skb_head;
79 unsigned int need_bytes; 78 unsigned int need_bytes;
80 struct delayed_work delayed_work; 79 struct delayed_work msg_timer_work;
81 struct work_struct work; 80 struct work_struct work;
82 struct strp_stats stats; 81 struct strp_stats stats;
83 struct strp_callbacks cb; 82 struct strp_callbacks cb;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3bc910a9bfc6..e6d0002a1b0b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32);
345 345
346void tcp_shutdown(struct sock *sk, int how); 346void tcp_shutdown(struct sock *sk, int how);
347 347
348void tcp_v4_early_demux(struct sk_buff *skb); 348int tcp_v4_early_demux(struct sk_buff *skb);
349int tcp_v4_rcv(struct sk_buff *skb); 349int tcp_v4_rcv(struct sk_buff *skb);
350 350
351int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); 351int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
@@ -840,6 +840,12 @@ struct tcp_skb_cb {
840 struct inet6_skb_parm h6; 840 struct inet6_skb_parm h6;
841#endif 841#endif
842 } header; /* For incoming skbs */ 842 } header; /* For incoming skbs */
843 struct {
844 __u32 key;
845 __u32 flags;
846 struct bpf_map *map;
847 void *data_end;
848 } bpf;
843 }; 849 };
844}; 850};
845 851
@@ -1765,12 +1771,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
1765 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); 1771 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1766} 1772}
1767 1773
1768/* Called when old skb is about to be deleted (to be combined with new skb) */ 1774/* Called when old skb is about to be deleted and replaced by new skb */
1769static inline void tcp_highest_sack_combine(struct sock *sk, 1775static inline void tcp_highest_sack_replace(struct sock *sk,
1770 struct sk_buff *old, 1776 struct sk_buff *old,
1771 struct sk_buff *new) 1777 struct sk_buff *new)
1772{ 1778{
1773 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) 1779 if (old == tcp_highest_sack(sk))
1774 tcp_sk(sk)->highest_sack = new; 1780 tcp_sk(sk)->highest_sack = new;
1775} 1781}
1776 1782
diff --git a/include/net/udp.h b/include/net/udp.h
index 12dfbfe2e2d7..6c759c8594e2 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
259 return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err); 259 return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
260} 260}
261 261
262void udp_v4_early_demux(struct sk_buff *skb); 262int udp_v4_early_demux(struct sk_buff *skb);
263bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); 263bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
264int udp_get_port(struct sock *sk, unsigned short snum, 264int udp_get_port(struct sock *sk, unsigned short snum,
265 int (*saddr_cmp)(const struct sock *, 265 int (*saddr_cmp)(const struct sock *,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index bdb1279a415b..e8608b2dc844 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -285,7 +285,7 @@ enum ib_tm_cap_flags {
285 IB_TM_CAP_RC = 1 << 0, 285 IB_TM_CAP_RC = 1 << 0,
286}; 286};
287 287
288struct ib_xrq_caps { 288struct ib_tm_caps {
289 /* Max size of RNDV header */ 289 /* Max size of RNDV header */
290 u32 max_rndv_hdr_size; 290 u32 max_rndv_hdr_size;
291 /* Max number of entries in tag matching list */ 291 /* Max number of entries in tag matching list */
@@ -358,7 +358,7 @@ struct ib_device_attr {
358 struct ib_rss_caps rss_caps; 358 struct ib_rss_caps rss_caps;
359 u32 max_wq_type_rq; 359 u32 max_wq_type_rq;
360 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */ 360 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
361 struct ib_xrq_caps xrq_caps; 361 struct ib_tm_caps tm_caps;
362}; 362};
363 363
364enum ib_mtu { 364enum ib_mtu {
@@ -1739,7 +1739,7 @@ struct ib_mr {
1739 u32 lkey; 1739 u32 lkey;
1740 u32 rkey; 1740 u32 rkey;
1741 u64 iova; 1741 u64 iova;
1742 u32 length; 1742 u64 length;
1743 unsigned int page_size; 1743 unsigned int page_size;
1744 bool need_inval; 1744 bool need_inval;
1745 union { 1745 union {
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 82e93ee94708..67c5a9f223f7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -192,6 +192,7 @@ struct scsi_device {
192 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ 192 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
193 unsigned broken_fua:1; /* Don't set FUA bit */ 193 unsigned broken_fua:1; /* Don't set FUA bit */
194 unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ 194 unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
195 unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
195 196
196 atomic_t disk_events_disable_depth; /* disable depth for disk events */ 197 atomic_t disk_events_disable_depth; /* disable depth for disk events */
197 198
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 9592570e092a..36b03013d629 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -29,5 +29,6 @@
29#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ 29#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
30#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ 30#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
31#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ 31#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
32#define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */
32 33
33#endif 34#endif
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 6183d20a01fb..b266d2a3bcb1 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
434 unsigned int target_id); 434 unsigned int target_id);
435extern void iscsi_remove_session(struct iscsi_cls_session *session); 435extern void iscsi_remove_session(struct iscsi_cls_session *session);
436extern void iscsi_free_session(struct iscsi_cls_session *session); 436extern void iscsi_free_session(struct iscsi_cls_session *session);
437extern int iscsi_destroy_session(struct iscsi_cls_session *session);
438extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, 437extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
439 int dd_size, uint32_t cid); 438 int dd_size, uint32_t cid);
440extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); 439extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
diff --git a/include/sound/control.h b/include/sound/control.h
index bd7246de58e7..a1f1152bc687 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
248 void *private_data); 248 void *private_data);
249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); 249void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) 250#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
251int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
252 int (*func)(struct snd_kcontrol *, void *),
253 void *arg);
251 254
252/* 255/*
253 * Helper functions for jack-detection controls 256 * Helper functions for jack-detection controls
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h
index d0509db6d0ec..f89cd5ee1c7a 100644
--- a/include/sound/hda_verbs.h
+++ b/include/sound/hda_verbs.h
@@ -95,6 +95,7 @@ enum {
95#define AC_VERB_SET_EAPD_BTLENABLE 0x70c 95#define AC_VERB_SET_EAPD_BTLENABLE 0x70c
96#define AC_VERB_SET_DIGI_CONVERT_1 0x70d 96#define AC_VERB_SET_DIGI_CONVERT_1 0x70d
97#define AC_VERB_SET_DIGI_CONVERT_2 0x70e 97#define AC_VERB_SET_DIGI_CONVERT_2 0x70e
98#define AC_VERB_SET_DIGI_CONVERT_3 0x73e
98#define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f 99#define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f
99#define AC_VERB_SET_GPIO_DATA 0x715 100#define AC_VERB_SET_GPIO_DATA 0x715
100#define AC_VERB_SET_GPIO_MASK 0x716 101#define AC_VERB_SET_GPIO_MASK 0x716
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index a03acd0d398a..695257ae64ac 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
60 int port; /* created/attached port */ 60 int port; /* created/attached port */
61 unsigned int flags; /* SNDRV_VIRMIDI_* */ 61 unsigned int flags; /* SNDRV_VIRMIDI_* */
62 rwlock_t filelist_lock; 62 rwlock_t filelist_lock;
63 struct rw_semaphore filelist_sem;
63 struct list_head filelist; 64 struct list_head filelist;
64}; 65};
65 66
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ae1409ffe99a..3c8b7f625670 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -114,7 +114,10 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
114 * Preemption ignores task state, therefore preempted tasks are always 114 * Preemption ignores task state, therefore preempted tasks are always
115 * RUNNING (we will not have dequeued if state != RUNNING). 115 * RUNNING (we will not have dequeued if state != RUNNING).
116 */ 116 */
117 return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state; 117 if (preempt)
118 return TASK_STATE_MAX;
119
120 return __get_task_state(p);
118} 121}
119#endif /* CREATE_TRACE_POINTS */ 122#endif /* CREATE_TRACE_POINTS */
120 123
@@ -152,12 +155,14 @@ TRACE_EVENT(sched_switch,
152 155
153 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", 156 TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
154 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, 157 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
155 __entry->prev_state & (TASK_STATE_MAX-1) ? 158
156 __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", 159 (__entry->prev_state & (TASK_REPORT_MAX - 1)) ?
157 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, 160 __print_flags(__entry->prev_state & (TASK_REPORT_MAX - 1), "|",
158 { 16, "Z" }, { 32, "X" }, { 64, "x" }, 161 { 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
159 { 128, "K" }, { 256, "W" }, { 512, "P" }, 162 { 0x08, "t" }, { 0x10, "X" }, { 0x20, "Z" },
160 { 1024, "N" }) : "R", 163 { 0x40, "P" }, { 0x80, "I" }) :
164 "R",
165
161 __entry->prev_state & TASK_STATE_MAX ? "+" : "", 166 __entry->prev_state & TASK_STATE_MAX ? "+" : "",
162 __entry->next_comm, __entry->next_pid, __entry->next_prio) 167 __entry->next_comm, __entry->next_pid, __entry->next_prio)
163); 168);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 43ab5c402f98..7bf4c750dd3a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -312,7 +312,7 @@ union bpf_attr {
312 * jump into another BPF program 312 * jump into another BPF program
313 * @ctx: context pointer passed to next program 313 * @ctx: context pointer passed to next program
314 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY 314 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
315 * @index: index inside array that selects specific program to run 315 * @index: 32-bit index inside array that selects specific program to run
316 * Return: 0 on success or negative error 316 * Return: 0 on success or negative error
317 * 317 *
318 * int bpf_clone_redirect(skb, ifindex, flags) 318 * int bpf_clone_redirect(skb, ifindex, flags)
@@ -575,7 +575,7 @@ union bpf_attr {
575 * @map: pointer to sockmap 575 * @map: pointer to sockmap
576 * @key: key to lookup sock in map 576 * @key: key to lookup sock in map
577 * @flags: reserved for future use 577 * @flags: reserved for future use
578 * Return: SK_REDIRECT 578 * Return: SK_PASS
579 * 579 *
580 * int bpf_sock_map_update(skops, map, key, flags) 580 * int bpf_sock_map_update(skops, map, key, flags)
581 * @skops: pointer to bpf_sock_ops 581 * @skops: pointer to bpf_sock_ops
@@ -786,9 +786,8 @@ struct xdp_md {
786}; 786};
787 787
788enum sk_action { 788enum sk_action {
789 SK_ABORTED = 0, 789 SK_DROP = 0,
790 SK_DROP, 790 SK_PASS,
791 SK_REDIRECT,
792}; 791};
793 792
794#define BPF_TAG_SIZE 8 793#define BPF_TAG_SIZE 8
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 412c06a624c8..ccaea525340b 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -269,9 +269,9 @@ enum {
269#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) 269#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
270 270
271#define DM_VERSION_MAJOR 4 271#define DM_VERSION_MAJOR 4
272#define DM_VERSION_MINOR 36 272#define DM_VERSION_MINOR 37
273#define DM_VERSION_PATCHLEVEL 0 273#define DM_VERSION_PATCHLEVEL 0
274#define DM_VERSION_EXTRA "-ioctl (2017-06-09)" 274#define DM_VERSION_EXTRA "-ioctl (2017-09-20)"
275 275
276/* Status bits */ 276/* Status bits */
277#define DM_READONLY_FLAG (1 << 0) /* In/Out */ 277#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 6d47b3249d8a..4e01ad7ffe98 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -52,21 +52,30 @@
52 * (non-running threads are de facto in such a 52 * (non-running threads are de facto in such a
53 * state). This only covers threads from the 53 * state). This only covers threads from the
54 * same processes as the caller thread. This 54 * same processes as the caller thread. This
55 * command returns 0. The "expedited" commands 55 * command returns 0 on success. The
56 * complete faster than the non-expedited ones, 56 * "expedited" commands complete faster than
57 * they never block, but have the downside of 57 * the non-expedited ones, they never block,
58 * causing extra overhead. 58 * but have the downside of causing extra
59 * overhead. A process needs to register its
60 * intent to use the private expedited command
61 * prior to using it, otherwise this command
62 * returns -EPERM.
63 * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
64 * Register the process intent to use
65 * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
66 * returns 0.
59 * 67 *
60 * Command to be passed to the membarrier system call. The commands need to 68 * Command to be passed to the membarrier system call. The commands need to
61 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to 69 * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
62 * the value 0. 70 * the value 0.
63 */ 71 */
64enum membarrier_cmd { 72enum membarrier_cmd {
65 MEMBARRIER_CMD_QUERY = 0, 73 MEMBARRIER_CMD_QUERY = 0,
66 MEMBARRIER_CMD_SHARED = (1 << 0), 74 MEMBARRIER_CMD_SHARED = (1 << 0),
67 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ 75 /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
68 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ 76 /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
69 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), 77 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
78 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
70}; 79};
71 80
72#endif /* _UAPI_LINUX_MEMBARRIER_H */ 81#endif /* _UAPI_LINUX_MEMBARRIER_H */
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index b97725af2ac0..da161b56c79e 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -23,6 +23,7 @@ enum xt_bpf_modes {
23 XT_BPF_MODE_FD_PINNED, 23 XT_BPF_MODE_FD_PINNED,
24 XT_BPF_MODE_FD_ELF, 24 XT_BPF_MODE_FD_ELF,
25}; 25};
26#define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED
26 27
27struct xt_bpf_info_v1 { 28struct xt_bpf_info_v1 {
28 __u16 mode; 29 __u16 mode;
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6217ff8500a1..84fc2914b7fb 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -376,7 +376,7 @@ struct sctp_remote_error {
376 __u16 sre_type; 376 __u16 sre_type;
377 __u16 sre_flags; 377 __u16 sre_flags;
378 __u32 sre_length; 378 __u32 sre_length;
379 __u16 sre_error; 379 __be16 sre_error;
380 sctp_assoc_t sre_assoc_id; 380 sctp_assoc_t sre_assoc_id;
381 __u8 sre_data[0]; 381 __u8 sre_data[0];
382}; 382};
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75805..856de39d0b89 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
23#define SPIDEV_H 23#define SPIDEV_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/ioctl.h>
26 27
27/* User space versions of kernel symbols for SPI clocking modes, 28/* User space versions of kernel symbols for SPI clocking modes,
28 * matching <linux/spi/spi.h> 29 * matching <linux/spi/spi.h>
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index ce1169af39d7..2a5d63040a0b 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -780,6 +780,7 @@ struct usb_interface_assoc_descriptor {
780 __u8 iFunction; 780 __u8 iFunction;
781} __attribute__ ((packed)); 781} __attribute__ ((packed));
782 782
783#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
783 784
784/*-------------------------------------------------------------------------*/ 785/*-------------------------------------------------------------------------*/
785 786
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 9a0b6479fe0c..d4e0b53bfc75 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -261,7 +261,7 @@ struct ib_uverbs_ex_query_device_resp {
261 struct ib_uverbs_rss_caps rss_caps; 261 struct ib_uverbs_rss_caps rss_caps;
262 __u32 max_wq_type_rq; 262 __u32 max_wq_type_rq;
263 __u32 raw_packet_caps; 263 __u32 raw_packet_caps;
264 struct ib_uverbs_tm_caps xrq_caps; 264 struct ib_uverbs_tm_caps tm_caps;
265}; 265};
266 266
267struct ib_uverbs_query_port { 267struct ib_uverbs_query_port {
diff --git a/init/Kconfig b/init/Kconfig
index 78cb2461012e..3c1faaa2af4a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1033,7 +1033,7 @@ endif
1033 1033
1034choice 1034choice
1035 prompt "Compiler optimization level" 1035 prompt "Compiler optimization level"
1036 default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE 1036 default CC_OPTIMIZE_FOR_PERFORMANCE
1037 1037
1038config CC_OPTIMIZE_FOR_PERFORMANCE 1038config CC_OPTIMIZE_FOR_PERFORMANCE
1039 bool "Optimize for performance" 1039 bool "Optimize for performance"
diff --git a/ipc/shm.c b/ipc/shm.c
index 1e2b1692ba2c..badac463e2c8 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1154,7 +1154,7 @@ static int put_compat_shm_info(struct shm_info *ip,
1154 info.shm_swp = ip->shm_swp; 1154 info.shm_swp = ip->shm_swp;
1155 info.swap_attempts = ip->swap_attempts; 1155 info.swap_attempts = ip->swap_attempts;
1156 info.swap_successes = ip->swap_successes; 1156 info.swap_successes = ip->swap_successes;
1157 return copy_to_user(up, &info, sizeof(info)); 1157 return copy_to_user(uip, &info, sizeof(info));
1158} 1158}
1159 1159
1160static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in, 1160static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 98c0f00c3f5e..e2636737b69b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
98 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); 98 array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
99 99
100 if (array_size >= U32_MAX - PAGE_SIZE || 100 if (array_size >= U32_MAX - PAGE_SIZE ||
101 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { 101 bpf_array_alloc_percpu(array)) {
102 bpf_map_area_free(array); 102 bpf_map_area_free(array);
103 return ERR_PTR(-ENOMEM); 103 return ERR_PTR(-ENOMEM);
104 } 104 }
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 917cc04a0a94..7b62df86be1d 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1022,7 +1022,7 @@ select_insn:
1022 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; 1022 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1023 struct bpf_array *array = container_of(map, struct bpf_array, map); 1023 struct bpf_array *array = container_of(map, struct bpf_array, map);
1024 struct bpf_prog *prog; 1024 struct bpf_prog *prog;
1025 u64 index = BPF_R3; 1025 u32 index = BPF_R3;
1026 1026
1027 if (unlikely(index >= array->map.max_entries)) 1027 if (unlikely(index >= array->map.max_entries))
1028 goto out; 1028 goto out;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index e093d9a2c4dd..e745d6a88224 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list);
69 69
70static u64 dev_map_bitmap_size(const union bpf_attr *attr) 70static u64 dev_map_bitmap_size(const union bpf_attr *attr)
71{ 71{
72 return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); 72 return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
73} 73}
74 74
75static struct bpf_map *dev_map_alloc(union bpf_attr *attr) 75static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
78 int err = -EINVAL; 78 int err = -EINVAL;
79 u64 cost; 79 u64 cost;
80 80
81 if (!capable(CAP_NET_ADMIN))
82 return ERR_PTR(-EPERM);
83
81 /* check sanity of attributes */ 84 /* check sanity of attributes */
82 if (attr->max_entries == 0 || attr->key_size != 4 || 85 if (attr->max_entries == 0 || attr->key_size != 4 ||
83 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) 86 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
111 err = -ENOMEM; 114 err = -ENOMEM;
112 115
113 /* A per cpu bitfield with a bit per possible net device */ 116 /* A per cpu bitfield with a bit per possible net device */
114 dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), 117 dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
115 __alignof__(unsigned long)); 118 __alignof__(unsigned long),
119 GFP_KERNEL | __GFP_NOWARN);
116 if (!dtab->flush_needed) 120 if (!dtab->flush_needed)
117 goto free_dtab; 121 goto free_dtab;
118 122
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 431126f31ea3..6533f08d1238 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
317 */ 317 */
318 goto free_htab; 318 goto free_htab;
319 319
320 if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
321 /* make sure the size for pcpu_alloc() is reasonable */
322 goto free_htab;
323
324 htab->elem_size = sizeof(struct htab_elem) + 320 htab->elem_size = sizeof(struct htab_elem) +
325 round_up(htab->map.key_size, 8); 321 round_up(htab->map.key_size, 8);
326 if (percpu) 322 if (percpu)
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index e833ed914358..be1dde967208 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -363,6 +363,7 @@ out:
363 putname(pname); 363 putname(pname);
364 return ret; 364 return ret;
365} 365}
366EXPORT_SYMBOL_GPL(bpf_obj_get_user);
366 367
367static void bpf_evict_inode(struct inode *inode) 368static void bpf_evict_inode(struct inode *inode)
368{ 369{
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 6424ce0e4969..dbd7b322a86b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -39,6 +39,7 @@
39#include <linux/workqueue.h> 39#include <linux/workqueue.h>
40#include <linux/list.h> 40#include <linux/list.h>
41#include <net/strparser.h> 41#include <net/strparser.h>
42#include <net/tcp.h>
42 43
43struct bpf_stab { 44struct bpf_stab {
44 struct bpf_map map; 45 struct bpf_map map;
@@ -92,21 +93,45 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
92 return rcu_dereference_sk_user_data(sk); 93 return rcu_dereference_sk_user_data(sk);
93} 94}
94 95
96/* compute the linear packet data range [data, data_end) for skb when
97 * sk_skb type programs are in use.
98 */
99static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
100{
101 TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
102}
103
104enum __sk_action {
105 __SK_DROP = 0,
106 __SK_PASS,
107 __SK_REDIRECT,
108};
109
95static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) 110static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
96{ 111{
97 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); 112 struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
98 int rc; 113 int rc;
99 114
100 if (unlikely(!prog)) 115 if (unlikely(!prog))
101 return SK_DROP; 116 return __SK_DROP;
102 117
103 skb_orphan(skb); 118 skb_orphan(skb);
119 /* We need to ensure that BPF metadata for maps is also cleared
120 * when we orphan the skb so that we don't have the possibility
121 * to reference a stale map.
122 */
123 TCP_SKB_CB(skb)->bpf.map = NULL;
104 skb->sk = psock->sock; 124 skb->sk = psock->sock;
105 bpf_compute_data_end(skb); 125 bpf_compute_data_end_sk_skb(skb);
126 preempt_disable();
106 rc = (*prog->bpf_func)(skb, prog->insnsi); 127 rc = (*prog->bpf_func)(skb, prog->insnsi);
128 preempt_enable();
107 skb->sk = NULL; 129 skb->sk = NULL;
108 130
109 return rc; 131 /* Moving return codes from UAPI namespace into internal namespace */
132 return rc == SK_PASS ?
133 (TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
134 __SK_DROP;
110} 135}
111 136
112static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) 137static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@@ -114,17 +139,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
114 struct sock *sk; 139 struct sock *sk;
115 int rc; 140 int rc;
116 141
117 /* Because we use per cpu values to feed input from sock redirect
118 * in BPF program to do_sk_redirect_map() call we need to ensure we
119 * are not preempted. RCU read lock is not sufficient in this case
120 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
121 */
122 preempt_disable();
123 rc = smap_verdict_func(psock, skb); 142 rc = smap_verdict_func(psock, skb);
124 switch (rc) { 143 switch (rc) {
125 case SK_REDIRECT: 144 case __SK_REDIRECT:
126 sk = do_sk_redirect_map(); 145 sk = do_sk_redirect_map(skb);
127 preempt_enable();
128 if (likely(sk)) { 146 if (likely(sk)) {
129 struct smap_psock *peer = smap_psock_sk(sk); 147 struct smap_psock *peer = smap_psock_sk(sk);
130 148
@@ -139,10 +157,8 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
139 } 157 }
140 } 158 }
141 /* Fall through and free skb otherwise */ 159 /* Fall through and free skb otherwise */
142 case SK_DROP: 160 case __SK_DROP:
143 default: 161 default:
144 if (rc != SK_REDIRECT)
145 preempt_enable();
146 kfree_skb(skb); 162 kfree_skb(skb);
147 } 163 }
148} 164}
@@ -369,7 +385,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
369 * any socket yet. 385 * any socket yet.
370 */ 386 */
371 skb->sk = psock->sock; 387 skb->sk = psock->sock;
372 bpf_compute_data_end(skb); 388 bpf_compute_data_end_sk_skb(skb);
373 rc = (*prog->bpf_func)(skb, prog->insnsi); 389 rc = (*prog->bpf_func)(skb, prog->insnsi);
374 skb->sk = NULL; 390 skb->sk = NULL;
375 rcu_read_unlock(); 391 rcu_read_unlock();
@@ -487,6 +503,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
487 int err = -EINVAL; 503 int err = -EINVAL;
488 u64 cost; 504 u64 cost;
489 505
506 if (!capable(CAP_NET_ADMIN))
507 return ERR_PTR(-EPERM);
508
490 /* check sanity of attributes */ 509 /* check sanity of attributes */
491 if (attr->max_entries == 0 || attr->key_size != 4 || 510 if (attr->max_entries == 0 || attr->key_size != 4 ||
492 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) 511 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +859,12 @@ static int sock_map_update_elem(struct bpf_map *map,
840 return -EINVAL; 859 return -EINVAL;
841 } 860 }
842 861
862 if (skops.sk->sk_type != SOCK_STREAM ||
863 skops.sk->sk_protocol != IPPROTO_TCP) {
864 fput(socket->file);
865 return -EOPNOTSUPP;
866 }
867
843 err = sock_map_ctx_update_elem(&skops, map, key, flags); 868 err = sock_map_ctx_update_elem(&skops, map, key, flags);
844 fput(socket->file); 869 fput(socket->file);
845 return err; 870 return err;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b914fbe1383e..c48ca2a34b5e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
653{ 653{
654 struct bpf_verifier_state *parent = state->parent; 654 struct bpf_verifier_state *parent = state->parent;
655 655
656 if (regno == BPF_REG_FP)
657 /* We don't need to worry about FP liveness because it's read-only */
658 return;
659
656 while (parent) { 660 while (parent) {
657 /* if read wasn't screened by an earlier write ... */ 661 /* if read wasn't screened by an earlier write ... */
658 if (state->regs[regno].live & REG_LIVE_WRITTEN) 662 if (state->regs[regno].live & REG_LIVE_WRITTEN)
@@ -1112,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1112 /* ctx accesses must be at a fixed offset, so that we can 1116 /* ctx accesses must be at a fixed offset, so that we can
1113 * determine what type of data were returned. 1117 * determine what type of data were returned.
1114 */ 1118 */
1115 if (!tnum_is_const(reg->var_off)) { 1119 if (reg->off) {
1120 verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
1121 regno, reg->off, off - reg->off);
1122 return -EACCES;
1123 }
1124 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
1116 char tn_buf[48]; 1125 char tn_buf[48];
1117 1126
1118 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1127 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1120,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1120 tn_buf, off, size); 1129 tn_buf, off, size);
1121 return -EACCES; 1130 return -EACCES;
1122 } 1131 }
1123 off += reg->var_off.value;
1124 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); 1132 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
1125 if (!err && t == BPF_READ && value_regno >= 0) { 1133 if (!err && t == BPF_READ && value_regno >= 0) {
1126 /* ctx access returns either a scalar, or a 1134 /* ctx access returns either a scalar, or a
@@ -2345,6 +2353,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2345 * copy register state to dest reg 2353 * copy register state to dest reg
2346 */ 2354 */
2347 regs[insn->dst_reg] = regs[insn->src_reg]; 2355 regs[insn->dst_reg] = regs[insn->src_reg];
2356 regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
2348 } else { 2357 } else {
2349 /* R1 = (u32) R2 */ 2358 /* R1 = (u32) R2 */
2350 if (is_pointer_value(env, insn->src_reg)) { 2359 if (is_pointer_value(env, insn->src_reg)) {
@@ -2421,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2421} 2430}
2422 2431
2423static void find_good_pkt_pointers(struct bpf_verifier_state *state, 2432static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2424 struct bpf_reg_state *dst_reg) 2433 struct bpf_reg_state *dst_reg,
2434 bool range_right_open)
2425{ 2435{
2426 struct bpf_reg_state *regs = state->regs, *reg; 2436 struct bpf_reg_state *regs = state->regs, *reg;
2437 u16 new_range;
2427 int i; 2438 int i;
2428 2439
2429 if (dst_reg->off < 0) 2440 if (dst_reg->off < 0 ||
2441 (dst_reg->off == 0 && range_right_open))
2430 /* This doesn't give us any range */ 2442 /* This doesn't give us any range */
2431 return; 2443 return;
2432 2444
@@ -2437,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2437 */ 2449 */
2438 return; 2450 return;
2439 2451
2440 /* LLVM can generate four kind of checks: 2452 new_range = dst_reg->off;
2453 if (range_right_open)
2454 new_range--;
2455
2456 /* Examples for register markings:
2441 * 2457 *
2442 * Type 1/2: 2458 * pkt_data in dst register:
2443 * 2459 *
2444 * r2 = r3; 2460 * r2 = r3;
2445 * r2 += 8; 2461 * r2 += 8;
@@ -2456,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2456 * r2=pkt(id=n,off=8,r=0) 2472 * r2=pkt(id=n,off=8,r=0)
2457 * r3=pkt(id=n,off=0,r=0) 2473 * r3=pkt(id=n,off=0,r=0)
2458 * 2474 *
2459 * Type 3/4: 2475 * pkt_data in src register:
2460 * 2476 *
2461 * r2 = r3; 2477 * r2 = r3;
2462 * r2 += 8; 2478 * r2 += 8;
@@ -2474,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2474 * r3=pkt(id=n,off=0,r=0) 2490 * r3=pkt(id=n,off=0,r=0)
2475 * 2491 *
2476 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 2492 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
2477 * so that range of bytes [r3, r3 + 8) is safe to access. 2493 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
2494 * and [r3, r3 + 8-1) respectively is safe to access depending on
2495 * the check.
2478 */ 2496 */
2479 2497
2480 /* If our ids match, then we must have the same max_value. And we 2498 /* If our ids match, then we must have the same max_value. And we
@@ -2485,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
2485 for (i = 0; i < MAX_BPF_REG; i++) 2503 for (i = 0; i < MAX_BPF_REG; i++)
2486 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) 2504 if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
2487 /* keep the maximum range already checked */ 2505 /* keep the maximum range already checked */
2488 regs[i].range = max_t(u16, regs[i].range, dst_reg->off); 2506 regs[i].range = max(regs[i].range, new_range);
2489 2507
2490 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { 2508 for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
2491 if (state->stack_slot_type[i] != STACK_SPILL) 2509 if (state->stack_slot_type[i] != STACK_SPILL)
2492 continue; 2510 continue;
2493 reg = &state->spilled_regs[i / BPF_REG_SIZE]; 2511 reg = &state->spilled_regs[i / BPF_REG_SIZE];
2494 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) 2512 if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
2495 reg->range = max_t(u16, reg->range, dst_reg->off); 2513 reg->range = max(reg->range, new_range);
2496 } 2514 }
2497} 2515}
2498 2516
@@ -2856,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
2856 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && 2874 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2857 dst_reg->type == PTR_TO_PACKET && 2875 dst_reg->type == PTR_TO_PACKET &&
2858 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2876 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2859 find_good_pkt_pointers(this_branch, dst_reg); 2877 /* pkt_data' > pkt_end */
2878 find_good_pkt_pointers(this_branch, dst_reg, false);
2879 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
2880 dst_reg->type == PTR_TO_PACKET_END &&
2881 regs[insn->src_reg].type == PTR_TO_PACKET) {
2882 /* pkt_end > pkt_data' */
2883 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
2860 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && 2884 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2861 dst_reg->type == PTR_TO_PACKET && 2885 dst_reg->type == PTR_TO_PACKET &&
2862 regs[insn->src_reg].type == PTR_TO_PACKET_END) { 2886 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2863 find_good_pkt_pointers(other_branch, dst_reg); 2887 /* pkt_data' < pkt_end */
2888 find_good_pkt_pointers(other_branch, dst_reg, true);
2889 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
2890 dst_reg->type == PTR_TO_PACKET_END &&
2891 regs[insn->src_reg].type == PTR_TO_PACKET) {
2892 /* pkt_end < pkt_data' */
2893 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
2894 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2895 dst_reg->type == PTR_TO_PACKET &&
2896 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2897 /* pkt_data' >= pkt_end */
2898 find_good_pkt_pointers(this_branch, dst_reg, true);
2864 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && 2899 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
2865 dst_reg->type == PTR_TO_PACKET_END && 2900 dst_reg->type == PTR_TO_PACKET_END &&
2866 regs[insn->src_reg].type == PTR_TO_PACKET) { 2901 regs[insn->src_reg].type == PTR_TO_PACKET) {
2867 find_good_pkt_pointers(other_branch, &regs[insn->src_reg]); 2902 /* pkt_end >= pkt_data' */
2903 find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
2904 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2905 dst_reg->type == PTR_TO_PACKET &&
2906 regs[insn->src_reg].type == PTR_TO_PACKET_END) {
2907 /* pkt_data' <= pkt_end */
2908 find_good_pkt_pointers(other_branch, dst_reg, false);
2868 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && 2909 } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
2869 dst_reg->type == PTR_TO_PACKET_END && 2910 dst_reg->type == PTR_TO_PACKET_END &&
2870 regs[insn->src_reg].type == PTR_TO_PACKET) { 2911 regs[insn->src_reg].type == PTR_TO_PACKET) {
2871 find_good_pkt_pointers(this_branch, &regs[insn->src_reg]); 2912 /* pkt_end <= pkt_data' */
2913 find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
2872 } else if (is_pointer_value(env, insn->dst_reg)) { 2914 } else if (is_pointer_value(env, insn->dst_reg)) {
2873 verbose("R%d pointer comparison prohibited\n", insn->dst_reg); 2915 verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
2874 return -EACCES; 2916 return -EACCES;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index d6551cd45238..44857278eb8a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2311,6 +2311,14 @@ out_release_tset:
2311 list_del_init(&cset->mg_node); 2311 list_del_init(&cset->mg_node);
2312 } 2312 }
2313 spin_unlock_irq(&css_set_lock); 2313 spin_unlock_irq(&css_set_lock);
2314
2315 /*
2316 * Re-initialize the cgroup_taskset structure in case it is reused
2317 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute()
2318 * iteration.
2319 */
2320 tset->nr_tasks = 0;
2321 tset->csets = &tset->src_csets;
2314 return ret; 2322 return ret;
2315} 2323}
2316 2324
diff --git a/kernel/cpu.c b/kernel/cpu.c
index acf5308fad51..04892a82f6ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -24,6 +24,7 @@
24#include <linux/lockdep.h> 24#include <linux/lockdep.h>
25#include <linux/tick.h> 25#include <linux/tick.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/nmi.h>
27#include <linux/smpboot.h> 28#include <linux/smpboot.h>
28#include <linux/relay.h> 29#include <linux/relay.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
@@ -46,11 +47,13 @@
46 * @bringup: Single callback bringup or teardown selector 47 * @bringup: Single callback bringup or teardown selector
47 * @cb_state: The state for a single callback (install/uninstall) 48 * @cb_state: The state for a single callback (install/uninstall)
48 * @result: Result of the operation 49 * @result: Result of the operation
49 * @done: Signal completion to the issuer of the task 50 * @done_up: Signal completion to the issuer of the task for cpu-up
51 * @done_down: Signal completion to the issuer of the task for cpu-down
50 */ 52 */
51struct cpuhp_cpu_state { 53struct cpuhp_cpu_state {
52 enum cpuhp_state state; 54 enum cpuhp_state state;
53 enum cpuhp_state target; 55 enum cpuhp_state target;
56 enum cpuhp_state fail;
54#ifdef CONFIG_SMP 57#ifdef CONFIG_SMP
55 struct task_struct *thread; 58 struct task_struct *thread;
56 bool should_run; 59 bool should_run;
@@ -58,18 +61,39 @@ struct cpuhp_cpu_state {
58 bool single; 61 bool single;
59 bool bringup; 62 bool bringup;
60 struct hlist_node *node; 63 struct hlist_node *node;
64 struct hlist_node *last;
61 enum cpuhp_state cb_state; 65 enum cpuhp_state cb_state;
62 int result; 66 int result;
63 struct completion done; 67 struct completion done_up;
68 struct completion done_down;
64#endif 69#endif
65}; 70};
66 71
67static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state); 72static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
73 .fail = CPUHP_INVALID,
74};
68 75
69#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) 76#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
70static struct lock_class_key cpuhp_state_key; 77static struct lockdep_map cpuhp_state_up_map =
71static struct lockdep_map cpuhp_state_lock_map = 78 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
72 STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key); 79static struct lockdep_map cpuhp_state_down_map =
80 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
81
82
83static void inline cpuhp_lock_acquire(bool bringup)
84{
85 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
86}
87
88static void inline cpuhp_lock_release(bool bringup)
89{
90 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
91}
92#else
93
94static void inline cpuhp_lock_acquire(bool bringup) { }
95static void inline cpuhp_lock_release(bool bringup) { }
96
73#endif 97#endif
74 98
75/** 99/**
@@ -123,13 +147,16 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
123/** 147/**
124 * cpuhp_invoke_callback _ Invoke the callbacks for a given state 148 * cpuhp_invoke_callback _ Invoke the callbacks for a given state
125 * @cpu: The cpu for which the callback should be invoked 149 * @cpu: The cpu for which the callback should be invoked
126 * @step: The step in the state machine 150 * @state: The state to do callbacks for
127 * @bringup: True if the bringup callback should be invoked 151 * @bringup: True if the bringup callback should be invoked
152 * @node: For multi-instance, do a single entry callback for install/remove
153 * @lastp: For multi-instance rollback, remember how far we got
128 * 154 *
129 * Called from cpu hotplug and from the state register machinery. 155 * Called from cpu hotplug and from the state register machinery.
130 */ 156 */
131static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state, 157static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
132 bool bringup, struct hlist_node *node) 158 bool bringup, struct hlist_node *node,
159 struct hlist_node **lastp)
133{ 160{
134 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 161 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
135 struct cpuhp_step *step = cpuhp_get_step(state); 162 struct cpuhp_step *step = cpuhp_get_step(state);
@@ -137,7 +164,17 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
137 int (*cb)(unsigned int cpu); 164 int (*cb)(unsigned int cpu);
138 int ret, cnt; 165 int ret, cnt;
139 166
167 if (st->fail == state) {
168 st->fail = CPUHP_INVALID;
169
170 if (!(bringup ? step->startup.single : step->teardown.single))
171 return 0;
172
173 return -EAGAIN;
174 }
175
140 if (!step->multi_instance) { 176 if (!step->multi_instance) {
177 WARN_ON_ONCE(lastp && *lastp);
141 cb = bringup ? step->startup.single : step->teardown.single; 178 cb = bringup ? step->startup.single : step->teardown.single;
142 if (!cb) 179 if (!cb)
143 return 0; 180 return 0;
@@ -152,6 +189,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
152 189
153 /* Single invocation for instance add/remove */ 190 /* Single invocation for instance add/remove */
154 if (node) { 191 if (node) {
192 WARN_ON_ONCE(lastp && *lastp);
155 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 193 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
156 ret = cbm(cpu, node); 194 ret = cbm(cpu, node);
157 trace_cpuhp_exit(cpu, st->state, state, ret); 195 trace_cpuhp_exit(cpu, st->state, state, ret);
@@ -161,13 +199,23 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
161 /* State transition. Invoke on all instances */ 199 /* State transition. Invoke on all instances */
162 cnt = 0; 200 cnt = 0;
163 hlist_for_each(node, &step->list) { 201 hlist_for_each(node, &step->list) {
202 if (lastp && node == *lastp)
203 break;
204
164 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node); 205 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
165 ret = cbm(cpu, node); 206 ret = cbm(cpu, node);
166 trace_cpuhp_exit(cpu, st->state, state, ret); 207 trace_cpuhp_exit(cpu, st->state, state, ret);
167 if (ret) 208 if (ret) {
168 goto err; 209 if (!lastp)
210 goto err;
211
212 *lastp = node;
213 return ret;
214 }
169 cnt++; 215 cnt++;
170 } 216 }
217 if (lastp)
218 *lastp = NULL;
171 return 0; 219 return 0;
172err: 220err:
173 /* Rollback the instances if one failed */ 221 /* Rollback the instances if one failed */
@@ -178,12 +226,39 @@ err:
178 hlist_for_each(node, &step->list) { 226 hlist_for_each(node, &step->list) {
179 if (!cnt--) 227 if (!cnt--)
180 break; 228 break;
181 cbm(cpu, node); 229
230 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
231 ret = cbm(cpu, node);
232 trace_cpuhp_exit(cpu, st->state, state, ret);
233 /*
234 * Rollback must not fail,
235 */
236 WARN_ON_ONCE(ret);
182 } 237 }
183 return ret; 238 return ret;
184} 239}
185 240
186#ifdef CONFIG_SMP 241#ifdef CONFIG_SMP
242static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
243{
244 struct completion *done = bringup ? &st->done_up : &st->done_down;
245 wait_for_completion(done);
246}
247
248static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
249{
250 struct completion *done = bringup ? &st->done_up : &st->done_down;
251 complete(done);
252}
253
254/*
255 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
256 */
257static bool cpuhp_is_atomic_state(enum cpuhp_state state)
258{
259 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
260}
261
187/* Serializes the updates to cpu_online_mask, cpu_present_mask */ 262/* Serializes the updates to cpu_online_mask, cpu_present_mask */
188static DEFINE_MUTEX(cpu_add_remove_lock); 263static DEFINE_MUTEX(cpu_add_remove_lock);
189bool cpuhp_tasks_frozen; 264bool cpuhp_tasks_frozen;
@@ -271,14 +346,79 @@ void cpu_hotplug_enable(void)
271EXPORT_SYMBOL_GPL(cpu_hotplug_enable); 346EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
272#endif /* CONFIG_HOTPLUG_CPU */ 347#endif /* CONFIG_HOTPLUG_CPU */
273 348
274static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st); 349static inline enum cpuhp_state
350cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
351{
352 enum cpuhp_state prev_state = st->state;
353
354 st->rollback = false;
355 st->last = NULL;
356
357 st->target = target;
358 st->single = false;
359 st->bringup = st->state < target;
360
361 return prev_state;
362}
363
364static inline void
365cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
366{
367 st->rollback = true;
368
369 /*
370 * If we have st->last we need to undo partial multi_instance of this
371 * state first. Otherwise start undo at the previous state.
372 */
373 if (!st->last) {
374 if (st->bringup)
375 st->state--;
376 else
377 st->state++;
378 }
379
380 st->target = prev_state;
381 st->bringup = !st->bringup;
382}
383
384/* Regular hotplug invocation of the AP hotplug thread */
385static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
386{
387 if (!st->single && st->state == st->target)
388 return;
389
390 st->result = 0;
391 /*
392 * Make sure the above stores are visible before should_run becomes
393 * true. Paired with the mb() above in cpuhp_thread_fun()
394 */
395 smp_mb();
396 st->should_run = true;
397 wake_up_process(st->thread);
398 wait_for_ap_thread(st, st->bringup);
399}
400
401static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
402{
403 enum cpuhp_state prev_state;
404 int ret;
405
406 prev_state = cpuhp_set_state(st, target);
407 __cpuhp_kick_ap(st);
408 if ((ret = st->result)) {
409 cpuhp_reset_state(st, prev_state);
410 __cpuhp_kick_ap(st);
411 }
412
413 return ret;
414}
275 415
276static int bringup_wait_for_ap(unsigned int cpu) 416static int bringup_wait_for_ap(unsigned int cpu)
277{ 417{
278 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 418 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
279 419
280 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ 420 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
281 wait_for_completion(&st->done); 421 wait_for_ap_thread(st, true);
282 if (WARN_ON_ONCE((!cpu_online(cpu)))) 422 if (WARN_ON_ONCE((!cpu_online(cpu))))
283 return -ECANCELED; 423 return -ECANCELED;
284 424
@@ -286,12 +426,10 @@ static int bringup_wait_for_ap(unsigned int cpu)
286 stop_machine_unpark(cpu); 426 stop_machine_unpark(cpu);
287 kthread_unpark(st->thread); 427 kthread_unpark(st->thread);
288 428
289 /* Should we go further up ? */ 429 if (st->target <= CPUHP_AP_ONLINE_IDLE)
290 if (st->target > CPUHP_AP_ONLINE_IDLE) { 430 return 0;
291 __cpuhp_kick_ap_work(st); 431
292 wait_for_completion(&st->done); 432 return cpuhp_kick_ap(st, st->target);
293 }
294 return st->result;
295} 433}
296 434
297static int bringup_cpu(unsigned int cpu) 435static int bringup_cpu(unsigned int cpu)
@@ -317,32 +455,6 @@ static int bringup_cpu(unsigned int cpu)
317/* 455/*
318 * Hotplug state machine related functions 456 * Hotplug state machine related functions
319 */ 457 */
320static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
321{
322 for (st->state++; st->state < st->target; st->state++) {
323 struct cpuhp_step *step = cpuhp_get_step(st->state);
324
325 if (!step->skip_onerr)
326 cpuhp_invoke_callback(cpu, st->state, true, NULL);
327 }
328}
329
330static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
331 enum cpuhp_state target)
332{
333 enum cpuhp_state prev_state = st->state;
334 int ret = 0;
335
336 for (; st->state > target; st->state--) {
337 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
338 if (ret) {
339 st->target = prev_state;
340 undo_cpu_down(cpu, st);
341 break;
342 }
343 }
344 return ret;
345}
346 458
347static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) 459static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
348{ 460{
@@ -350,7 +462,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
350 struct cpuhp_step *step = cpuhp_get_step(st->state); 462 struct cpuhp_step *step = cpuhp_get_step(st->state);
351 463
352 if (!step->skip_onerr) 464 if (!step->skip_onerr)
353 cpuhp_invoke_callback(cpu, st->state, false, NULL); 465 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
354 } 466 }
355} 467}
356 468
@@ -362,7 +474,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
362 474
363 while (st->state < target) { 475 while (st->state < target) {
364 st->state++; 476 st->state++;
365 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL); 477 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
366 if (ret) { 478 if (ret) {
367 st->target = prev_state; 479 st->target = prev_state;
368 undo_cpu_up(cpu, st); 480 undo_cpu_up(cpu, st);
@@ -379,7 +491,8 @@ static void cpuhp_create(unsigned int cpu)
379{ 491{
380 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 492 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
381 493
382 init_completion(&st->done); 494 init_completion(&st->done_up);
495 init_completion(&st->done_down);
383} 496}
384 497
385static int cpuhp_should_run(unsigned int cpu) 498static int cpuhp_should_run(unsigned int cpu)
@@ -389,69 +502,90 @@ static int cpuhp_should_run(unsigned int cpu)
389 return st->should_run; 502 return st->should_run;
390} 503}
391 504
392/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
393static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
394{
395 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
396
397 return cpuhp_down_callbacks(cpu, st, target);
398}
399
400/* Execute the online startup callbacks. Used to be CPU_ONLINE */
401static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
402{
403 return cpuhp_up_callbacks(cpu, st, st->target);
404}
405
406/* 505/*
407 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke 506 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
408 * callbacks when a state gets [un]installed at runtime. 507 * callbacks when a state gets [un]installed at runtime.
508 *
509 * Each invocation of this function by the smpboot thread does a single AP
510 * state callback.
511 *
512 * It has 3 modes of operation:
513 * - single: runs st->cb_state
514 * - up: runs ++st->state, while st->state < st->target
515 * - down: runs st->state--, while st->state > st->target
516 *
517 * When complete or on error, should_run is cleared and the completion is fired.
409 */ 518 */
410static void cpuhp_thread_fun(unsigned int cpu) 519static void cpuhp_thread_fun(unsigned int cpu)
411{ 520{
412 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 521 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
413 int ret = 0; 522 bool bringup = st->bringup;
523 enum cpuhp_state state;
414 524
415 /* 525 /*
416 * Paired with the mb() in cpuhp_kick_ap_work and 526 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
417 * cpuhp_invoke_ap_callback, so the work set is consistent visible. 527 * that if we see ->should_run we also see the rest of the state.
418 */ 528 */
419 smp_mb(); 529 smp_mb();
420 if (!st->should_run) 530
531 if (WARN_ON_ONCE(!st->should_run))
421 return; 532 return;
422 533
423 st->should_run = false; 534 cpuhp_lock_acquire(bringup);
424 535
425 lock_map_acquire(&cpuhp_state_lock_map);
426 /* Single callback invocation for [un]install ? */
427 if (st->single) { 536 if (st->single) {
428 if (st->cb_state < CPUHP_AP_ONLINE) { 537 state = st->cb_state;
429 local_irq_disable(); 538 st->should_run = false;
430 ret = cpuhp_invoke_callback(cpu, st->cb_state, 539 } else {
431 st->bringup, st->node); 540 if (bringup) {
432 local_irq_enable(); 541 st->state++;
542 state = st->state;
543 st->should_run = (st->state < st->target);
544 WARN_ON_ONCE(st->state > st->target);
433 } else { 545 } else {
434 ret = cpuhp_invoke_callback(cpu, st->cb_state, 546 state = st->state;
435 st->bringup, st->node); 547 st->state--;
548 st->should_run = (st->state > st->target);
549 WARN_ON_ONCE(st->state < st->target);
436 } 550 }
437 } else if (st->rollback) { 551 }
438 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 552
553 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
439 554
440 undo_cpu_down(cpu, st); 555 if (st->rollback) {
441 st->rollback = false; 556 struct cpuhp_step *step = cpuhp_get_step(state);
557 if (step->skip_onerr)
558 goto next;
559 }
560
561 if (cpuhp_is_atomic_state(state)) {
562 local_irq_disable();
563 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
564 local_irq_enable();
565
566 /*
567 * STARTING/DYING must not fail!
568 */
569 WARN_ON_ONCE(st->result);
442 } else { 570 } else {
443 /* Cannot happen .... */ 571 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
444 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 572 }
445 573
446 /* Regular hotplug work */ 574 if (st->result) {
447 if (st->state < st->target) 575 /*
448 ret = cpuhp_ap_online(cpu, st); 576 * If we fail on a rollback, we're up a creek without no
449 else if (st->state > st->target) 577 * paddle, no way forward, no way back. We loose, thanks for
450 ret = cpuhp_ap_offline(cpu, st); 578 * playing.
579 */
580 WARN_ON_ONCE(st->rollback);
581 st->should_run = false;
451 } 582 }
452 lock_map_release(&cpuhp_state_lock_map); 583
453 st->result = ret; 584next:
454 complete(&st->done); 585 cpuhp_lock_release(bringup);
586
587 if (!st->should_run)
588 complete_ap_thread(st, bringup);
455} 589}
456 590
457/* Invoke a single callback on a remote cpu */ 591/* Invoke a single callback on a remote cpu */
@@ -460,62 +594,69 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
460 struct hlist_node *node) 594 struct hlist_node *node)
461{ 595{
462 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 596 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
597 int ret;
463 598
464 if (!cpu_online(cpu)) 599 if (!cpu_online(cpu))
465 return 0; 600 return 0;
466 601
467 lock_map_acquire(&cpuhp_state_lock_map); 602 cpuhp_lock_acquire(false);
468 lock_map_release(&cpuhp_state_lock_map); 603 cpuhp_lock_release(false);
604
605 cpuhp_lock_acquire(true);
606 cpuhp_lock_release(true);
469 607
470 /* 608 /*
471 * If we are up and running, use the hotplug thread. For early calls 609 * If we are up and running, use the hotplug thread. For early calls
472 * we invoke the thread function directly. 610 * we invoke the thread function directly.
473 */ 611 */
474 if (!st->thread) 612 if (!st->thread)
475 return cpuhp_invoke_callback(cpu, state, bringup, node); 613 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
476 614
615 st->rollback = false;
616 st->last = NULL;
617
618 st->node = node;
619 st->bringup = bringup;
477 st->cb_state = state; 620 st->cb_state = state;
478 st->single = true; 621 st->single = true;
479 st->bringup = bringup; 622
480 st->node = node; 623 __cpuhp_kick_ap(st);
481 624
482 /* 625 /*
483 * Make sure the above stores are visible before should_run becomes 626 * If we failed and did a partial, do a rollback.
484 * true. Paired with the mb() above in cpuhp_thread_fun()
485 */ 627 */
486 smp_mb(); 628 if ((ret = st->result) && st->last) {
487 st->should_run = true; 629 st->rollback = true;
488 wake_up_process(st->thread); 630 st->bringup = !bringup;
489 wait_for_completion(&st->done); 631
490 return st->result; 632 __cpuhp_kick_ap(st);
491} 633 }
492 634
493/* Regular hotplug invocation of the AP hotplug thread */
494static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
495{
496 st->result = 0;
497 st->single = false;
498 /* 635 /*
499 * Make sure the above stores are visible before should_run becomes 636 * Clean up the leftovers so the next hotplug operation wont use stale
500 * true. Paired with the mb() above in cpuhp_thread_fun() 637 * data.
501 */ 638 */
502 smp_mb(); 639 st->node = st->last = NULL;
503 st->should_run = true; 640 return ret;
504 wake_up_process(st->thread);
505} 641}
506 642
507static int cpuhp_kick_ap_work(unsigned int cpu) 643static int cpuhp_kick_ap_work(unsigned int cpu)
508{ 644{
509 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 645 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
510 enum cpuhp_state state = st->state; 646 enum cpuhp_state prev_state = st->state;
647 int ret;
648
649 cpuhp_lock_acquire(false);
650 cpuhp_lock_release(false);
651
652 cpuhp_lock_acquire(true);
653 cpuhp_lock_release(true);
511 654
512 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work); 655 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
513 lock_map_acquire(&cpuhp_state_lock_map); 656 ret = cpuhp_kick_ap(st, st->target);
514 lock_map_release(&cpuhp_state_lock_map); 657 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
515 __cpuhp_kick_ap_work(st); 658
516 wait_for_completion(&st->done); 659 return ret;
517 trace_cpuhp_exit(cpu, st->state, state, st->result);
518 return st->result;
519} 660}
520 661
521static struct smp_hotplug_thread cpuhp_threads = { 662static struct smp_hotplug_thread cpuhp_threads = {
@@ -581,6 +722,7 @@ static int take_cpu_down(void *_param)
581 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); 722 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
582 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE); 723 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
583 int err, cpu = smp_processor_id(); 724 int err, cpu = smp_processor_id();
725 int ret;
584 726
585 /* Ensure this CPU doesn't handle any more interrupts. */ 727 /* Ensure this CPU doesn't handle any more interrupts. */
586 err = __cpu_disable(); 728 err = __cpu_disable();
@@ -594,8 +736,13 @@ static int take_cpu_down(void *_param)
594 WARN_ON(st->state != CPUHP_TEARDOWN_CPU); 736 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
595 st->state--; 737 st->state--;
596 /* Invoke the former CPU_DYING callbacks */ 738 /* Invoke the former CPU_DYING callbacks */
597 for (; st->state > target; st->state--) 739 for (; st->state > target; st->state--) {
598 cpuhp_invoke_callback(cpu, st->state, false, NULL); 740 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
741 /*
742 * DYING must not fail!
743 */
744 WARN_ON_ONCE(ret);
745 }
599 746
600 /* Give up timekeeping duties */ 747 /* Give up timekeeping duties */
601 tick_handover_do_timer(); 748 tick_handover_do_timer();
@@ -639,7 +786,7 @@ static int takedown_cpu(unsigned int cpu)
639 * 786 *
640 * Wait for the stop thread to go away. 787 * Wait for the stop thread to go away.
641 */ 788 */
642 wait_for_completion(&st->done); 789 wait_for_ap_thread(st, false);
643 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); 790 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
644 791
645 /* Interrupts are moved away from the dying cpu, reenable alloc/free */ 792 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
@@ -658,7 +805,7 @@ static void cpuhp_complete_idle_dead(void *arg)
658{ 805{
659 struct cpuhp_cpu_state *st = arg; 806 struct cpuhp_cpu_state *st = arg;
660 807
661 complete(&st->done); 808 complete_ap_thread(st, false);
662} 809}
663 810
664void cpuhp_report_idle_dead(void) 811void cpuhp_report_idle_dead(void)
@@ -676,11 +823,32 @@ void cpuhp_report_idle_dead(void)
676 cpuhp_complete_idle_dead, st, 0); 823 cpuhp_complete_idle_dead, st, 0);
677} 824}
678 825
679#else 826static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
680#define takedown_cpu NULL 827{
681#endif 828 for (st->state++; st->state < st->target; st->state++) {
829 struct cpuhp_step *step = cpuhp_get_step(st->state);
682 830
683#ifdef CONFIG_HOTPLUG_CPU 831 if (!step->skip_onerr)
832 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
833 }
834}
835
836static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
837 enum cpuhp_state target)
838{
839 enum cpuhp_state prev_state = st->state;
840 int ret = 0;
841
842 for (; st->state > target; st->state--) {
843 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
844 if (ret) {
845 st->target = prev_state;
846 undo_cpu_down(cpu, st);
847 break;
848 }
849 }
850 return ret;
851}
684 852
685/* Requires cpu_add_remove_lock to be held */ 853/* Requires cpu_add_remove_lock to be held */
686static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, 854static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -699,13 +867,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
699 867
700 cpuhp_tasks_frozen = tasks_frozen; 868 cpuhp_tasks_frozen = tasks_frozen;
701 869
702 prev_state = st->state; 870 prev_state = cpuhp_set_state(st, target);
703 st->target = target;
704 /* 871 /*
705 * If the current CPU state is in the range of the AP hotplug thread, 872 * If the current CPU state is in the range of the AP hotplug thread,
706 * then we need to kick the thread. 873 * then we need to kick the thread.
707 */ 874 */
708 if (st->state > CPUHP_TEARDOWN_CPU) { 875 if (st->state > CPUHP_TEARDOWN_CPU) {
876 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
709 ret = cpuhp_kick_ap_work(cpu); 877 ret = cpuhp_kick_ap_work(cpu);
710 /* 878 /*
711 * The AP side has done the error rollback already. Just 879 * The AP side has done the error rollback already. Just
@@ -720,6 +888,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
720 */ 888 */
721 if (st->state > CPUHP_TEARDOWN_CPU) 889 if (st->state > CPUHP_TEARDOWN_CPU)
722 goto out; 890 goto out;
891
892 st->target = target;
723 } 893 }
724 /* 894 /*
725 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need 895 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
@@ -727,13 +897,17 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
727 */ 897 */
728 ret = cpuhp_down_callbacks(cpu, st, target); 898 ret = cpuhp_down_callbacks(cpu, st, target);
729 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 899 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
730 st->target = prev_state; 900 cpuhp_reset_state(st, prev_state);
731 st->rollback = true; 901 __cpuhp_kick_ap(st);
732 cpuhp_kick_ap_work(cpu);
733 } 902 }
734 903
735out: 904out:
736 cpus_write_unlock(); 905 cpus_write_unlock();
906 /*
907 * Do post unplug cleanup. This is still protected against
908 * concurrent CPU hotplug via cpu_add_remove_lock.
909 */
910 lockup_detector_cleanup();
737 return ret; 911 return ret;
738} 912}
739 913
@@ -754,11 +928,15 @@ out:
754 cpu_maps_update_done(); 928 cpu_maps_update_done();
755 return err; 929 return err;
756} 930}
931
757int cpu_down(unsigned int cpu) 932int cpu_down(unsigned int cpu)
758{ 933{
759 return do_cpu_down(cpu, CPUHP_OFFLINE); 934 return do_cpu_down(cpu, CPUHP_OFFLINE);
760} 935}
761EXPORT_SYMBOL(cpu_down); 936EXPORT_SYMBOL(cpu_down);
937
938#else
939#define takedown_cpu NULL
762#endif /*CONFIG_HOTPLUG_CPU*/ 940#endif /*CONFIG_HOTPLUG_CPU*/
763 941
764/** 942/**
@@ -772,11 +950,16 @@ void notify_cpu_starting(unsigned int cpu)
772{ 950{
773 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); 951 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
774 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); 952 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
953 int ret;
775 954
776 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ 955 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
777 while (st->state < target) { 956 while (st->state < target) {
778 st->state++; 957 st->state++;
779 cpuhp_invoke_callback(cpu, st->state, true, NULL); 958 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
959 /*
960 * STARTING must not fail!
961 */
962 WARN_ON_ONCE(ret);
780 } 963 }
781} 964}
782 965
@@ -794,7 +977,7 @@ void cpuhp_online_idle(enum cpuhp_state state)
794 return; 977 return;
795 978
796 st->state = CPUHP_AP_ONLINE_IDLE; 979 st->state = CPUHP_AP_ONLINE_IDLE;
797 complete(&st->done); 980 complete_ap_thread(st, true);
798} 981}
799 982
800/* Requires cpu_add_remove_lock to be held */ 983/* Requires cpu_add_remove_lock to be held */
@@ -829,7 +1012,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
829 1012
830 cpuhp_tasks_frozen = tasks_frozen; 1013 cpuhp_tasks_frozen = tasks_frozen;
831 1014
832 st->target = target; 1015 cpuhp_set_state(st, target);
833 /* 1016 /*
834 * If the current CPU state is in the range of the AP hotplug thread, 1017 * If the current CPU state is in the range of the AP hotplug thread,
835 * then we need to kick the thread once more. 1018 * then we need to kick the thread once more.
@@ -1296,6 +1479,10 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1296 struct cpuhp_step *sp = cpuhp_get_step(state); 1479 struct cpuhp_step *sp = cpuhp_get_step(state);
1297 int ret; 1480 int ret;
1298 1481
1482 /*
1483 * If there's nothing to do, we done.
1484 * Relies on the union for multi_instance.
1485 */
1299 if ((bringup && !sp->startup.single) || 1486 if ((bringup && !sp->startup.single) ||
1300 (!bringup && !sp->teardown.single)) 1487 (!bringup && !sp->teardown.single))
1301 return 0; 1488 return 0;
@@ -1307,9 +1494,9 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1307 if (cpuhp_is_ap_state(state)) 1494 if (cpuhp_is_ap_state(state))
1308 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); 1495 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1309 else 1496 else
1310 ret = cpuhp_invoke_callback(cpu, state, bringup, node); 1497 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1311#else 1498#else
1312 ret = cpuhp_invoke_callback(cpu, state, bringup, node); 1499 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1313#endif 1500#endif
1314 BUG_ON(ret && !bringup); 1501 BUG_ON(ret && !bringup);
1315 return ret; 1502 return ret;
@@ -1641,9 +1828,55 @@ static ssize_t show_cpuhp_target(struct device *dev,
1641} 1828}
1642static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target); 1829static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1643 1830
1831
1832static ssize_t write_cpuhp_fail(struct device *dev,
1833 struct device_attribute *attr,
1834 const char *buf, size_t count)
1835{
1836 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1837 struct cpuhp_step *sp;
1838 int fail, ret;
1839
1840 ret = kstrtoint(buf, 10, &fail);
1841 if (ret)
1842 return ret;
1843
1844 /*
1845 * Cannot fail STARTING/DYING callbacks.
1846 */
1847 if (cpuhp_is_atomic_state(fail))
1848 return -EINVAL;
1849
1850 /*
1851 * Cannot fail anything that doesn't have callbacks.
1852 */
1853 mutex_lock(&cpuhp_state_mutex);
1854 sp = cpuhp_get_step(fail);
1855 if (!sp->startup.single && !sp->teardown.single)
1856 ret = -EINVAL;
1857 mutex_unlock(&cpuhp_state_mutex);
1858 if (ret)
1859 return ret;
1860
1861 st->fail = fail;
1862
1863 return count;
1864}
1865
1866static ssize_t show_cpuhp_fail(struct device *dev,
1867 struct device_attribute *attr, char *buf)
1868{
1869 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1870
1871 return sprintf(buf, "%d\n", st->fail);
1872}
1873
1874static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
1875
1644static struct attribute *cpuhp_cpu_attrs[] = { 1876static struct attribute *cpuhp_cpu_attrs[] = {
1645 &dev_attr_state.attr, 1877 &dev_attr_state.attr,
1646 &dev_attr_target.attr, 1878 &dev_attr_target.attr,
1879 &dev_attr_fail.attr,
1647 NULL 1880 NULL
1648}; 1881};
1649 1882
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6bc21e202ae4..9d93db81fa36 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
662 /* 662 /*
663 * Do not update time when cgroup is not active 663 * Do not update time when cgroup is not active
664 */ 664 */
665 if (cgrp == event->cgrp) 665 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
666 __update_cgrp_time(event->cgrp); 666 __update_cgrp_time(event->cgrp);
667} 667}
668 668
@@ -8955,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
8955 8955
8956static void free_pmu_context(struct pmu *pmu) 8956static void free_pmu_context(struct pmu *pmu)
8957{ 8957{
8958 /*
8959 * Static contexts such as perf_sw_context have a global lifetime
8960 * and may be shared between different PMUs. Avoid freeing them
8961 * when a single PMU is going away.
8962 */
8963 if (pmu->task_ctx_nr > perf_invalid_context)
8964 return;
8965
8958 mutex_lock(&pmus_lock); 8966 mutex_lock(&pmus_lock);
8959 free_percpu(pmu->pmu_cpu_context); 8967 free_percpu(pmu->pmu_cpu_context);
8960 mutex_unlock(&pmus_lock); 8968 mutex_unlock(&pmus_lock);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index af71a84e12ee..f684d8e5fa2b 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -412,6 +412,19 @@ err:
412 return NULL; 412 return NULL;
413} 413}
414 414
415static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
416{
417 if (rb->aux_overwrite)
418 return false;
419
420 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
421 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
422 return true;
423 }
424
425 return false;
426}
427
415/* 428/*
416 * Commit the data written by hardware into the ring buffer by adjusting 429 * Commit the data written by hardware into the ring buffer by adjusting
417 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the 430 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
@@ -451,10 +464,8 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
451 } 464 }
452 465
453 rb->user_page->aux_head = rb->aux_head; 466 rb->user_page->aux_head = rb->aux_head;
454 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { 467 if (rb_need_aux_wakeup(rb))
455 wakeup = true; 468 wakeup = true;
456 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
457 }
458 469
459 if (wakeup) { 470 if (wakeup) {
460 if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED) 471 if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
@@ -484,9 +495,8 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
484 rb->aux_head += size; 495 rb->aux_head += size;
485 496
486 rb->user_page->aux_head = rb->aux_head; 497 rb->user_page->aux_head = rb->aux_head;
487 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) { 498 if (rb_need_aux_wakeup(rb)) {
488 perf_output_wakeup(handle); 499 perf_output_wakeup(handle);
489 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
490 handle->wakeup = rb->aux_wakeup + rb->aux_watermark; 500 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
491 } 501 }
492 502
diff --git a/kernel/exit.c b/kernel/exit.c
index 3481ababd06a..f6cad39f35df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1600,18 +1600,19 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1600 struct waitid_info info = {.status = 0}; 1600 struct waitid_info info = {.status = 0};
1601 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); 1601 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
1602 int signo = 0; 1602 int signo = 0;
1603
1603 if (err > 0) { 1604 if (err > 0) {
1604 signo = SIGCHLD; 1605 signo = SIGCHLD;
1605 err = 0; 1606 err = 0;
1606 }
1607
1608 if (!err) {
1609 if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) 1607 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1610 return -EFAULT; 1608 return -EFAULT;
1611 } 1609 }
1612 if (!infop) 1610 if (!infop)
1613 return err; 1611 return err;
1614 1612
1613 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1614 return -EFAULT;
1615
1615 user_access_begin(); 1616 user_access_begin();
1616 unsafe_put_user(signo, &infop->si_signo, Efault); 1617 unsafe_put_user(signo, &infop->si_signo, Efault);
1617 unsafe_put_user(0, &infop->si_errno, Efault); 1618 unsafe_put_user(0, &infop->si_errno, Efault);
@@ -1723,21 +1724,23 @@ COMPAT_SYSCALL_DEFINE5(waitid,
1723 if (err > 0) { 1724 if (err > 0) {
1724 signo = SIGCHLD; 1725 signo = SIGCHLD;
1725 err = 0; 1726 err = 0;
1726 } 1727 if (uru) {
1727 1728 /* kernel_waitid() overwrites everything in ru */
1728 if (!err && uru) { 1729 if (COMPAT_USE_64BIT_TIME)
1729 /* kernel_waitid() overwrites everything in ru */ 1730 err = copy_to_user(uru, &ru, sizeof(ru));
1730 if (COMPAT_USE_64BIT_TIME) 1731 else
1731 err = copy_to_user(uru, &ru, sizeof(ru)); 1732 err = put_compat_rusage(&ru, uru);
1732 else 1733 if (err)
1733 err = put_compat_rusage(&ru, uru); 1734 return -EFAULT;
1734 if (err) 1735 }
1735 return -EFAULT;
1736 } 1736 }
1737 1737
1738 if (!infop) 1738 if (!infop)
1739 return err; 1739 return err;
1740 1740
1741 if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
1742 return -EFAULT;
1743
1741 user_access_begin(); 1744 user_access_begin();
1742 unsafe_put_user(signo, &infop->si_signo, Efault); 1745 unsafe_put_user(signo, &infop->si_signo, Efault);
1743 unsafe_put_user(0, &infop->si_errno, Efault); 1746 unsafe_put_user(0, &infop->si_errno, Efault);
diff --git a/kernel/extable.c b/kernel/extable.c
index 38c2412401a1..9aa1cc41ecf7 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -102,15 +102,7 @@ int core_kernel_data(unsigned long addr)
102 102
103int __kernel_text_address(unsigned long addr) 103int __kernel_text_address(unsigned long addr)
104{ 104{
105 if (core_kernel_text(addr)) 105 if (kernel_text_address(addr))
106 return 1;
107 if (is_module_text_address(addr))
108 return 1;
109 if (is_ftrace_trampoline(addr))
110 return 1;
111 if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
112 return 1;
113 if (is_bpf_text_address(addr))
114 return 1; 106 return 1;
115 /* 107 /*
116 * There might be init symbols in saved stacktraces. 108 * There might be init symbols in saved stacktraces.
@@ -127,17 +119,42 @@ int __kernel_text_address(unsigned long addr)
127 119
128int kernel_text_address(unsigned long addr) 120int kernel_text_address(unsigned long addr)
129{ 121{
122 bool no_rcu;
123 int ret = 1;
124
130 if (core_kernel_text(addr)) 125 if (core_kernel_text(addr))
131 return 1; 126 return 1;
127
128 /*
129 * If a stack dump happens while RCU is not watching, then
130 * RCU needs to be notified that it requires to start
131 * watching again. This can happen either by tracing that
132 * triggers a stack trace, or a WARN() that happens during
133 * coming back from idle, or cpu on or offlining.
134 *
135 * is_module_text_address() as well as the kprobe slots
136 * and is_bpf_text_address() require RCU to be watching.
137 */
138 no_rcu = !rcu_is_watching();
139
140 /* Treat this like an NMI as it can happen anywhere */
141 if (no_rcu)
142 rcu_nmi_enter();
143
132 if (is_module_text_address(addr)) 144 if (is_module_text_address(addr))
133 return 1; 145 goto out;
134 if (is_ftrace_trampoline(addr)) 146 if (is_ftrace_trampoline(addr))
135 return 1; 147 goto out;
136 if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr)) 148 if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
137 return 1; 149 goto out;
138 if (is_bpf_text_address(addr)) 150 if (is_bpf_text_address(addr))
139 return 1; 151 goto out;
140 return 0; 152 ret = 0;
153out:
154 if (no_rcu)
155 rcu_nmi_exit();
156
157 return ret;
141} 158}
142 159
143/* 160/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 10646182440f..07cc743698d3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
215 if (!s) 215 if (!s)
216 continue; 216 continue;
217 217
218#ifdef CONFIG_DEBUG_KMEMLEAK
219 /* Clear stale pointers from reused stack. */
220 memset(s->addr, 0, THREAD_SIZE);
221#endif
218 tsk->stack_vm_area = s; 222 tsk->stack_vm_area = s;
219 return s->addr; 223 return s->addr;
220 } 224 }
@@ -946,6 +950,24 @@ void mmput(struct mm_struct *mm)
946} 950}
947EXPORT_SYMBOL_GPL(mmput); 951EXPORT_SYMBOL_GPL(mmput);
948 952
953#ifdef CONFIG_MMU
954static void mmput_async_fn(struct work_struct *work)
955{
956 struct mm_struct *mm = container_of(work, struct mm_struct,
957 async_put_work);
958
959 __mmput(mm);
960}
961
962void mmput_async(struct mm_struct *mm)
963{
964 if (atomic_dec_and_test(&mm->mm_users)) {
965 INIT_WORK(&mm->async_put_work, mmput_async_fn);
966 schedule_work(&mm->async_put_work);
967 }
968}
969#endif
970
949/** 971/**
950 * set_mm_exe_file - change a reference to the mm's executable file 972 * set_mm_exe_file - change a reference to the mm's executable file
951 * 973 *
diff --git a/kernel/futex.c b/kernel/futex.c
index 3d38eaf05492..0518a0bfc746 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -821,8 +821,6 @@ static void get_pi_state(struct futex_pi_state *pi_state)
821/* 821/*
822 * Drops a reference to the pi_state object and frees or caches it 822 * Drops a reference to the pi_state object and frees or caches it
823 * when the last reference is gone. 823 * when the last reference is gone.
824 *
825 * Must be called with the hb lock held.
826 */ 824 */
827static void put_pi_state(struct futex_pi_state *pi_state) 825static void put_pi_state(struct futex_pi_state *pi_state)
828{ 826{
@@ -837,16 +835,22 @@ static void put_pi_state(struct futex_pi_state *pi_state)
837 * and has cleaned up the pi_state already 835 * and has cleaned up the pi_state already
838 */ 836 */
839 if (pi_state->owner) { 837 if (pi_state->owner) {
840 raw_spin_lock_irq(&pi_state->owner->pi_lock); 838 struct task_struct *owner;
841 list_del_init(&pi_state->list);
842 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
843 839
844 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); 840 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
841 owner = pi_state->owner;
842 if (owner) {
843 raw_spin_lock(&owner->pi_lock);
844 list_del_init(&pi_state->list);
845 raw_spin_unlock(&owner->pi_lock);
846 }
847 rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
848 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
845 } 849 }
846 850
847 if (current->pi_state_cache) 851 if (current->pi_state_cache) {
848 kfree(pi_state); 852 kfree(pi_state);
849 else { 853 } else {
850 /* 854 /*
851 * pi_state->list is already empty. 855 * pi_state->list is already empty.
852 * clear pi_state->owner. 856 * clear pi_state->owner.
@@ -907,13 +911,14 @@ void exit_pi_state_list(struct task_struct *curr)
907 raw_spin_unlock_irq(&curr->pi_lock); 911 raw_spin_unlock_irq(&curr->pi_lock);
908 912
909 spin_lock(&hb->lock); 913 spin_lock(&hb->lock);
910 914 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
911 raw_spin_lock_irq(&curr->pi_lock); 915 raw_spin_lock(&curr->pi_lock);
912 /* 916 /*
913 * We dropped the pi-lock, so re-check whether this 917 * We dropped the pi-lock, so re-check whether this
914 * task still owns the PI-state: 918 * task still owns the PI-state:
915 */ 919 */
916 if (head->next != next) { 920 if (head->next != next) {
921 raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
917 spin_unlock(&hb->lock); 922 spin_unlock(&hb->lock);
918 continue; 923 continue;
919 } 924 }
@@ -922,9 +927,10 @@ void exit_pi_state_list(struct task_struct *curr)
922 WARN_ON(list_empty(&pi_state->list)); 927 WARN_ON(list_empty(&pi_state->list));
923 list_del_init(&pi_state->list); 928 list_del_init(&pi_state->list);
924 pi_state->owner = NULL; 929 pi_state->owner = NULL;
925 raw_spin_unlock_irq(&curr->pi_lock); 930 raw_spin_unlock(&curr->pi_lock);
926 931
927 get_pi_state(pi_state); 932 get_pi_state(pi_state);
933 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
928 spin_unlock(&hb->lock); 934 spin_unlock(&hb->lock);
929 935
930 rt_mutex_futex_unlock(&pi_state->pi_mutex); 936 rt_mutex_futex_unlock(&pi_state->pi_mutex);
@@ -1208,6 +1214,10 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
1208 1214
1209 WARN_ON(!list_empty(&pi_state->list)); 1215 WARN_ON(!list_empty(&pi_state->list));
1210 list_add(&pi_state->list, &p->pi_state_list); 1216 list_add(&pi_state->list, &p->pi_state_list);
1217 /*
1218 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1219 * because there is no concurrency as the object is not published yet.
1220 */
1211 pi_state->owner = p; 1221 pi_state->owner = p;
1212 raw_spin_unlock_irq(&p->pi_lock); 1222 raw_spin_unlock_irq(&p->pi_lock);
1213 1223
@@ -2878,6 +2888,7 @@ retry:
2878 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 2888 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2879 spin_unlock(&hb->lock); 2889 spin_unlock(&hb->lock);
2880 2890
2891 /* drops pi_state->pi_mutex.wait_lock */
2881 ret = wake_futex_pi(uaddr, uval, pi_state); 2892 ret = wake_futex_pi(uaddr, uval, pi_state);
2882 2893
2883 put_pi_state(pi_state); 2894 put_pi_state(pi_state);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6fc89fd93824..5a2ef92c2782 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
265 irq_setup_affinity(desc); 265 irq_setup_affinity(desc);
266 break; 266 break;
267 case IRQ_STARTUP_MANAGED: 267 case IRQ_STARTUP_MANAGED:
268 irq_do_set_affinity(d, aff, false);
268 ret = __irq_startup(desc); 269 ret = __irq_startup(desc);
269 irq_set_affinity_locked(d, aff, false);
270 break; 270 break;
271 case IRQ_STARTUP_ABORT: 271 case IRQ_STARTUP_ABORT:
272 return 0; 272 return 0;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 638eb9c83d9f..9eb09aef0313 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -18,8 +18,34 @@
18static inline bool irq_needs_fixup(struct irq_data *d) 18static inline bool irq_needs_fixup(struct irq_data *d)
19{ 19{
20 const struct cpumask *m = irq_data_get_effective_affinity_mask(d); 20 const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
21 unsigned int cpu = smp_processor_id();
21 22
22 return cpumask_test_cpu(smp_processor_id(), m); 23#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
24 /*
25 * The cpumask_empty() check is a workaround for interrupt chips,
26 * which do not implement effective affinity, but the architecture has
27 * enabled the config switch. Use the general affinity mask instead.
28 */
29 if (cpumask_empty(m))
30 m = irq_data_get_affinity_mask(d);
31
32 /*
33 * Sanity check. If the mask is not empty when excluding the outgoing
34 * CPU then it must contain at least one online CPU. The outgoing CPU
35 * has been removed from the online mask already.
36 */
37 if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
38 cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
39 /*
40 * If this happens then there was a missed IRQ fixup at some
41 * point. Warn about it and enforce fixup.
42 */
43 pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
44 cpumask_pr_args(m), d->irq, cpu);
45 return true;
46 }
47#endif
48 return cpumask_test_cpu(cpu, m);
23} 49}
24 50
25static bool migrate_one_irq(struct irq_desc *desc) 51static bool migrate_one_irq(struct irq_desc *desc)
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index f7086b78ad6e..c26c5bb6b491 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
135} 135}
136 136
137/** 137/**
138 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt 138 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
139 * @d: irq_data 139 * @d: irq_data
140 *
141 * This generic implementation of the irq_mask_ack method is for chips
142 * with separate enable/disable registers instead of a single mask
143 * register and where a pending interrupt is acknowledged by setting a
144 * bit.
145 *
146 * Note: This is the only permutation currently used. Similar generic
147 * functions should be added here if other permutations are required.
140 */ 148 */
141void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) 149void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
142{ 150{
143 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 151 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
144 struct irq_chip_type *ct = irq_data_get_chip_type(d); 152 struct irq_chip_type *ct = irq_data_get_chip_type(d);
145 u32 mask = d->mask; 153 u32 mask = d->mask;
146 154
147 irq_gc_lock(gc); 155 irq_gc_lock(gc);
148 irq_reg_writel(gc, mask, ct->regs.mask); 156 irq_reg_writel(gc, mask, ct->regs.disable);
157 *ct->mask_cache &= ~mask;
149 irq_reg_writel(gc, mask, ct->regs.ack); 158 irq_reg_writel(gc, mask, ct->regs.ack);
150 irq_gc_unlock(gc); 159 irq_gc_unlock(gc);
151} 160}
@@ -322,7 +331,6 @@ int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
322 /* Calc pointer to the next generic chip */ 331 /* Calc pointer to the next generic chip */
323 tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); 332 tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
324 } 333 }
325 d->name = name;
326 return 0; 334 return 0;
327} 335}
328EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips); 336EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index e84b7056bb08..ac4644e92b49 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -945,7 +945,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
945 struct irq_desc *desc; 945 struct irq_desc *desc;
946 struct irq_domain *domain; 946 struct irq_domain *domain;
947 struct radix_tree_iter iter; 947 struct radix_tree_iter iter;
948 void **slot; 948 void __rcu **slot;
949 int i; 949 int i;
950 950
951 seq_printf(m, " %-16s %-6s %-10s %-10s %s\n", 951 seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
@@ -1453,7 +1453,7 @@ out_free_desc:
1453/* The irq_data was moved, fix the revmap to refer to the new location */ 1453/* The irq_data was moved, fix the revmap to refer to the new location */
1454static void irq_domain_fix_revmap(struct irq_data *d) 1454static void irq_domain_fix_revmap(struct irq_data *d)
1455{ 1455{
1456 void **slot; 1456 void __rcu **slot;
1457 1457
1458 if (d->hwirq < d->domain->revmap_size) 1458 if (d->hwirq < d->domain->revmap_size)
1459 return; /* Not using radix tree. */ 1459 return; /* Not using radix tree. */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 573dc52b0806..4bff6a10ae8e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc)
168 set_bit(IRQTF_AFFINITY, &action->thread_flags); 168 set_bit(IRQTF_AFFINITY, &action->thread_flags);
169} 169}
170 170
171static void irq_validate_effective_affinity(struct irq_data *data)
172{
173#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
174 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
175 struct irq_chip *chip = irq_data_get_irq_chip(data);
176
177 if (!cpumask_empty(m))
178 return;
179 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
180 chip->name, data->irq);
181#endif
182}
183
171int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, 184int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
172 bool force) 185 bool force)
173{ 186{
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
175 struct irq_chip *chip = irq_data_get_irq_chip(data); 188 struct irq_chip *chip = irq_data_get_irq_chip(data);
176 int ret; 189 int ret;
177 190
191 if (!chip || !chip->irq_set_affinity)
192 return -EINVAL;
193
178 ret = chip->irq_set_affinity(data, mask, force); 194 ret = chip->irq_set_affinity(data, mask, force);
179 switch (ret) { 195 switch (ret) {
180 case IRQ_SET_MASK_OK: 196 case IRQ_SET_MASK_OK:
181 case IRQ_SET_MASK_OK_DONE: 197 case IRQ_SET_MASK_OK_DONE:
182 cpumask_copy(desc->irq_common_data.affinity, mask); 198 cpumask_copy(desc->irq_common_data.affinity, mask);
183 case IRQ_SET_MASK_OK_NOCOPY: 199 case IRQ_SET_MASK_OK_NOCOPY:
200 irq_validate_effective_affinity(data);
184 irq_set_thread_affinity(desc); 201 irq_set_thread_affinity(desc);
185 ret = 0; 202 ret = 0;
186 } 203 }
@@ -1643,6 +1660,10 @@ const void *free_irq(unsigned int irq, void *dev_id)
1643#endif 1660#endif
1644 1661
1645 action = __free_irq(irq, dev_id); 1662 action = __free_irq(irq, dev_id);
1663
1664 if (!action)
1665 return NULL;
1666
1646 devname = action->name; 1667 devname = action->name;
1647 kfree(action); 1668 kfree(action);
1648 return devname; 1669 return devname;
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index ea34ed8bb952..055bb2962a0b 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -131,7 +131,7 @@ static int kcmp_epoll_target(struct task_struct *task1,
131 if (filp_epoll) { 131 if (filp_epoll) {
132 filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); 132 filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff);
133 fput(filp_epoll); 133 fput(filp_epoll);
134 } else 134 }
135 135
136 if (IS_ERR(filp_tgt)) 136 if (IS_ERR(filp_tgt))
137 return PTR_ERR(filp_tgt); 137 return PTR_ERR(filp_tgt);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e43c78f..bf8c8fd72589 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch)
830} 830}
831EXPORT_SYMBOL_GPL(klp_register_patch); 831EXPORT_SYMBOL_GPL(klp_register_patch);
832 832
833/*
834 * Remove parts of patches that touch a given kernel module. The list of
835 * patches processed might be limited. When limit is NULL, all patches
836 * will be handled.
837 */
838static void klp_cleanup_module_patches_limited(struct module *mod,
839 struct klp_patch *limit)
840{
841 struct klp_patch *patch;
842 struct klp_object *obj;
843
844 list_for_each_entry(patch, &klp_patches, list) {
845 if (patch == limit)
846 break;
847
848 klp_for_each_object(patch, obj) {
849 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
850 continue;
851
852 /*
853 * Only unpatch the module if the patch is enabled or
854 * is in transition.
855 */
856 if (patch->enabled || patch == klp_transition_patch) {
857 pr_notice("reverting patch '%s' on unloading module '%s'\n",
858 patch->mod->name, obj->mod->name);
859 klp_unpatch_object(obj);
860 }
861
862 klp_free_object_loaded(obj);
863 break;
864 }
865 }
866}
867
833int klp_module_coming(struct module *mod) 868int klp_module_coming(struct module *mod)
834{ 869{
835 int ret; 870 int ret;
@@ -894,7 +929,7 @@ err:
894 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", 929 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
895 patch->mod->name, obj->mod->name, obj->mod->name); 930 patch->mod->name, obj->mod->name, obj->mod->name);
896 mod->klp_alive = false; 931 mod->klp_alive = false;
897 klp_free_object_loaded(obj); 932 klp_cleanup_module_patches_limited(mod, patch);
898 mutex_unlock(&klp_mutex); 933 mutex_unlock(&klp_mutex);
899 934
900 return ret; 935 return ret;
@@ -902,9 +937,6 @@ err:
902 937
903void klp_module_going(struct module *mod) 938void klp_module_going(struct module *mod)
904{ 939{
905 struct klp_patch *patch;
906 struct klp_object *obj;
907
908 if (WARN_ON(mod->state != MODULE_STATE_GOING && 940 if (WARN_ON(mod->state != MODULE_STATE_GOING &&
909 mod->state != MODULE_STATE_COMING)) 941 mod->state != MODULE_STATE_COMING))
910 return; 942 return;
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod)
917 */ 949 */
918 mod->klp_alive = false; 950 mod->klp_alive = false;
919 951
920 list_for_each_entry(patch, &klp_patches, list) { 952 klp_cleanup_module_patches_limited(mod, NULL);
921 klp_for_each_object(patch, obj) {
922 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
923 continue;
924
925 /*
926 * Only unpatch the module if the patch is enabled or
927 * is in transition.
928 */
929 if (patch->enabled || patch == klp_transition_patch) {
930 pr_notice("reverting patch '%s' on unloading module '%s'\n",
931 patch->mod->name, obj->mod->name);
932 klp_unpatch_object(obj);
933 }
934
935 klp_free_object_loaded(obj);
936 break;
937 }
938 }
939 953
940 mutex_unlock(&klp_mutex); 954 mutex_unlock(&klp_mutex);
941} 955}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 44c8d0d17170..e36e652d996f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1873 struct held_lock *next, int distance, struct stack_trace *trace, 1873 struct held_lock *next, int distance, struct stack_trace *trace,
1874 int (*save)(struct stack_trace *trace)) 1874 int (*save)(struct stack_trace *trace))
1875{ 1875{
1876 struct lock_list *uninitialized_var(target_entry);
1876 struct lock_list *entry; 1877 struct lock_list *entry;
1877 int ret;
1878 struct lock_list this; 1878 struct lock_list this;
1879 struct lock_list *uninitialized_var(target_entry); 1879 int ret;
1880 1880
1881 /* 1881 /*
1882 * Prove that the new <prev> -> <next> dependency would not 1882 * Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1890 this.class = hlock_class(next); 1890 this.class = hlock_class(next);
1891 this.parent = NULL; 1891 this.parent = NULL;
1892 ret = check_noncircular(&this, hlock_class(prev), &target_entry); 1892 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1893 if (unlikely(!ret)) 1893 if (unlikely(!ret)) {
1894 if (!trace->entries) {
1895 /*
1896 * If @save fails here, the printing might trigger
1897 * a WARN but because of the !nr_entries it should
1898 * not do bad things.
1899 */
1900 save(trace);
1901 }
1894 return print_circular_bug(&this, target_entry, next, prev, trace); 1902 return print_circular_bug(&this, target_entry, next, prev, trace);
1903 }
1895 else if (unlikely(ret < 0)) 1904 else if (unlikely(ret < 0))
1896 return print_bfs_bug(ret); 1905 return print_bfs_bug(ret);
1897 1906
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1938 return print_bfs_bug(ret); 1947 return print_bfs_bug(ret);
1939 1948
1940 1949
1941 if (save && !save(trace)) 1950 if (!trace->entries && !save(trace))
1942 return 0; 1951 return 0;
1943 1952
1944 /* 1953 /*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1958 if (!ret) 1967 if (!ret)
1959 return 0; 1968 return 0;
1960 1969
1961 /*
1962 * Debugging printouts:
1963 */
1964 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1965 graph_unlock();
1966 printk("\n new dependency: ");
1967 print_lock_name(hlock_class(prev));
1968 printk(KERN_CONT " => ");
1969 print_lock_name(hlock_class(next));
1970 printk(KERN_CONT "\n");
1971 dump_stack();
1972 if (!graph_lock())
1973 return 0;
1974 }
1975 return 2; 1970 return 2;
1976} 1971}
1977 1972
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1986{ 1981{
1987 int depth = curr->lockdep_depth; 1982 int depth = curr->lockdep_depth;
1988 struct held_lock *hlock; 1983 struct held_lock *hlock;
1989 struct stack_trace trace; 1984 struct stack_trace trace = {
1990 int (*save)(struct stack_trace *trace) = save_trace; 1985 .nr_entries = 0,
1986 .max_entries = 0,
1987 .entries = NULL,
1988 .skip = 0,
1989 };
1991 1990
1992 /* 1991 /*
1993 * Debugging checks. 1992 * Debugging checks.
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
2018 */ 2017 */
2019 if (hlock->read != 2 && hlock->check) { 2018 if (hlock->read != 2 && hlock->check) {
2020 int ret = check_prev_add(curr, hlock, next, 2019 int ret = check_prev_add(curr, hlock, next,
2021 distance, &trace, save); 2020 distance, &trace, save_trace);
2022 if (!ret) 2021 if (!ret)
2023 return 0; 2022 return 0;
2024 2023
2025 /* 2024 /*
2026 * Stop saving stack_trace if save_trace() was
2027 * called at least once:
2028 */
2029 if (save && ret == 2)
2030 save = NULL;
2031
2032 /*
2033 * Stop after the first non-trylock entry, 2025 * Stop after the first non-trylock entry,
2034 * as non-trylock entries have added their 2026 * as non-trylock entries have added their
2035 * own direct dependencies already, so this 2027 * own direct dependencies already, so this
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 02f660666ab8..1fefe6dcafd7 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -613,6 +613,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
613 DEFINE_WAKE_Q(wake_q); 613 DEFINE_WAKE_Q(wake_q);
614 614
615 /* 615 /*
616 * __rwsem_down_write_failed_common(sem)
617 * rwsem_optimistic_spin(sem)
618 * osq_unlock(sem->osq)
619 * ...
620 * atomic_long_add_return(&sem->count)
621 *
622 * - VS -
623 *
624 * __up_write()
625 * if (atomic_long_sub_return_release(&sem->count) < 0)
626 * rwsem_wake(sem)
627 * osq_is_locked(&sem->osq)
628 *
629 * And __up_write() must observe !osq_is_locked() when it observes the
630 * atomic_long_add_return() in order to not miss a wakeup.
631 *
632 * This boils down to:
633 *
634 * [S.rel] X = 1 [RmW] r0 = (Y += 0)
635 * MB RMB
636 * [RmW] Y += 1 [L] r1 = X
637 *
638 * exists (r0=1 /\ r1=0)
639 */
640 smp_rmb();
641
642 /*
616 * If a spinner is present, it is not necessary to do the wakeup. 643 * If a spinner is present, it is not necessary to do the wakeup.
617 * Try to do wakeup only if the trylock succeeds to minimize 644 * Try to do wakeup only if the trylock succeeds to minimize
618 * spinlock contention which may introduce too much delay in the 645 * spinlock contention which may introduce too much delay in the
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 6bcbfbf1a8fd..403ab9cdb949 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -350,7 +350,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
350 pgprot_t pgprot = PAGE_KERNEL; 350 pgprot_t pgprot = PAGE_KERNEL;
351 struct dev_pagemap *pgmap; 351 struct dev_pagemap *pgmap;
352 struct page_map *page_map; 352 struct page_map *page_map;
353 int error, nid, is_ram; 353 int error, nid, is_ram, i = 0;
354 354
355 align_start = res->start & ~(SECTION_SIZE - 1); 355 align_start = res->start & ~(SECTION_SIZE - 1);
356 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) 356 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -448,6 +448,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
448 list_del(&page->lru); 448 list_del(&page->lru);
449 page->pgmap = pgmap; 449 page->pgmap = pgmap;
450 percpu_ref_get(ref); 450 percpu_ref_get(ref);
451 if (!(++i % 1024))
452 cond_resched();
451 } 453 }
452 devres_add(dev, page_map); 454 devres_add(dev, page_map);
453 return __va(res->start); 455 return __va(res->start);
diff --git a/kernel/params.c b/kernel/params.c
index 60b2d8101355..cc9108c2a1fd 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -224,7 +224,7 @@ char *parse_args(const char *doing,
224 } \ 224 } \
225 int param_get_##name(char *buffer, const struct kernel_param *kp) \ 225 int param_get_##name(char *buffer, const struct kernel_param *kp) \
226 { \ 226 { \
227 return scnprintf(buffer, PAGE_SIZE, format, \ 227 return scnprintf(buffer, PAGE_SIZE, format "\n", \
228 *((type *)kp->arg)); \ 228 *((type *)kp->arg)); \
229 } \ 229 } \
230 const struct kernel_param_ops param_ops_##name = { \ 230 const struct kernel_param_ops param_ops_##name = { \
@@ -236,14 +236,14 @@ char *parse_args(const char *doing,
236 EXPORT_SYMBOL(param_ops_##name) 236 EXPORT_SYMBOL(param_ops_##name)
237 237
238 238
239STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); 239STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8);
240STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); 240STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16);
241STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); 241STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16);
242STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); 242STANDARD_PARAM_DEF(int, int, "%i", kstrtoint);
243STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); 243STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint);
244STANDARD_PARAM_DEF(long, long, "%li", kstrtol); 244STANDARD_PARAM_DEF(long, long, "%li", kstrtol);
245STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); 245STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul);
246STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); 246STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull);
247 247
248int param_set_charp(const char *val, const struct kernel_param *kp) 248int param_set_charp(const char *val, const struct kernel_param *kp)
249{ 249{
@@ -270,7 +270,7 @@ EXPORT_SYMBOL(param_set_charp);
270 270
271int param_get_charp(char *buffer, const struct kernel_param *kp) 271int param_get_charp(char *buffer, const struct kernel_param *kp)
272{ 272{
273 return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg)); 273 return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg));
274} 274}
275EXPORT_SYMBOL(param_get_charp); 275EXPORT_SYMBOL(param_get_charp);
276 276
@@ -301,7 +301,7 @@ EXPORT_SYMBOL(param_set_bool);
301int param_get_bool(char *buffer, const struct kernel_param *kp) 301int param_get_bool(char *buffer, const struct kernel_param *kp)
302{ 302{
303 /* Y and N chosen as being relatively non-coder friendly */ 303 /* Y and N chosen as being relatively non-coder friendly */
304 return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N'); 304 return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N');
305} 305}
306EXPORT_SYMBOL(param_get_bool); 306EXPORT_SYMBOL(param_get_bool);
307 307
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(param_set_invbool);
360 360
361int param_get_invbool(char *buffer, const struct kernel_param *kp) 361int param_get_invbool(char *buffer, const struct kernel_param *kp)
362{ 362{
363 return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); 363 return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y');
364} 364}
365EXPORT_SYMBOL(param_get_invbool); 365EXPORT_SYMBOL(param_get_invbool);
366 366
@@ -460,8 +460,9 @@ static int param_array_get(char *buffer, const struct kernel_param *kp)
460 struct kernel_param p = *kp; 460 struct kernel_param p = *kp;
461 461
462 for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { 462 for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) {
463 /* Replace \n with comma */
463 if (i) 464 if (i)
464 buffer[off++] = ','; 465 buffer[off - 1] = ',';
465 p.arg = arr->elem + arr->elemsize * i; 466 p.arg = arr->elem + arr->elemsize * i;
466 check_kparam_locked(p.mod); 467 check_kparam_locked(p.mod);
467 ret = arr->ops->get(buffer + off, &p); 468 ret = arr->ops->get(buffer + off, &p);
@@ -507,7 +508,7 @@ EXPORT_SYMBOL(param_set_copystring);
507int param_get_string(char *buffer, const struct kernel_param *kp) 508int param_get_string(char *buffer, const struct kernel_param *kp)
508{ 509{
509 const struct kparam_string *kps = kp->str; 510 const struct kparam_string *kps = kp->str;
510 return strlcpy(buffer, kps->string, kps->maxlen); 511 return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string);
511} 512}
512EXPORT_SYMBOL(param_get_string); 513EXPORT_SYMBOL(param_get_string);
513 514
@@ -549,10 +550,6 @@ static ssize_t param_attr_show(struct module_attribute *mattr,
549 kernel_param_lock(mk->mod); 550 kernel_param_lock(mk->mod);
550 count = attribute->param->ops->get(buf, attribute->param); 551 count = attribute->param->ops->get(buf, attribute->param);
551 kernel_param_unlock(mk->mod); 552 kernel_param_unlock(mk->mod);
552 if (count > 0) {
553 strcat(buf, "\n");
554 ++count;
555 }
556 return count; 553 return count;
557} 554}
558 555
@@ -600,7 +597,7 @@ EXPORT_SYMBOL(kernel_param_unlock);
600/* 597/*
601 * add_sysfs_param - add a parameter to sysfs 598 * add_sysfs_param - add a parameter to sysfs
602 * @mk: struct module_kobject 599 * @mk: struct module_kobject
603 * @kparam: the actual parameter definition to add to sysfs 600 * @kp: the actual parameter definition to add to sysfs
604 * @name: name of parameter 601 * @name: name of parameter
605 * 602 *
606 * Create a kobject if for a (per-module) parameter if mp NULL, and 603 * Create a kobject if for a (per-module) parameter if mp NULL, and
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3e2b4f519009..ccd2d20e6b06 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -120,22 +120,26 @@ static void s2idle_loop(void)
120 * frozen processes + suspended devices + idle processors. 120 * frozen processes + suspended devices + idle processors.
121 * Thus s2idle_enter() should be called right after 121 * Thus s2idle_enter() should be called right after
122 * all devices have been suspended. 122 * all devices have been suspended.
123 *
124 * Wakeups during the noirq suspend of devices may be spurious,
125 * so prevent them from terminating the loop right away.
123 */ 126 */
124 error = dpm_noirq_suspend_devices(PMSG_SUSPEND); 127 error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
125 if (!error) 128 if (!error)
126 s2idle_enter(); 129 s2idle_enter();
130 else if (error == -EBUSY && pm_wakeup_pending())
131 error = 0;
127 132
128 dpm_noirq_resume_devices(PMSG_RESUME); 133 if (!error && s2idle_ops && s2idle_ops->wake)
129 if (error && (error != -EBUSY || !pm_wakeup_pending())) {
130 dpm_noirq_end();
131 break;
132 }
133
134 if (s2idle_ops && s2idle_ops->wake)
135 s2idle_ops->wake(); 134 s2idle_ops->wake();
136 135
136 dpm_noirq_resume_devices(PMSG_RESUME);
137
137 dpm_noirq_end(); 138 dpm_noirq_end();
138 139
140 if (error)
141 break;
142
139 if (s2idle_ops && s2idle_ops->sync) 143 if (s2idle_ops && s2idle_ops->sync)
140 s2idle_ops->sync(); 144 s2idle_ops->sync();
141 145
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 729a8706751d..6d5880089ff6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
854/** 854/**
855 * call_srcu() - Queue a callback for invocation after an SRCU grace period 855 * call_srcu() - Queue a callback for invocation after an SRCU grace period
856 * @sp: srcu_struct in queue the callback 856 * @sp: srcu_struct in queue the callback
857 * @head: structure to be used for queueing the SRCU callback. 857 * @rhp: structure to be used for queueing the SRCU callback.
858 * @func: function to be invoked after the SRCU grace period 858 * @func: function to be invoked after the SRCU grace period
859 * 859 *
860 * The callback function will be invoked some time after a full SRCU 860 * The callback function will be invoked some time after a full SRCU
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 50d1861f7759..3f943efcf61c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
85} 85}
86 86
87/** 87/**
88 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
89 * @rsp: Pointer to rcu_sync structure to use for synchronization
90 *
88 * Must be called after rcu_sync_init() and before first use. 91 * Must be called after rcu_sync_init() and before first use.
89 * 92 *
90 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() 93 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
142 145
143/** 146/**
144 * rcu_sync_func() - Callback function managing reader access to fastpath 147 * rcu_sync_func() - Callback function managing reader access to fastpath
145 * @rsp: Pointer to rcu_sync structure to use for synchronization 148 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
146 * 149 *
147 * This function is passed to one of the call_rcu() functions by 150 * This function is passed to one of the call_rcu() functions by
148 * rcu_sync_exit(), so that it is invoked after a grace period following the 151 * rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
158 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers 161 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
159 * can again use their fastpaths. 162 * can again use their fastpaths.
160 */ 163 */
161static void rcu_sync_func(struct rcu_head *rcu) 164static void rcu_sync_func(struct rcu_head *rhp)
162{ 165{
163 struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head); 166 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
164 unsigned long flags; 167 unsigned long flags;
165 168
166 BUG_ON(rsp->gp_state != GP_PASSED); 169 BUG_ON(rsp->gp_state != GP_PASSED);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1250e4bd4b85..3e3650e94ae6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -882,6 +882,11 @@ void rcu_irq_exit(void)
882 882
883 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); 883 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
884 rdtp = this_cpu_ptr(&rcu_dynticks); 884 rdtp = this_cpu_ptr(&rcu_dynticks);
885
886 /* Page faults can happen in NMI handlers, so check... */
887 if (rdtp->dynticks_nmi_nesting)
888 return;
889
885 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 890 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
886 rdtp->dynticks_nesting < 1); 891 rdtp->dynticks_nesting < 1);
887 if (rdtp->dynticks_nesting <= 1) { 892 if (rdtp->dynticks_nesting <= 1) {
@@ -1015,6 +1020,11 @@ void rcu_irq_enter(void)
1015 1020
1016 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!"); 1021 RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
1017 rdtp = this_cpu_ptr(&rcu_dynticks); 1022 rdtp = this_cpu_ptr(&rcu_dynticks);
1023
1024 /* Page faults can happen in NMI handlers, so check... */
1025 if (rdtp->dynticks_nmi_nesting)
1026 return;
1027
1018 oldval = rdtp->dynticks_nesting; 1028 oldval = rdtp->dynticks_nesting;
1019 rdtp->dynticks_nesting++; 1029 rdtp->dynticks_nesting++;
1020 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && 1030 WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
@@ -3087,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
3087 * read-side critical sections have completed. call_rcu_sched() assumes 3097 * read-side critical sections have completed. call_rcu_sched() assumes
3088 * that the read-side critical sections end on enabling of preemption 3098 * that the read-side critical sections end on enabling of preemption
3089 * or on voluntary preemption. 3099 * or on voluntary preemption.
3090 * RCU read-side critical sections are delimited by : 3100 * RCU read-side critical sections are delimited by:
3091 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR 3101 *
3092 * - anything that disables preemption. 3102 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
3103 * - anything that disables preemption.
3093 * 3104 *
3094 * These may be nested. 3105 * These may be nested.
3095 * 3106 *
@@ -3114,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
3114 * handler. This means that read-side critical sections in process 3125 * handler. This means that read-side critical sections in process
3115 * context must not be interrupted by softirqs. This interface is to be 3126 * context must not be interrupted by softirqs. This interface is to be
3116 * used when most of the read-side critical sections are in softirq context. 3127 * used when most of the read-side critical sections are in softirq context.
3117 * RCU read-side critical sections are delimited by : 3128 * RCU read-side critical sections are delimited by:
3118 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. 3129 *
3119 * OR 3130 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
3120 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. 3131 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
3121 * These may be nested. 3132 *
3133 * These may be nested.
3122 * 3134 *
3123 * See the description of call_rcu() for more detailed information on 3135 * See the description of call_rcu() for more detailed information on
3124 * memory ordering guarantees. 3136 * memory ordering guarantees.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 18a6966567da..d17c5da523a0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5166,6 +5166,28 @@ void sched_show_task(struct task_struct *p)
5166 put_task_stack(p); 5166 put_task_stack(p);
5167} 5167}
5168 5168
5169static inline bool
5170state_filter_match(unsigned long state_filter, struct task_struct *p)
5171{
5172 /* no filter, everything matches */
5173 if (!state_filter)
5174 return true;
5175
5176 /* filter, but doesn't match */
5177 if (!(p->state & state_filter))
5178 return false;
5179
5180 /*
5181 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
5182 * TASK_KILLABLE).
5183 */
5184 if (state_filter == TASK_UNINTERRUPTIBLE && p->state == TASK_IDLE)
5185 return false;
5186
5187 return true;
5188}
5189
5190
5169void show_state_filter(unsigned long state_filter) 5191void show_state_filter(unsigned long state_filter)
5170{ 5192{
5171 struct task_struct *g, *p; 5193 struct task_struct *g, *p;
@@ -5188,7 +5210,7 @@ void show_state_filter(unsigned long state_filter)
5188 */ 5210 */
5189 touch_nmi_watchdog(); 5211 touch_nmi_watchdog();
5190 touch_all_softlockup_watchdogs(); 5212 touch_all_softlockup_watchdogs();
5191 if (!state_filter || (p->state & state_filter)) 5213 if (state_filter_match(state_filter, p))
5192 sched_show_task(p); 5214 sched_show_task(p);
5193 } 5215 }
5194 5216
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 01217fb5a5de..2f93e4a2d9f6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -466,8 +466,6 @@ static char *task_group_path(struct task_group *tg)
466} 466}
467#endif 467#endif
468 468
469static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
470
471static void 469static void
472print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) 470print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
473{ 471{
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 70ba32e08a23..d3f3094856fe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p)
5356 return 1; 5356 return 1;
5357} 5357}
5358 5358
5359struct llc_stats { 5359/*
5360 unsigned long nr_running; 5360 * The purpose of wake_affine() is to quickly determine on which CPU we can run
5361 unsigned long load; 5361 * soonest. For the purpose of speed we only consider the waking and previous
5362 unsigned long capacity; 5362 * CPU.
5363 int has_capacity; 5363 *
5364}; 5364 * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
5365 * will be) idle.
5366 *
5367 * wake_affine_weight() - considers the weight to reflect the average
5368 * scheduling latency of the CPUs. This seems to work
5369 * for the overloaded case.
5370 */
5365 5371
5366static bool get_llc_stats(struct llc_stats *stats, int cpu) 5372static bool
5373wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
5374 int this_cpu, int prev_cpu, int sync)
5367{ 5375{
5368 struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); 5376 if (idle_cpu(this_cpu))
5369 5377 return true;
5370 if (!sds)
5371 return false;
5372 5378
5373 stats->nr_running = READ_ONCE(sds->nr_running); 5379 if (sync && cpu_rq(this_cpu)->nr_running == 1)
5374 stats->load = READ_ONCE(sds->load); 5380 return true;
5375 stats->capacity = READ_ONCE(sds->capacity);
5376 stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu);
5377 5381
5378 return true; 5382 return false;
5379} 5383}
5380 5384
5381/*
5382 * Can a task be moved from prev_cpu to this_cpu without causing a load
5383 * imbalance that would trigger the load balancer?
5384 *
5385 * Since we're running on 'stale' values, we might in fact create an imbalance
5386 * but recomputing these values is expensive, as that'd mean iteration 2 cache
5387 * domains worth of CPUs.
5388 */
5389static bool 5385static bool
5390wake_affine_llc(struct sched_domain *sd, struct task_struct *p, 5386wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
5391 int this_cpu, int prev_cpu, int sync) 5387 int this_cpu, int prev_cpu, int sync)
5392{ 5388{
5393 struct llc_stats prev_stats, this_stats;
5394 s64 this_eff_load, prev_eff_load; 5389 s64 this_eff_load, prev_eff_load;
5395 unsigned long task_load; 5390 unsigned long task_load;
5396 5391
5397 if (!get_llc_stats(&prev_stats, prev_cpu) || 5392 this_eff_load = target_load(this_cpu, sd->wake_idx);
5398 !get_llc_stats(&this_stats, this_cpu)) 5393 prev_eff_load = source_load(prev_cpu, sd->wake_idx);
5399 return false;
5400 5394
5401 /*
5402 * If sync wakeup then subtract the (maximum possible)
5403 * effect of the currently running task from the load
5404 * of the current LLC.
5405 */
5406 if (sync) { 5395 if (sync) {
5407 unsigned long current_load = task_h_load(current); 5396 unsigned long current_load = task_h_load(current);
5408 5397
5409 /* in this case load hits 0 and this LLC is considered 'idle' */ 5398 if (current_load > this_eff_load)
5410 if (current_load > this_stats.load)
5411 return true; 5399 return true;
5412 5400
5413 this_stats.load -= current_load; 5401 this_eff_load -= current_load;
5414 } 5402 }
5415 5403
5416 /*
5417 * The has_capacity stuff is not SMT aware, but by trying to balance
5418 * the nr_running on both ends we try and fill the domain at equal
5419 * rates, thereby first consuming cores before siblings.
5420 */
5421
5422 /* if the old cache has capacity, stay there */
5423 if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1)
5424 return false;
5425
5426 /* if this cache has capacity, come here */
5427 if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running)
5428 return true;
5429
5430 /*
5431 * Check to see if we can move the load without causing too much
5432 * imbalance.
5433 */
5434 task_load = task_h_load(p); 5404 task_load = task_h_load(p);
5435 5405
5436 this_eff_load = 100; 5406 this_eff_load += task_load;
5437 this_eff_load *= prev_stats.capacity; 5407 if (sched_feat(WA_BIAS))
5438 5408 this_eff_load *= 100;
5439 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; 5409 this_eff_load *= capacity_of(prev_cpu);
5440 prev_eff_load *= this_stats.capacity;
5441 5410
5442 this_eff_load *= this_stats.load + task_load; 5411 prev_eff_load -= task_load;
5443 prev_eff_load *= prev_stats.load - task_load; 5412 if (sched_feat(WA_BIAS))
5413 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
5414 prev_eff_load *= capacity_of(this_cpu);
5444 5415
5445 return this_eff_load <= prev_eff_load; 5416 return this_eff_load <= prev_eff_load;
5446} 5417}
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5449 int prev_cpu, int sync) 5420 int prev_cpu, int sync)
5450{ 5421{
5451 int this_cpu = smp_processor_id(); 5422 int this_cpu = smp_processor_id();
5452 bool affine; 5423 bool affine = false;
5453 5424
5454 /* 5425 if (sched_feat(WA_IDLE) && !affine)
5455 * Default to no affine wakeups; wake_affine() should not effect a task 5426 affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
5456 * placement the load-balancer feels inclined to undo. The conservative
5457 * option is therefore to not move tasks when they wake up.
5458 */
5459 affine = false;
5460 5427
5461 /* 5428 if (sched_feat(WA_WEIGHT) && !affine)
5462 * If the wakeup is across cache domains, try to evaluate if movement 5429 affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
5463 * makes sense, otherwise rely on select_idle_siblings() to do
5464 * placement inside the cache domain.
5465 */
5466 if (!cpus_share_cache(prev_cpu, this_cpu))
5467 affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync);
5468 5430
5469 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); 5431 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
5470 if (affine) { 5432 if (affine) {
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7600 */ 7562 */
7601static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) 7563static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
7602{ 7564{
7603 struct sched_domain_shared *shared = env->sd->shared;
7604 struct sched_domain *child = env->sd->child; 7565 struct sched_domain *child = env->sd->child;
7605 struct sched_group *sg = env->sd->groups; 7566 struct sched_group *sg = env->sd->groups;
7606 struct sg_lb_stats *local = &sds->local_stat; 7567 struct sg_lb_stats *local = &sds->local_stat;
@@ -7672,22 +7633,6 @@ next_group:
7672 if (env->dst_rq->rd->overload != overload) 7633 if (env->dst_rq->rd->overload != overload)
7673 env->dst_rq->rd->overload = overload; 7634 env->dst_rq->rd->overload = overload;
7674 } 7635 }
7675
7676 if (!shared)
7677 return;
7678
7679 /*
7680 * Since these are sums over groups they can contain some CPUs
7681 * multiple times for the NUMA domains.
7682 *
7683 * Currently only wake_affine_llc() and find_busiest_group()
7684 * uses these numbers, only the last is affected by this problem.
7685 *
7686 * XXX fix that.
7687 */
7688 WRITE_ONCE(shared->nr_running, sds->total_running);
7689 WRITE_ONCE(shared->load, sds->total_load);
7690 WRITE_ONCE(shared->capacity, sds->total_capacity);
7691} 7636}
7692 7637
7693/** 7638/**
@@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env)
8098 int cpu, balance_cpu = -1; 8043 int cpu, balance_cpu = -1;
8099 8044
8100 /* 8045 /*
8046 * Ensure the balancing environment is consistent; can happen
8047 * when the softirq triggers 'during' hotplug.
8048 */
8049 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
8050 return 0;
8051
8052 /*
8101 * In the newly idle case, we will allow all the cpu's 8053 * In the newly idle case, we will allow all the cpu's
8102 * to do the newly idle load balance. 8054 * to do the newly idle load balance.
8103 */ 8055 */
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index d3fb15555291..319ed0e8a347 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
81SCHED_FEAT(LB_MIN, false) 81SCHED_FEAT(LB_MIN, false)
82SCHED_FEAT(ATTACH_AGE_LOAD, true) 82SCHED_FEAT(ATTACH_AGE_LOAD, true)
83 83
84SCHED_FEAT(WA_IDLE, true)
85SCHED_FEAT(WA_WEIGHT, true)
86SCHED_FEAT(WA_BIAS, true)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a92fddc22747..dd7908743dab 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -18,6 +18,7 @@
18#include <linux/membarrier.h> 18#include <linux/membarrier.h>
19#include <linux/tick.h> 19#include <linux/tick.h>
20#include <linux/cpumask.h> 20#include <linux/cpumask.h>
21#include <linux/atomic.h>
21 22
22#include "sched.h" /* for cpu_rq(). */ 23#include "sched.h" /* for cpu_rq(). */
23 24
@@ -26,21 +27,26 @@
26 * except MEMBARRIER_CMD_QUERY. 27 * except MEMBARRIER_CMD_QUERY.
27 */ 28 */
28#define MEMBARRIER_CMD_BITMASK \ 29#define MEMBARRIER_CMD_BITMASK \
29 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED) 30 (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
31 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
30 32
31static void ipi_mb(void *info) 33static void ipi_mb(void *info)
32{ 34{
33 smp_mb(); /* IPIs should be serializing but paranoid. */ 35 smp_mb(); /* IPIs should be serializing but paranoid. */
34} 36}
35 37
36static void membarrier_private_expedited(void) 38static int membarrier_private_expedited(void)
37{ 39{
38 int cpu; 40 int cpu;
39 bool fallback = false; 41 bool fallback = false;
40 cpumask_var_t tmpmask; 42 cpumask_var_t tmpmask;
41 43
44 if (!(atomic_read(&current->mm->membarrier_state)
45 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
46 return -EPERM;
47
42 if (num_online_cpus() == 1) 48 if (num_online_cpus() == 1)
43 return; 49 return 0;
44 50
45 /* 51 /*
46 * Matches memory barriers around rq->curr modification in 52 * Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
94 * rq->curr modification in scheduler. 100 * rq->curr modification in scheduler.
95 */ 101 */
96 smp_mb(); /* exit from system call is not a mb */ 102 smp_mb(); /* exit from system call is not a mb */
103 return 0;
104}
105
106static void membarrier_register_private_expedited(void)
107{
108 struct task_struct *p = current;
109 struct mm_struct *mm = p->mm;
110
111 /*
112 * We need to consider threads belonging to different thread
113 * groups, which use the same mm. (CLONE_VM but not
114 * CLONE_THREAD).
115 */
116 if (atomic_read(&mm->membarrier_state)
117 & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
118 return;
119 atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
120 &mm->membarrier_state);
97} 121}
98 122
99/** 123/**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
144 synchronize_sched(); 168 synchronize_sched();
145 return 0; 169 return 0;
146 case MEMBARRIER_CMD_PRIVATE_EXPEDITED: 170 case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
147 membarrier_private_expedited(); 171 return membarrier_private_expedited();
172 case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
173 membarrier_register_private_expedited();
148 return 0; 174 return 0;
149 default: 175 default:
150 return -EINVAL; 176 return -EINVAL;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index c24579dfa7a1..0ae832e13b97 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -473,14 +473,19 @@ static long seccomp_attach_filter(unsigned int flags,
473 return 0; 473 return 0;
474} 474}
475 475
476static void __get_seccomp_filter(struct seccomp_filter *filter)
477{
478 /* Reference count is bounded by the number of total processes. */
479 refcount_inc(&filter->usage);
480}
481
476/* get_seccomp_filter - increments the reference count of the filter on @tsk */ 482/* get_seccomp_filter - increments the reference count of the filter on @tsk */
477void get_seccomp_filter(struct task_struct *tsk) 483void get_seccomp_filter(struct task_struct *tsk)
478{ 484{
479 struct seccomp_filter *orig = tsk->seccomp.filter; 485 struct seccomp_filter *orig = tsk->seccomp.filter;
480 if (!orig) 486 if (!orig)
481 return; 487 return;
482 /* Reference count is bounded by the number of total processes. */ 488 __get_seccomp_filter(orig);
483 refcount_inc(&orig->usage);
484} 489}
485 490
486static inline void seccomp_filter_free(struct seccomp_filter *filter) 491static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -491,10 +496,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
491 } 496 }
492} 497}
493 498
494/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */ 499static void __put_seccomp_filter(struct seccomp_filter *orig)
495void put_seccomp_filter(struct task_struct *tsk)
496{ 500{
497 struct seccomp_filter *orig = tsk->seccomp.filter;
498 /* Clean up single-reference branches iteratively. */ 501 /* Clean up single-reference branches iteratively. */
499 while (orig && refcount_dec_and_test(&orig->usage)) { 502 while (orig && refcount_dec_and_test(&orig->usage)) {
500 struct seccomp_filter *freeme = orig; 503 struct seccomp_filter *freeme = orig;
@@ -503,6 +506,12 @@ void put_seccomp_filter(struct task_struct *tsk)
503 } 506 }
504} 507}
505 508
509/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
510void put_seccomp_filter(struct task_struct *tsk)
511{
512 __put_seccomp_filter(tsk->seccomp.filter);
513}
514
506static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason) 515static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
507{ 516{
508 memset(info, 0, sizeof(*info)); 517 memset(info, 0, sizeof(*info));
@@ -1025,13 +1034,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
1025 if (!data) 1034 if (!data)
1026 goto out; 1035 goto out;
1027 1036
1028 get_seccomp_filter(task); 1037 __get_seccomp_filter(filter);
1029 spin_unlock_irq(&task->sighand->siglock); 1038 spin_unlock_irq(&task->sighand->siglock);
1030 1039
1031 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog))) 1040 if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
1032 ret = -EFAULT; 1041 ret = -EFAULT;
1033 1042
1034 put_seccomp_filter(task); 1043 __put_seccomp_filter(filter);
1035 return ret; 1044 return ret;
1036 1045
1037out: 1046out:
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 1d71c051a951..5043e7433f4b 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
344 * by the client, but only by calling this function. 344 * by the client, but only by calling this function.
345 * This function can only be called on a registered smp_hotplug_thread. 345 * This function can only be called on a registered smp_hotplug_thread.
346 */ 346 */
347int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, 347void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
348 const struct cpumask *new) 348 const struct cpumask *new)
349{ 349{
350 struct cpumask *old = plug_thread->cpumask; 350 struct cpumask *old = plug_thread->cpumask;
351 cpumask_var_t tmp; 351 static struct cpumask tmp;
352 unsigned int cpu; 352 unsigned int cpu;
353 353
354 if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) 354 lockdep_assert_cpus_held();
355 return -ENOMEM;
356
357 get_online_cpus();
358 mutex_lock(&smpboot_threads_lock); 355 mutex_lock(&smpboot_threads_lock);
359 356
360 /* Park threads that were exclusively enabled on the old mask. */ 357 /* Park threads that were exclusively enabled on the old mask. */
361 cpumask_andnot(tmp, old, new); 358 cpumask_andnot(&tmp, old, new);
362 for_each_cpu_and(cpu, tmp, cpu_online_mask) 359 for_each_cpu_and(cpu, &tmp, cpu_online_mask)
363 smpboot_park_thread(plug_thread, cpu); 360 smpboot_park_thread(plug_thread, cpu);
364 361
365 /* Unpark threads that are exclusively enabled on the new mask. */ 362 /* Unpark threads that are exclusively enabled on the new mask. */
366 cpumask_andnot(tmp, new, old); 363 cpumask_andnot(&tmp, new, old);
367 for_each_cpu_and(cpu, tmp, cpu_online_mask) 364 for_each_cpu_and(cpu, &tmp, cpu_online_mask)
368 smpboot_unpark_thread(plug_thread, cpu); 365 smpboot_unpark_thread(plug_thread, cpu);
369 366
370 cpumask_copy(old, new); 367 cpumask_copy(old, new);
371 368
372 mutex_unlock(&smpboot_threads_lock); 369 mutex_unlock(&smpboot_threads_lock);
373 put_online_cpus();
374
375 free_cpumask_var(tmp);
376
377 return 0;
378} 370}
379EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
380 371
381static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD); 372static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
382 373
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6648fbbb8157..d9c31bc2eaea 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -367,7 +367,8 @@ static struct ctl_table kern_table[] = {
367 .data = &sysctl_sched_time_avg, 367 .data = &sysctl_sched_time_avg,
368 .maxlen = sizeof(unsigned int), 368 .maxlen = sizeof(unsigned int),
369 .mode = 0644, 369 .mode = 0644,
370 .proc_handler = proc_dointvec, 370 .proc_handler = proc_dointvec_minmax,
371 .extra1 = &one,
371 }, 372 },
372#ifdef CONFIG_SCHEDSTATS 373#ifdef CONFIG_SCHEDSTATS
373 { 374 {
@@ -871,9 +872,9 @@ static struct ctl_table kern_table[] = {
871#if defined(CONFIG_LOCKUP_DETECTOR) 872#if defined(CONFIG_LOCKUP_DETECTOR)
872 { 873 {
873 .procname = "watchdog", 874 .procname = "watchdog",
874 .data = &watchdog_user_enabled, 875 .data = &watchdog_user_enabled,
875 .maxlen = sizeof (int), 876 .maxlen = sizeof(int),
876 .mode = 0644, 877 .mode = 0644,
877 .proc_handler = proc_watchdog, 878 .proc_handler = proc_watchdog,
878 .extra1 = &zero, 879 .extra1 = &zero,
879 .extra2 = &one, 880 .extra2 = &one,
@@ -889,16 +890,12 @@ static struct ctl_table kern_table[] = {
889 }, 890 },
890 { 891 {
891 .procname = "nmi_watchdog", 892 .procname = "nmi_watchdog",
892 .data = &nmi_watchdog_enabled, 893 .data = &nmi_watchdog_user_enabled,
893 .maxlen = sizeof (int), 894 .maxlen = sizeof(int),
894 .mode = 0644, 895 .mode = NMI_WATCHDOG_SYSCTL_PERM,
895 .proc_handler = proc_nmi_watchdog, 896 .proc_handler = proc_nmi_watchdog,
896 .extra1 = &zero, 897 .extra1 = &zero,
897#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
898 .extra2 = &one, 898 .extra2 = &one,
899#else
900 .extra2 = &zero,
901#endif
902 }, 899 },
903 { 900 {
904 .procname = "watchdog_cpumask", 901 .procname = "watchdog_cpumask",
@@ -910,9 +907,9 @@ static struct ctl_table kern_table[] = {
910#ifdef CONFIG_SOFTLOCKUP_DETECTOR 907#ifdef CONFIG_SOFTLOCKUP_DETECTOR
911 { 908 {
912 .procname = "soft_watchdog", 909 .procname = "soft_watchdog",
913 .data = &soft_watchdog_enabled, 910 .data = &soft_watchdog_user_enabled,
914 .maxlen = sizeof (int), 911 .maxlen = sizeof(int),
915 .mode = 0644, 912 .mode = 0644,
916 .proc_handler = proc_soft_watchdog, 913 .proc_handler = proc_soft_watchdog,
917 .extra1 = &zero, 914 .extra1 = &zero,
918 .extra2 = &one, 915 .extra2 = &one,
@@ -2187,8 +2184,6 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,
2187 if (write) { 2184 if (write) {
2188 if (*lvalp > UINT_MAX) 2185 if (*lvalp > UINT_MAX)
2189 return -EINVAL; 2186 return -EINVAL;
2190 if (*lvalp > UINT_MAX)
2191 return -EINVAL;
2192 *valp = *lvalp; 2187 *valp = *lvalp;
2193 } else { 2188 } else {
2194 unsigned int val = *valp; 2189 unsigned int val = *valp;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2a685b45b73b..45a3928544ce 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -648,6 +648,12 @@ int blk_trace_startstop(struct request_queue *q, int start)
648} 648}
649EXPORT_SYMBOL_GPL(blk_trace_startstop); 649EXPORT_SYMBOL_GPL(blk_trace_startstop);
650 650
651/*
652 * When reading or writing the blktrace sysfs files, the references to the
653 * opened sysfs or device files should prevent the underlying block device
654 * from being removed. So no further delete protection is really needed.
655 */
656
651/** 657/**
652 * blk_trace_ioctl: - handle the ioctls associated with tracing 658 * blk_trace_ioctl: - handle the ioctls associated with tracing
653 * @bdev: the block device 659 * @bdev: the block device
@@ -665,7 +671,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
665 if (!q) 671 if (!q)
666 return -ENXIO; 672 return -ENXIO;
667 673
668 mutex_lock(&bdev->bd_mutex); 674 mutex_lock(&q->blk_trace_mutex);
669 675
670 switch (cmd) { 676 switch (cmd) {
671 case BLKTRACESETUP: 677 case BLKTRACESETUP:
@@ -691,7 +697,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
691 break; 697 break;
692 } 698 }
693 699
694 mutex_unlock(&bdev->bd_mutex); 700 mutex_unlock(&q->blk_trace_mutex);
695 return ret; 701 return ret;
696} 702}
697 703
@@ -1727,7 +1733,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1727 if (q == NULL) 1733 if (q == NULL)
1728 goto out_bdput; 1734 goto out_bdput;
1729 1735
1730 mutex_lock(&bdev->bd_mutex); 1736 mutex_lock(&q->blk_trace_mutex);
1731 1737
1732 if (attr == &dev_attr_enable) { 1738 if (attr == &dev_attr_enable) {
1733 ret = sprintf(buf, "%u\n", !!q->blk_trace); 1739 ret = sprintf(buf, "%u\n", !!q->blk_trace);
@@ -1746,7 +1752,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
1746 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); 1752 ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
1747 1753
1748out_unlock_bdev: 1754out_unlock_bdev:
1749 mutex_unlock(&bdev->bd_mutex); 1755 mutex_unlock(&q->blk_trace_mutex);
1750out_bdput: 1756out_bdput:
1751 bdput(bdev); 1757 bdput(bdev);
1752out: 1758out:
@@ -1788,7 +1794,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1788 if (q == NULL) 1794 if (q == NULL)
1789 goto out_bdput; 1795 goto out_bdput;
1790 1796
1791 mutex_lock(&bdev->bd_mutex); 1797 mutex_lock(&q->blk_trace_mutex);
1792 1798
1793 if (attr == &dev_attr_enable) { 1799 if (attr == &dev_attr_enable) {
1794 if (value) 1800 if (value)
@@ -1814,7 +1820,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
1814 } 1820 }
1815 1821
1816out_unlock_bdev: 1822out_unlock_bdev:
1817 mutex_unlock(&bdev->bd_mutex); 1823 mutex_unlock(&q->blk_trace_mutex);
1818out_bdput: 1824out_bdput:
1819 bdput(bdev); 1825 bdput(bdev);
1820out: 1826out:
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6abfafd7f173..8319e09e15b9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4954static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4954static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4955static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 4955static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4956 4956
4957static unsigned long save_global_trampoline;
4958static unsigned long save_global_flags;
4959
4960static int __init set_graph_function(char *str) 4957static int __init set_graph_function(char *str)
4961{ 4958{
4962 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 4959 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -6808,17 +6805,6 @@ void unregister_ftrace_graph(void)
6808 unregister_pm_notifier(&ftrace_suspend_notifier); 6805 unregister_pm_notifier(&ftrace_suspend_notifier);
6809 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 6806 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
6810 6807
6811#ifdef CONFIG_DYNAMIC_FTRACE
6812 /*
6813 * Function graph does not allocate the trampoline, but
6814 * other global_ops do. We need to reset the ALLOC_TRAMP flag
6815 * if one was used.
6816 */
6817 global_ops.trampoline = save_global_trampoline;
6818 if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
6819 global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
6820#endif
6821
6822 out: 6808 out:
6823 mutex_unlock(&ftrace_lock); 6809 mutex_unlock(&ftrace_lock);
6824} 6810}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index bac629af2285..c738e764e2a5 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -656,15 +656,6 @@ int trace_print_lat_context(struct trace_iterator *iter)
656 return !trace_seq_has_overflowed(s); 656 return !trace_seq_has_overflowed(s);
657} 657}
658 658
659static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
660
661static int task_state_char(unsigned long state)
662{
663 int bit = state ? __ffs(state) + 1 : 0;
664
665 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
666}
667
668/** 659/**
669 * ftrace_find_event - find a registered event 660 * ftrace_find_event - find a registered event
670 * @type: the type of event to look for 661 * @type: the type of event to look for
@@ -930,8 +921,8 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
930 921
931 trace_assign_type(field, iter->ent); 922 trace_assign_type(field, iter->ent);
932 923
933 T = task_state_char(field->next_state); 924 T = __task_state_to_char(field->next_state);
934 S = task_state_char(field->prev_state); 925 S = __task_state_to_char(field->prev_state);
935 trace_find_cmdline(field->next_pid, comm); 926 trace_find_cmdline(field->next_pid, comm);
936 trace_seq_printf(&iter->seq, 927 trace_seq_printf(&iter->seq,
937 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", 928 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
@@ -966,8 +957,8 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
966 trace_assign_type(field, iter->ent); 957 trace_assign_type(field, iter->ent);
967 958
968 if (!S) 959 if (!S)
969 S = task_state_char(field->prev_state); 960 S = __task_state_to_char(field->prev_state);
970 T = task_state_char(field->next_state); 961 T = __task_state_to_char(field->next_state);
971 trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", 962 trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
972 field->prev_pid, 963 field->prev_pid,
973 field->prev_prio, 964 field->prev_prio,
@@ -1002,8 +993,8 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
1002 trace_assign_type(field, iter->ent); 993 trace_assign_type(field, iter->ent);
1003 994
1004 if (!S) 995 if (!S)
1005 S = task_state_char(field->prev_state); 996 S = __task_state_to_char(field->prev_state);
1006 T = task_state_char(field->next_state); 997 T = __task_state_to_char(field->next_state);
1007 998
1008 SEQ_PUT_HEX_FIELD(s, field->prev_pid); 999 SEQ_PUT_HEX_FIELD(s, field->prev_pid);
1009 SEQ_PUT_HEX_FIELD(s, field->prev_prio); 1000 SEQ_PUT_HEX_FIELD(s, field->prev_prio);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index ddec53b67646..0c331978b1a6 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -397,10 +397,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
397 entry = ring_buffer_event_data(event); 397 entry = ring_buffer_event_data(event);
398 entry->prev_pid = prev->pid; 398 entry->prev_pid = prev->pid;
399 entry->prev_prio = prev->prio; 399 entry->prev_prio = prev->prio;
400 entry->prev_state = prev->state; 400 entry->prev_state = __get_task_state(prev);
401 entry->next_pid = next->pid; 401 entry->next_pid = next->pid;
402 entry->next_prio = next->prio; 402 entry->next_prio = next->prio;
403 entry->next_state = next->state; 403 entry->next_state = __get_task_state(next);
404 entry->next_cpu = task_cpu(next); 404 entry->next_cpu = task_cpu(next);
405 405
406 if (!call_filter_check_discard(call, entry, buffer, event)) 406 if (!call_filter_check_discard(call, entry, buffer, event))
@@ -425,10 +425,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
425 entry = ring_buffer_event_data(event); 425 entry = ring_buffer_event_data(event);
426 entry->prev_pid = curr->pid; 426 entry->prev_pid = curr->pid;
427 entry->prev_prio = curr->prio; 427 entry->prev_prio = curr->prio;
428 entry->prev_state = curr->state; 428 entry->prev_state = __get_task_state(curr);
429 entry->next_pid = wakee->pid; 429 entry->next_pid = wakee->pid;
430 entry->next_prio = wakee->prio; 430 entry->next_prio = wakee->prio;
431 entry->next_state = wakee->state; 431 entry->next_state = __get_task_state(wakee);
432 entry->next_cpu = task_cpu(wakee); 432 entry->next_cpu = task_cpu(wakee);
433 433
434 if (!call_filter_check_discard(call, entry, buffer, event)) 434 if (!call_filter_check_discard(call, entry, buffer, event))
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index a4df67cbc711..49cb41412eec 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -96,23 +96,9 @@ check_stack(unsigned long ip, unsigned long *stack)
96 if (in_nmi()) 96 if (in_nmi())
97 return; 97 return;
98 98
99 /*
100 * There's a slight chance that we are tracing inside the
101 * RCU infrastructure, and rcu_irq_enter() will not work
102 * as expected.
103 */
104 if (unlikely(rcu_irq_enter_disabled()))
105 return;
106
107 local_irq_save(flags); 99 local_irq_save(flags);
108 arch_spin_lock(&stack_trace_max_lock); 100 arch_spin_lock(&stack_trace_max_lock);
109 101
110 /*
111 * RCU may not be watching, make it see us.
112 * The stack trace code uses rcu_sched.
113 */
114 rcu_irq_enter();
115
116 /* In case another CPU set the tracer_frame on us */ 102 /* In case another CPU set the tracer_frame on us */
117 if (unlikely(!frame_size)) 103 if (unlikely(!frame_size))
118 this_size -= tracer_frame; 104 this_size -= tracer_frame;
@@ -205,7 +191,6 @@ check_stack(unsigned long ip, unsigned long *stack)
205 } 191 }
206 192
207 out: 193 out:
208 rcu_irq_exit();
209 arch_spin_unlock(&stack_trace_max_lock); 194 arch_spin_unlock(&stack_trace_max_lock);
210 local_irq_restore(flags); 195 local_irq_restore(flags);
211} 196}
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index f5d52024f6b7..6bcb854909c0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,20 +29,29 @@
29#include <linux/kvm_para.h> 29#include <linux/kvm_para.h>
30#include <linux/kthread.h> 30#include <linux/kthread.h>
31 31
32/* Watchdog configuration */ 32static DEFINE_MUTEX(watchdog_mutex);
33static DEFINE_MUTEX(watchdog_proc_mutex);
34
35int __read_mostly nmi_watchdog_enabled;
36 33
37#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) 34#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
38unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED | 35# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
39 NMI_WATCHDOG_ENABLED; 36# define NMI_WATCHDOG_DEFAULT 1
40#else 37#else
41unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; 38# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED)
39# define NMI_WATCHDOG_DEFAULT 0
42#endif 40#endif
43 41
42unsigned long __read_mostly watchdog_enabled;
43int __read_mostly watchdog_user_enabled = 1;
44int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
45int __read_mostly soft_watchdog_user_enabled = 1;
46int __read_mostly watchdog_thresh = 10;
47int __read_mostly nmi_watchdog_available;
48
49struct cpumask watchdog_allowed_mask __read_mostly;
50
51struct cpumask watchdog_cpumask __read_mostly;
52unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
53
44#ifdef CONFIG_HARDLOCKUP_DETECTOR 54#ifdef CONFIG_HARDLOCKUP_DETECTOR
45/* boot commands */
46/* 55/*
47 * Should we panic when a soft-lockup or hard-lockup occurs: 56 * Should we panic when a soft-lockup or hard-lockup occurs:
48 */ 57 */
@@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic =
56 * kernel command line parameters are parsed, because otherwise it is not 65 * kernel command line parameters are parsed, because otherwise it is not
57 * possible to override this in hardlockup_panic_setup(). 66 * possible to override this in hardlockup_panic_setup().
58 */ 67 */
59void hardlockup_detector_disable(void) 68void __init hardlockup_detector_disable(void)
60{ 69{
61 watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; 70 nmi_watchdog_user_enabled = 0;
62} 71}
63 72
64static int __init hardlockup_panic_setup(char *str) 73static int __init hardlockup_panic_setup(char *str)
@@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str)
68 else if (!strncmp(str, "nopanic", 7)) 77 else if (!strncmp(str, "nopanic", 7))
69 hardlockup_panic = 0; 78 hardlockup_panic = 0;
70 else if (!strncmp(str, "0", 1)) 79 else if (!strncmp(str, "0", 1))
71 watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; 80 nmi_watchdog_user_enabled = 0;
72 else if (!strncmp(str, "1", 1)) 81 else if (!strncmp(str, "1", 1))
73 watchdog_enabled |= NMI_WATCHDOG_ENABLED; 82 nmi_watchdog_user_enabled = 1;
74 return 1; 83 return 1;
75} 84}
76__setup("nmi_watchdog=", hardlockup_panic_setup); 85__setup("nmi_watchdog=", hardlockup_panic_setup);
77 86
78#endif 87# ifdef CONFIG_SMP
79
80#ifdef CONFIG_SOFTLOCKUP_DETECTOR
81int __read_mostly soft_watchdog_enabled;
82#endif
83
84int __read_mostly watchdog_user_enabled;
85int __read_mostly watchdog_thresh = 10;
86
87#ifdef CONFIG_SMP
88int __read_mostly sysctl_softlockup_all_cpu_backtrace;
89int __read_mostly sysctl_hardlockup_all_cpu_backtrace; 88int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
90#endif
91struct cpumask watchdog_cpumask __read_mostly;
92unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
93 89
94/* 90static int __init hardlockup_all_cpu_backtrace_setup(char *str)
95 * The 'watchdog_running' variable is set to 1 when the watchdog threads 91{
96 * are registered/started and is set to 0 when the watchdog threads are 92 sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
97 * unregistered/stopped, so it is an indicator whether the threads exist. 93 return 1;
98 */ 94}
99static int __read_mostly watchdog_running; 95__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
100/* 96# endif /* CONFIG_SMP */
101 * If a subsystem has a need to deactivate the watchdog temporarily, it 97#endif /* CONFIG_HARDLOCKUP_DETECTOR */
102 * can use the suspend/resume interface to achieve this. The content of
103 * the 'watchdog_suspended' variable reflects this state. Existing threads
104 * are parked/unparked by the lockup_detector_{suspend|resume} functions
105 * (see comment blocks pertaining to those functions for further details).
106 *
107 * 'watchdog_suspended' also prevents threads from being registered/started
108 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
109 * of 'watchdog_running' cannot change while the watchdog is deactivated
110 * temporarily (see related code in 'proc' handlers).
111 */
112int __read_mostly watchdog_suspended;
113 98
114/* 99/*
115 * These functions can be overridden if an architecture implements its 100 * These functions can be overridden if an architecture implements its
@@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended;
121 */ 106 */
122int __weak watchdog_nmi_enable(unsigned int cpu) 107int __weak watchdog_nmi_enable(unsigned int cpu)
123{ 108{
109 hardlockup_detector_perf_enable();
124 return 0; 110 return 0;
125} 111}
112
126void __weak watchdog_nmi_disable(unsigned int cpu) 113void __weak watchdog_nmi_disable(unsigned int cpu)
127{ 114{
115 hardlockup_detector_perf_disable();
128} 116}
129 117
130/* 118/* Return 0, if a NMI watchdog is available. Error code otherwise */
131 * watchdog_nmi_reconfigure can be implemented to be notified after any 119int __weak __init watchdog_nmi_probe(void)
132 * watchdog configuration change. The arch hardlockup watchdog should 120{
133 * respond to the following variables: 121 return hardlockup_detector_perf_init();
134 * - nmi_watchdog_enabled 122}
123
124/**
125 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
126 *
127 * The reconfiguration steps are:
128 * watchdog_nmi_stop();
129 * update_variables();
130 * watchdog_nmi_start();
131 */
132void __weak watchdog_nmi_stop(void) { }
133
134/**
135 * watchdog_nmi_start - Start the watchdog after reconfiguration
136 *
137 * Counterpart to watchdog_nmi_stop().
138 *
139 * The following variables have been updated in update_variables() and
140 * contain the currently valid configuration:
141 * - watchdog_enabled
135 * - watchdog_thresh 142 * - watchdog_thresh
136 * - watchdog_cpumask 143 * - watchdog_cpumask
137 * - sysctl_hardlockup_all_cpu_backtrace
138 * - hardlockup_panic
139 * - watchdog_suspended
140 */ 144 */
141void __weak watchdog_nmi_reconfigure(void) 145void __weak watchdog_nmi_start(void) { }
146
147/**
148 * lockup_detector_update_enable - Update the sysctl enable bit
149 *
150 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
151 * can't race with watchdog_nmi_disable().
152 */
153static void lockup_detector_update_enable(void)
142{ 154{
155 watchdog_enabled = 0;
156 if (!watchdog_user_enabled)
157 return;
158 if (nmi_watchdog_available && nmi_watchdog_user_enabled)
159 watchdog_enabled |= NMI_WATCHDOG_ENABLED;
160 if (soft_watchdog_user_enabled)
161 watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
143} 162}
144 163
145
146#ifdef CONFIG_SOFTLOCKUP_DETECTOR 164#ifdef CONFIG_SOFTLOCKUP_DETECTOR
147 165
148/* Helper for online, unparked cpus. */ 166/* Global variables, exported for sysctl */
149#define for_each_watchdog_cpu(cpu) \ 167unsigned int __read_mostly softlockup_panic =
150 for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) 168 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
151
152atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
153 169
170static bool softlockup_threads_initialized __read_mostly;
154static u64 __read_mostly sample_period; 171static u64 __read_mostly sample_period;
155 172
156static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); 173static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
164static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); 181static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
165static unsigned long soft_lockup_nmi_warn; 182static unsigned long soft_lockup_nmi_warn;
166 183
167unsigned int __read_mostly softlockup_panic =
168 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
169
170static int __init softlockup_panic_setup(char *str) 184static int __init softlockup_panic_setup(char *str)
171{ 185{
172 softlockup_panic = simple_strtoul(str, NULL, 0); 186 softlockup_panic = simple_strtoul(str, NULL, 0);
173
174 return 1; 187 return 1;
175} 188}
176__setup("softlockup_panic=", softlockup_panic_setup); 189__setup("softlockup_panic=", softlockup_panic_setup);
177 190
178static int __init nowatchdog_setup(char *str) 191static int __init nowatchdog_setup(char *str)
179{ 192{
180 watchdog_enabled = 0; 193 watchdog_user_enabled = 0;
181 return 1; 194 return 1;
182} 195}
183__setup("nowatchdog", nowatchdog_setup); 196__setup("nowatchdog", nowatchdog_setup);
184 197
185static int __init nosoftlockup_setup(char *str) 198static int __init nosoftlockup_setup(char *str)
186{ 199{
187 watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED; 200 soft_watchdog_user_enabled = 0;
188 return 1; 201 return 1;
189} 202}
190__setup("nosoftlockup", nosoftlockup_setup); 203__setup("nosoftlockup", nosoftlockup_setup);
191 204
192#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
206int __read_mostly sysctl_softlockup_all_cpu_backtrace;
207
193static int __init softlockup_all_cpu_backtrace_setup(char *str) 208static int __init softlockup_all_cpu_backtrace_setup(char *str)
194{ 209{
195 sysctl_softlockup_all_cpu_backtrace = 210 sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
196 !!simple_strtol(str, NULL, 0);
197 return 1; 211 return 1;
198} 212}
199__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); 213__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
200#ifdef CONFIG_HARDLOCKUP_DETECTOR
201static int __init hardlockup_all_cpu_backtrace_setup(char *str)
202{
203 sysctl_hardlockup_all_cpu_backtrace =
204 !!simple_strtol(str, NULL, 0);
205 return 1;
206}
207__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
208#endif
209#endif 214#endif
210 215
216static void __lockup_detector_cleanup(void);
217
211/* 218/*
212 * Hard-lockup warnings should be triggered after just a few seconds. Soft- 219 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
213 * lockups can have false positives under extreme conditions. So we generally 220 * lockups can have false positives under extreme conditions. So we generally
@@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void)
278 int cpu; 285 int cpu;
279 286
280 /* 287 /*
281 * this is done lockless 288 * watchdog_mutex cannpt be taken here, as this might be called
282 * do we care if a 0 races with a timestamp? 289 * from (soft)interrupt context, so the access to
283 * all it means is the softlock check starts one cycle later 290 * watchdog_allowed_cpumask might race with a concurrent update.
291 *
292 * The watchdog time stamp can race against a concurrent real
293 * update as well, the only side effect might be a cycle delay for
294 * the softlockup check.
284 */ 295 */
285 for_each_watchdog_cpu(cpu) 296 for_each_cpu(cpu, &watchdog_allowed_mask)
286 per_cpu(watchdog_touch_ts, cpu) = 0; 297 per_cpu(watchdog_touch_ts, cpu) = 0;
287 wq_watchdog_touch(-1); 298 wq_watchdog_touch(-1);
288} 299}
@@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void)
322 __this_cpu_inc(hrtimer_interrupts); 333 __this_cpu_inc(hrtimer_interrupts);
323} 334}
324 335
325static int watchdog_enable_all_cpus(void);
326static void watchdog_disable_all_cpus(void);
327
328/* watchdog kicker functions */ 336/* watchdog kicker functions */
329static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) 337static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
330{ 338{
@@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
333 int duration; 341 int duration;
334 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; 342 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
335 343
336 if (atomic_read(&watchdog_park_in_progress) != 0) 344 if (!watchdog_enabled)
337 return HRTIMER_NORESTART; 345 return HRTIMER_NORESTART;
338 346
339 /* kick the hardlockup detector */ 347 /* kick the hardlockup detector */
@@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
447 455
448static void watchdog_enable(unsigned int cpu) 456static void watchdog_enable(unsigned int cpu)
449{ 457{
450 struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); 458 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
451 459
452 /* kick off the timer for the hardlockup detector */ 460 /*
461 * Start the timer first to prevent the NMI watchdog triggering
462 * before the timer has a chance to fire.
463 */
453 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 464 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
454 hrtimer->function = watchdog_timer_fn; 465 hrtimer->function = watchdog_timer_fn;
455
456 /* Enable the perf event */
457 watchdog_nmi_enable(cpu);
458
459 /* done here because hrtimer_start can only pin to smp_processor_id() */
460 hrtimer_start(hrtimer, ns_to_ktime(sample_period), 466 hrtimer_start(hrtimer, ns_to_ktime(sample_period),
461 HRTIMER_MODE_REL_PINNED); 467 HRTIMER_MODE_REL_PINNED);
462 468
463 /* initialize timestamp */ 469 /* Initialize timestamp */
464 watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
465 __touch_watchdog(); 470 __touch_watchdog();
471 /* Enable the perf event */
472 if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
473 watchdog_nmi_enable(cpu);
474
475 watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
466} 476}
467 477
468static void watchdog_disable(unsigned int cpu) 478static void watchdog_disable(unsigned int cpu)
469{ 479{
470 struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); 480 struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
471 481
472 watchdog_set_prio(SCHED_NORMAL, 0); 482 watchdog_set_prio(SCHED_NORMAL, 0);
473 hrtimer_cancel(hrtimer); 483 /*
474 /* disable the perf event */ 484 * Disable the perf event first. That prevents that a large delay
485 * between disabling the timer and disabling the perf event causes
486 * the perf NMI to detect a false positive.
487 */
475 watchdog_nmi_disable(cpu); 488 watchdog_nmi_disable(cpu);
489 hrtimer_cancel(hrtimer);
476} 490}
477 491
478static void watchdog_cleanup(unsigned int cpu, bool online) 492static void watchdog_cleanup(unsigned int cpu, bool online)
@@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu)
499 __this_cpu_write(soft_lockup_hrtimer_cnt, 513 __this_cpu_write(soft_lockup_hrtimer_cnt,
500 __this_cpu_read(hrtimer_interrupts)); 514 __this_cpu_read(hrtimer_interrupts));
501 __touch_watchdog(); 515 __touch_watchdog();
502
503 /*
504 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
505 * failure path. Check for failures that can occur asynchronously -
506 * for example, when CPUs are on-lined - and shut down the hardware
507 * perf event on each CPU accordingly.
508 *
509 * The only non-obvious place this bit can be cleared is through
510 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
511 * pr_info here would be too noisy as it would result in a message
512 * every few seconds if the hardlockup was disabled but the softlockup
513 * enabled.
514 */
515 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
516 watchdog_nmi_disable(cpu);
517} 516}
518 517
519static struct smp_hotplug_thread watchdog_threads = { 518static struct smp_hotplug_thread watchdog_threads = {
@@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = {
527 .unpark = watchdog_enable, 526 .unpark = watchdog_enable,
528}; 527};
529 528
530/* 529static void softlockup_update_smpboot_threads(void)
531 * park all watchdog threads that are specified in 'watchdog_cpumask'
532 *
533 * This function returns an error if kthread_park() of a watchdog thread
534 * fails. In this situation, the watchdog threads of some CPUs can already
535 * be parked and the watchdog threads of other CPUs can still be runnable.
536 * Callers are expected to handle this special condition as appropriate in
537 * their context.
538 *
539 * This function may only be called in a context that is protected against
540 * races with CPU hotplug - for example, via get_online_cpus().
541 */
542static int watchdog_park_threads(void)
543{ 530{
544 int cpu, ret = 0; 531 lockdep_assert_held(&watchdog_mutex);
545 532
546 atomic_set(&watchdog_park_in_progress, 1); 533 if (!softlockup_threads_initialized)
534 return;
547 535
548 for_each_watchdog_cpu(cpu) { 536 smpboot_update_cpumask_percpu_thread(&watchdog_threads,
549 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); 537 &watchdog_allowed_mask);
550 if (ret)
551 break;
552 }
553
554 atomic_set(&watchdog_park_in_progress, 0);
555
556 return ret;
557} 538}
558 539
559/* 540/* Temporarily park all watchdog threads */
560 * unpark all watchdog threads that are specified in 'watchdog_cpumask' 541static void softlockup_park_all_threads(void)
561 *
562 * This function may only be called in a context that is protected against
563 * races with CPU hotplug - for example, via get_online_cpus().
564 */
565static void watchdog_unpark_threads(void)
566{ 542{
567 int cpu; 543 cpumask_clear(&watchdog_allowed_mask);
568 544 softlockup_update_smpboot_threads();
569 for_each_watchdog_cpu(cpu)
570 kthread_unpark(per_cpu(softlockup_watchdog, cpu));
571} 545}
572 546
573static int update_watchdog_all_cpus(void) 547/* Unpark enabled threads */
548static void softlockup_unpark_threads(void)
574{ 549{
575 int ret; 550 cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
576 551 softlockup_update_smpboot_threads();
577 ret = watchdog_park_threads();
578 if (ret)
579 return ret;
580
581 watchdog_unpark_threads();
582
583 return 0;
584} 552}
585 553
586static int watchdog_enable_all_cpus(void) 554static void lockup_detector_reconfigure(void)
587{ 555{
588 int err = 0; 556 cpus_read_lock();
589 557 watchdog_nmi_stop();
590 if (!watchdog_running) { 558 softlockup_park_all_threads();
591 err = smpboot_register_percpu_thread_cpumask(&watchdog_threads, 559 set_sample_period();
592 &watchdog_cpumask); 560 lockup_detector_update_enable();
593 if (err) 561 if (watchdog_enabled && watchdog_thresh)
594 pr_err("Failed to create watchdog threads, disabled\n"); 562 softlockup_unpark_threads();
595 else 563 watchdog_nmi_start();
596 watchdog_running = 1; 564 cpus_read_unlock();
597 } else { 565 /*
598 /* 566 * Must be called outside the cpus locked section to prevent
599 * Enable/disable the lockup detectors or 567 * recursive locking in the perf code.
600 * change the sample period 'on the fly'. 568 */
601 */ 569 __lockup_detector_cleanup();
602 err = update_watchdog_all_cpus();
603
604 if (err) {
605 watchdog_disable_all_cpus();
606 pr_err("Failed to update lockup detectors, disabled\n");
607 }
608 }
609
610 if (err)
611 watchdog_enabled = 0;
612
613 return err;
614} 570}
615 571
616static void watchdog_disable_all_cpus(void) 572/*
573 * Create the watchdog thread infrastructure and configure the detector(s).
574 *
575 * The threads are not unparked as watchdog_allowed_mask is empty. When
576 * the threads are sucessfully initialized, take the proper locks and
577 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
578 */
579static __init void lockup_detector_setup(void)
617{ 580{
618 if (watchdog_running) { 581 int ret;
619 watchdog_running = 0;
620 smpboot_unregister_percpu_thread(&watchdog_threads);
621 }
622}
623 582
624#ifdef CONFIG_SYSCTL 583 /*
625static int watchdog_update_cpus(void) 584 * If sysctl is off and watchdog got disabled on the command line,
626{ 585 * nothing to do here.
627 return smpboot_update_cpumask_percpu_thread( 586 */
628 &watchdog_threads, &watchdog_cpumask); 587 lockup_detector_update_enable();
629}
630#endif
631 588
632#else /* SOFTLOCKUP */ 589 if (!IS_ENABLED(CONFIG_SYSCTL) &&
633static int watchdog_park_threads(void) 590 !(watchdog_enabled && watchdog_thresh))
634{ 591 return;
635 return 0;
636}
637 592
638static void watchdog_unpark_threads(void) 593 ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
639{ 594 &watchdog_allowed_mask);
640} 595 if (ret) {
596 pr_err("Failed to initialize soft lockup detector threads\n");
597 return;
598 }
641 599
642static int watchdog_enable_all_cpus(void) 600 mutex_lock(&watchdog_mutex);
643{ 601 softlockup_threads_initialized = true;
644 return 0; 602 lockup_detector_reconfigure();
603 mutex_unlock(&watchdog_mutex);
645} 604}
646 605
647static void watchdog_disable_all_cpus(void) 606#else /* CONFIG_SOFTLOCKUP_DETECTOR */
607static inline int watchdog_park_threads(void) { return 0; }
608static inline void watchdog_unpark_threads(void) { }
609static inline int watchdog_enable_all_cpus(void) { return 0; }
610static inline void watchdog_disable_all_cpus(void) { }
611static void lockup_detector_reconfigure(void)
648{ 612{
613 cpus_read_lock();
614 watchdog_nmi_stop();
615 lockup_detector_update_enable();
616 watchdog_nmi_start();
617 cpus_read_unlock();
649} 618}
650 619static inline void lockup_detector_setup(void)
651#ifdef CONFIG_SYSCTL
652static int watchdog_update_cpus(void)
653{ 620{
654 return 0; 621 lockup_detector_reconfigure();
655} 622}
656#endif 623#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
657 624
658static void set_sample_period(void) 625static void __lockup_detector_cleanup(void)
659{ 626{
627 lockdep_assert_held(&watchdog_mutex);
628 hardlockup_detector_perf_cleanup();
660} 629}
661#endif /* SOFTLOCKUP */
662 630
663/* 631/**
664 * Suspend the hard and soft lockup detector by parking the watchdog threads. 632 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
633 *
634 * Caller must not hold the cpu hotplug rwsem.
665 */ 635 */
666int lockup_detector_suspend(void) 636void lockup_detector_cleanup(void)
667{ 637{
668 int ret = 0; 638 mutex_lock(&watchdog_mutex);
669 639 __lockup_detector_cleanup();
670 get_online_cpus(); 640 mutex_unlock(&watchdog_mutex);
671 mutex_lock(&watchdog_proc_mutex);
672 /*
673 * Multiple suspend requests can be active in parallel (counted by
674 * the 'watchdog_suspended' variable). If the watchdog threads are
675 * running, the first caller takes care that they will be parked.
676 * The state of 'watchdog_running' cannot change while a suspend
677 * request is active (see related code in 'proc' handlers).
678 */
679 if (watchdog_running && !watchdog_suspended)
680 ret = watchdog_park_threads();
681
682 if (ret == 0)
683 watchdog_suspended++;
684 else {
685 watchdog_disable_all_cpus();
686 pr_err("Failed to suspend lockup detectors, disabled\n");
687 watchdog_enabled = 0;
688 }
689
690 watchdog_nmi_reconfigure();
691
692 mutex_unlock(&watchdog_proc_mutex);
693
694 return ret;
695} 641}
696 642
697/* 643/**
698 * Resume the hard and soft lockup detector by unparking the watchdog threads. 644 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
645 *
646 * Special interface for parisc. It prevents lockup detector warnings from
647 * the default pm_poweroff() function which busy loops forever.
699 */ 648 */
700void lockup_detector_resume(void) 649void lockup_detector_soft_poweroff(void)
701{ 650{
702 mutex_lock(&watchdog_proc_mutex); 651 watchdog_enabled = 0;
703
704 watchdog_suspended--;
705 /*
706 * The watchdog threads are unparked if they were previously running
707 * and if there is no more active suspend request.
708 */
709 if (watchdog_running && !watchdog_suspended)
710 watchdog_unpark_threads();
711
712 watchdog_nmi_reconfigure();
713
714 mutex_unlock(&watchdog_proc_mutex);
715 put_online_cpus();
716} 652}
717 653
718#ifdef CONFIG_SYSCTL 654#ifdef CONFIG_SYSCTL
719 655
720/* 656/* Propagate any changes to the watchdog threads */
721 * Update the run state of the lockup detectors. 657static void proc_watchdog_update(void)
722 */
723static int proc_watchdog_update(void)
724{ 658{
725 int err = 0; 659 /* Remove impossible cpus to keep sysctl output clean. */
726 660 cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
727 /* 661 lockup_detector_reconfigure();
728 * Watchdog threads won't be started if they are already active.
729 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
730 * care of this. If those threads are already active, the sample
731 * period will be updated and the lockup detectors will be enabled
732 * or disabled 'on the fly'.
733 */
734 if (watchdog_enabled && watchdog_thresh)
735 err = watchdog_enable_all_cpus();
736 else
737 watchdog_disable_all_cpus();
738
739 watchdog_nmi_reconfigure();
740
741 return err;
742
743} 662}
744 663
745/* 664/*
746 * common function for watchdog, nmi_watchdog and soft_watchdog parameter 665 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
747 * 666 *
748 * caller | table->data points to | 'which' contains the flag(s) 667 * caller | table->data points to | 'which'
749 * -------------------|-----------------------|----------------------------- 668 * -------------------|----------------------------|--------------------------
750 * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed 669 * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
751 * | | with SOFT_WATCHDOG_ENABLED 670 * | | SOFT_WATCHDOG_ENABLED
752 * -------------------|-----------------------|----------------------------- 671 * -------------------|----------------------------|--------------------------
753 * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED 672 * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED
754 * -------------------|-----------------------|----------------------------- 673 * -------------------|----------------------------|--------------------------
755 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED 674 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
756 */ 675 */
757static int proc_watchdog_common(int which, struct ctl_table *table, int write, 676static int proc_watchdog_common(int which, struct ctl_table *table, int write,
758 void __user *buffer, size_t *lenp, loff_t *ppos) 677 void __user *buffer, size_t *lenp, loff_t *ppos)
759{ 678{
760 int err, old, new; 679 int err, old, *param = table->data;
761 int *watchdog_param = (int *)table->data;
762 680
763 get_online_cpus(); 681 mutex_lock(&watchdog_mutex);
764 mutex_lock(&watchdog_proc_mutex);
765 682
766 if (watchdog_suspended) {
767 /* no parameter changes allowed while watchdog is suspended */
768 err = -EAGAIN;
769 goto out;
770 }
771
772 /*
773 * If the parameter is being read return the state of the corresponding
774 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
775 * run state of the lockup detectors.
776 */
777 if (!write) { 683 if (!write) {
778 *watchdog_param = (watchdog_enabled & which) != 0; 684 /*
685 * On read synchronize the userspace interface. This is a
686 * racy snapshot.
687 */
688 *param = (watchdog_enabled & which) != 0;
779 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 689 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
780 } else { 690 } else {
691 old = READ_ONCE(*param);
781 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 692 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
782 if (err) 693 if (!err && old != READ_ONCE(*param))
783 goto out; 694 proc_watchdog_update();
784
785 /*
786 * There is a race window between fetching the current value
787 * from 'watchdog_enabled' and storing the new value. During
788 * this race window, watchdog_nmi_enable() can sneak in and
789 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
790 * The 'cmpxchg' detects this race and the loop retries.
791 */
792 do {
793 old = watchdog_enabled;
794 /*
795 * If the parameter value is not zero set the
796 * corresponding bit(s), else clear it(them).
797 */
798 if (*watchdog_param)
799 new = old | which;
800 else
801 new = old & ~which;
802 } while (cmpxchg(&watchdog_enabled, old, new) != old);
803
804 /*
805 * Update the run state of the lockup detectors. There is _no_
806 * need to check the value returned by proc_watchdog_update()
807 * and to restore the previous value of 'watchdog_enabled' as
808 * both lockup detectors are disabled if proc_watchdog_update()
809 * returns an error.
810 */
811 if (old == new)
812 goto out;
813
814 err = proc_watchdog_update();
815 } 695 }
816out: 696 mutex_unlock(&watchdog_mutex);
817 mutex_unlock(&watchdog_proc_mutex);
818 put_online_cpus();
819 return err; 697 return err;
820} 698}
821 699
@@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write,
835int proc_nmi_watchdog(struct ctl_table *table, int write, 713int proc_nmi_watchdog(struct ctl_table *table, int write,
836 void __user *buffer, size_t *lenp, loff_t *ppos) 714 void __user *buffer, size_t *lenp, loff_t *ppos)
837{ 715{
716 if (!nmi_watchdog_available && write)
717 return -ENOTSUPP;
838 return proc_watchdog_common(NMI_WATCHDOG_ENABLED, 718 return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
839 table, write, buffer, lenp, ppos); 719 table, write, buffer, lenp, ppos);
840} 720}
@@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
855int proc_watchdog_thresh(struct ctl_table *table, int write, 735int proc_watchdog_thresh(struct ctl_table *table, int write,
856 void __user *buffer, size_t *lenp, loff_t *ppos) 736 void __user *buffer, size_t *lenp, loff_t *ppos)
857{ 737{
858 int err, old, new; 738 int err, old;
859
860 get_online_cpus();
861 mutex_lock(&watchdog_proc_mutex);
862 739
863 if (watchdog_suspended) { 740 mutex_lock(&watchdog_mutex);
864 /* no parameter changes allowed while watchdog is suspended */
865 err = -EAGAIN;
866 goto out;
867 }
868 741
869 old = ACCESS_ONCE(watchdog_thresh); 742 old = READ_ONCE(watchdog_thresh);
870 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 743 err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
871 744
872 if (err || !write) 745 if (!err && write && old != READ_ONCE(watchdog_thresh))
873 goto out; 746 proc_watchdog_update();
874
875 /*
876 * Update the sample period. Restore on failure.
877 */
878 new = ACCESS_ONCE(watchdog_thresh);
879 if (old == new)
880 goto out;
881 747
882 set_sample_period(); 748 mutex_unlock(&watchdog_mutex);
883 err = proc_watchdog_update();
884 if (err) {
885 watchdog_thresh = old;
886 set_sample_period();
887 }
888out:
889 mutex_unlock(&watchdog_proc_mutex);
890 put_online_cpus();
891 return err; 749 return err;
892} 750}
893 751
@@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
902{ 760{
903 int err; 761 int err;
904 762
905 get_online_cpus(); 763 mutex_lock(&watchdog_mutex);
906 mutex_lock(&watchdog_proc_mutex);
907
908 if (watchdog_suspended) {
909 /* no parameter changes allowed while watchdog is suspended */
910 err = -EAGAIN;
911 goto out;
912 }
913 764
914 err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); 765 err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
915 if (!err && write) { 766 if (!err && write)
916 /* Remove impossible cpus to keep sysctl output cleaner. */ 767 proc_watchdog_update();
917 cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
918 cpu_possible_mask);
919
920 if (watchdog_running) {
921 /*
922 * Failure would be due to being unable to allocate
923 * a temporary cpumask, so we are likely not in a
924 * position to do much else to make things better.
925 */
926 if (watchdog_update_cpus() != 0)
927 pr_err("cpumask update failed\n");
928 }
929 768
930 watchdog_nmi_reconfigure(); 769 mutex_unlock(&watchdog_mutex);
931 }
932out:
933 mutex_unlock(&watchdog_proc_mutex);
934 put_online_cpus();
935 return err; 770 return err;
936} 771}
937
938#endif /* CONFIG_SYSCTL */ 772#endif /* CONFIG_SYSCTL */
939 773
940void __init lockup_detector_init(void) 774void __init lockup_detector_init(void)
941{ 775{
942 set_sample_period();
943
944#ifdef CONFIG_NO_HZ_FULL 776#ifdef CONFIG_NO_HZ_FULL
945 if (tick_nohz_full_enabled()) { 777 if (tick_nohz_full_enabled()) {
946 pr_info("Disabling watchdog on nohz_full cores by default\n"); 778 pr_info("Disabling watchdog on nohz_full cores by default\n");
@@ -951,6 +783,7 @@ void __init lockup_detector_init(void)
951 cpumask_copy(&watchdog_cpumask, cpu_possible_mask); 783 cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
952#endif 784#endif
953 785
954 if (watchdog_enabled) 786 if (!watchdog_nmi_probe())
955 watchdog_enable_all_cpus(); 787 nmi_watchdog_available = true;
788 lockup_detector_setup();
956} 789}
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 3a09ea1b1d3d..71a62ceacdc8 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -21,8 +21,10 @@
21static DEFINE_PER_CPU(bool, hard_watchdog_warn); 21static DEFINE_PER_CPU(bool, hard_watchdog_warn);
22static DEFINE_PER_CPU(bool, watchdog_nmi_touch); 22static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
23static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 23static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
24static struct cpumask dead_events_mask;
24 25
25static unsigned long hardlockup_allcpu_dumped; 26static unsigned long hardlockup_allcpu_dumped;
27static unsigned int watchdog_cpus;
26 28
27void arch_touch_nmi_watchdog(void) 29void arch_touch_nmi_watchdog(void)
28{ 30{
@@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = {
103 105
104/* Callback function for perf event subsystem */ 106/* Callback function for perf event subsystem */
105static void watchdog_overflow_callback(struct perf_event *event, 107static void watchdog_overflow_callback(struct perf_event *event,
106 struct perf_sample_data *data, 108 struct perf_sample_data *data,
107 struct pt_regs *regs) 109 struct pt_regs *regs)
108{ 110{
109 /* Ensure the watchdog never gets throttled */ 111 /* Ensure the watchdog never gets throttled */
110 event->hw.interrupts = 0; 112 event->hw.interrupts = 0;
111 113
112 if (atomic_read(&watchdog_park_in_progress) != 0)
113 return;
114
115 if (__this_cpu_read(watchdog_nmi_touch) == true) { 114 if (__this_cpu_read(watchdog_nmi_touch) == true) {
116 __this_cpu_write(watchdog_nmi_touch, false); 115 __this_cpu_write(watchdog_nmi_touch, false);
117 return; 116 return;
@@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event,
160 return; 159 return;
161} 160}
162 161
163/* 162static int hardlockup_detector_event_create(void)
164 * People like the simple clean cpu node info on boot.
165 * Reduce the watchdog noise by only printing messages
166 * that are different from what cpu0 displayed.
167 */
168static unsigned long firstcpu_err;
169static atomic_t watchdog_cpus;
170
171int watchdog_nmi_enable(unsigned int cpu)
172{ 163{
164 unsigned int cpu = smp_processor_id();
173 struct perf_event_attr *wd_attr; 165 struct perf_event_attr *wd_attr;
174 struct perf_event *event = per_cpu(watchdog_ev, cpu); 166 struct perf_event *evt;
175 int firstcpu = 0;
176
177 /* nothing to do if the hard lockup detector is disabled */
178 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
179 goto out;
180
181 /* is it already setup and enabled? */
182 if (event && event->state > PERF_EVENT_STATE_OFF)
183 goto out;
184
185 /* it is setup but not enabled */
186 if (event != NULL)
187 goto out_enable;
188
189 if (atomic_inc_return(&watchdog_cpus) == 1)
190 firstcpu = 1;
191 167
192 wd_attr = &wd_hw_attr; 168 wd_attr = &wd_hw_attr;
193 wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); 169 wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
194 170
195 /* Try to register using hardware perf events */ 171 /* Try to register using hardware perf events */
196 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); 172 evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
173 watchdog_overflow_callback, NULL);
174 if (IS_ERR(evt)) {
175 pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
176 PTR_ERR(evt));
177 return PTR_ERR(evt);
178 }
179 this_cpu_write(watchdog_ev, evt);
180 return 0;
181}
197 182
198 /* save the first cpu's error for future comparision */ 183/**
199 if (firstcpu && IS_ERR(event)) 184 * hardlockup_detector_perf_enable - Enable the local event
200 firstcpu_err = PTR_ERR(event); 185 */
186void hardlockup_detector_perf_enable(void)
187{
188 if (hardlockup_detector_event_create())
189 return;
201 190
202 if (!IS_ERR(event)) { 191 if (!watchdog_cpus++)
203 /* only print for the first cpu initialized */ 192 pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
204 if (firstcpu || firstcpu_err)
205 pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
206 goto out_save;
207 }
208 193
209 /* 194 perf_event_enable(this_cpu_read(watchdog_ev));
210 * Disable the hard lockup detector if _any_ CPU fails to set up
211 * set up the hardware perf event. The watchdog() function checks
212 * the NMI_WATCHDOG_ENABLED bit periodically.
213 *
214 * The barriers are for syncing up watchdog_enabled across all the
215 * cpus, as clear_bit() does not use barriers.
216 */
217 smp_mb__before_atomic();
218 clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
219 smp_mb__after_atomic();
220
221 /* skip displaying the same error again */
222 if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
223 return PTR_ERR(event);
224
225 /* vary the KERN level based on the returned errno */
226 if (PTR_ERR(event) == -EOPNOTSUPP)
227 pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
228 else if (PTR_ERR(event) == -ENOENT)
229 pr_warn("disabled (cpu%i): hardware events not enabled\n",
230 cpu);
231 else
232 pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
233 cpu, PTR_ERR(event));
234
235 pr_info("Shutting down hard lockup detector on all cpus\n");
236
237 return PTR_ERR(event);
238
239 /* success path */
240out_save:
241 per_cpu(watchdog_ev, cpu) = event;
242out_enable:
243 perf_event_enable(per_cpu(watchdog_ev, cpu));
244out:
245 return 0;
246} 195}
247 196
248void watchdog_nmi_disable(unsigned int cpu) 197/**
198 * hardlockup_detector_perf_disable - Disable the local event
199 */
200void hardlockup_detector_perf_disable(void)
249{ 201{
250 struct perf_event *event = per_cpu(watchdog_ev, cpu); 202 struct perf_event *event = this_cpu_read(watchdog_ev);
251 203
252 if (event) { 204 if (event) {
253 perf_event_disable(event); 205 perf_event_disable(event);
206 cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
207 watchdog_cpus--;
208 }
209}
210
211/**
212 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
213 *
214 * Called from lockup_detector_cleanup(). Serialized by the caller.
215 */
216void hardlockup_detector_perf_cleanup(void)
217{
218 int cpu;
219
220 for_each_cpu(cpu, &dead_events_mask) {
221 struct perf_event *event = per_cpu(watchdog_ev, cpu);
222
223 /*
224 * Required because for_each_cpu() reports unconditionally
225 * CPU0 as set on UP kernels. Sigh.
226 */
227 if (event)
228 perf_event_release_kernel(event);
254 per_cpu(watchdog_ev, cpu) = NULL; 229 per_cpu(watchdog_ev, cpu) = NULL;
230 }
231 cpumask_clear(&dead_events_mask);
232}
233
234/**
235 * hardlockup_detector_perf_stop - Globally stop watchdog events
236 *
237 * Special interface for x86 to handle the perf HT bug.
238 */
239void __init hardlockup_detector_perf_stop(void)
240{
241 int cpu;
242
243 lockdep_assert_cpus_held();
244
245 for_each_online_cpu(cpu) {
246 struct perf_event *event = per_cpu(watchdog_ev, cpu);
247
248 if (event)
249 perf_event_disable(event);
250 }
251}
255 252
256 /* should be in cleanup, but blocks oprofile */ 253/**
257 perf_event_release_kernel(event); 254 * hardlockup_detector_perf_restart - Globally restart watchdog events
255 *
256 * Special interface for x86 to handle the perf HT bug.
257 */
258void __init hardlockup_detector_perf_restart(void)
259{
260 int cpu;
261
262 lockdep_assert_cpus_held();
263
264 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
265 return;
266
267 for_each_online_cpu(cpu) {
268 struct perf_event *event = per_cpu(watchdog_ev, cpu);
269
270 if (event)
271 perf_event_enable(event);
272 }
273}
274
275/**
276 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
277 */
278int __init hardlockup_detector_perf_init(void)
279{
280 int ret = hardlockup_detector_event_create();
258 281
259 /* watchdog_nmi_enable() expects this to be zero initially. */ 282 if (ret) {
260 if (atomic_dec_and_test(&watchdog_cpus)) 283 pr_info("Perf NMI watchdog permanently disabled\n");
261 firstcpu_err = 0; 284 } else {
285 perf_event_release_kernel(this_cpu_read(watchdog_ev));
286 this_cpu_write(watchdog_ev, NULL);
262 } 287 }
288 return ret;
263} 289}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 64d0edf428f8..a2dccfe1acec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
68 * attach_mutex to avoid changing binding state while 68 * attach_mutex to avoid changing binding state while
69 * worker_attach_to_pool() is in progress. 69 * worker_attach_to_pool() is in progress.
70 */ 70 */
71 POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
71 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ 72 POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
72 73
73 /* worker flags */ 74 /* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
165 /* L: hash of busy workers */ 166 /* L: hash of busy workers */
166 167
167 /* see manage_workers() for details on the two manager mutexes */ 168 /* see manage_workers() for details on the two manager mutexes */
168 struct mutex manager_arb; /* manager arbitration */
169 struct worker *manager; /* L: purely informational */ 169 struct worker *manager; /* L: purely informational */
170 struct mutex attach_mutex; /* attach/detach exclusion */ 170 struct mutex attach_mutex; /* attach/detach exclusion */
171 struct list_head workers; /* A: attached workers */ 171 struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
299 299
300static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ 300static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
302static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
302 303
303static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 304static LIST_HEAD(workqueues); /* PR: list of all workqueues */
304static bool workqueue_freezing; /* PL: have wqs started freezing? */ 305static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
801/* Do we have too many workers and should some go away? */ 802/* Do we have too many workers and should some go away? */
802static bool too_many_workers(struct worker_pool *pool) 803static bool too_many_workers(struct worker_pool *pool)
803{ 804{
804 bool managing = mutex_is_locked(&pool->manager_arb); 805 bool managing = pool->flags & POOL_MANAGER_ACTIVE;
805 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 806 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
806 int nr_busy = pool->nr_workers - nr_idle; 807 int nr_busy = pool->nr_workers - nr_idle;
807 808
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
1980{ 1981{
1981 struct worker_pool *pool = worker->pool; 1982 struct worker_pool *pool = worker->pool;
1982 1983
1983 /* 1984 if (pool->flags & POOL_MANAGER_ACTIVE)
1984 * Anyone who successfully grabs manager_arb wins the arbitration
1985 * and becomes the manager. mutex_trylock() on pool->manager_arb
1986 * failure while holding pool->lock reliably indicates that someone
1987 * else is managing the pool and the worker which failed trylock
1988 * can proceed to executing work items. This means that anyone
1989 * grabbing manager_arb is responsible for actually performing
1990 * manager duties. If manager_arb is grabbed and released without
1991 * actual management, the pool may stall indefinitely.
1992 */
1993 if (!mutex_trylock(&pool->manager_arb))
1994 return false; 1985 return false;
1986
1987 pool->flags |= POOL_MANAGER_ACTIVE;
1995 pool->manager = worker; 1988 pool->manager = worker;
1996 1989
1997 maybe_create_worker(pool); 1990 maybe_create_worker(pool);
1998 1991
1999 pool->manager = NULL; 1992 pool->manager = NULL;
2000 mutex_unlock(&pool->manager_arb); 1993 pool->flags &= ~POOL_MANAGER_ACTIVE;
1994 wake_up(&wq_manager_wait);
2001 return true; 1995 return true;
2002} 1996}
2003 1997
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
3248 setup_timer(&pool->mayday_timer, pool_mayday_timeout, 3242 setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3249 (unsigned long)pool); 3243 (unsigned long)pool);
3250 3244
3251 mutex_init(&pool->manager_arb);
3252 mutex_init(&pool->attach_mutex); 3245 mutex_init(&pool->attach_mutex);
3253 INIT_LIST_HEAD(&pool->workers); 3246 INIT_LIST_HEAD(&pool->workers);
3254 3247
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
3318 hash_del(&pool->hash_node); 3311 hash_del(&pool->hash_node);
3319 3312
3320 /* 3313 /*
3321 * Become the manager and destroy all workers. Grabbing 3314 * Become the manager and destroy all workers. This prevents
3322 * manager_arb prevents @pool's workers from blocking on 3315 * @pool's workers from blocking on attach_mutex. We're the last
3323 * attach_mutex. 3316 * manager and @pool gets freed with the flag set.
3324 */ 3317 */
3325 mutex_lock(&pool->manager_arb);
3326
3327 spin_lock_irq(&pool->lock); 3318 spin_lock_irq(&pool->lock);
3319 wait_event_lock_irq(wq_manager_wait,
3320 !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3321 pool->flags |= POOL_MANAGER_ACTIVE;
3322
3328 while ((worker = first_idle_worker(pool))) 3323 while ((worker = first_idle_worker(pool)))
3329 destroy_worker(worker); 3324 destroy_worker(worker);
3330 WARN_ON(pool->nr_workers || pool->nr_idle); 3325 WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
3338 if (pool->detach_completion) 3333 if (pool->detach_completion)
3339 wait_for_completion(pool->detach_completion); 3334 wait_for_completion(pool->detach_completion);
3340 3335
3341 mutex_unlock(&pool->manager_arb);
3342
3343 /* shut down the timers */ 3336 /* shut down the timers */
3344 del_timer_sync(&pool->idle_timer); 3337 del_timer_sync(&pool->idle_timer);
3345 del_timer_sync(&pool->mayday_timer); 3338 del_timer_sync(&pool->mayday_timer);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2689b7c50c52..dfdad67d8f6c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING
1092 select DEBUG_MUTEXES 1092 select DEBUG_MUTEXES
1093 select DEBUG_RT_MUTEXES if RT_MUTEXES 1093 select DEBUG_RT_MUTEXES if RT_MUTEXES
1094 select DEBUG_LOCK_ALLOC 1094 select DEBUG_LOCK_ALLOC
1095 select LOCKDEP_CROSSRELEASE 1095 select LOCKDEP_CROSSRELEASE if BROKEN
1096 select LOCKDEP_COMPLETIONS 1096 select LOCKDEP_COMPLETIONS if BROKEN
1097 select TRACE_IRQFLAGS 1097 select TRACE_IRQFLAGS
1098 default n 1098 default n
1099 help 1099 help
@@ -1590,6 +1590,54 @@ config LATENCYTOP
1590 1590
1591source kernel/trace/Kconfig 1591source kernel/trace/Kconfig
1592 1592
1593config PROVIDE_OHCI1394_DMA_INIT
1594 bool "Remote debugging over FireWire early on boot"
1595 depends on PCI && X86
1596 help
1597 If you want to debug problems which hang or crash the kernel early
1598 on boot and the crashing machine has a FireWire port, you can use
1599 this feature to remotely access the memory of the crashed machine
1600 over FireWire. This employs remote DMA as part of the OHCI1394
1601 specification which is now the standard for FireWire controllers.
1602
1603 With remote DMA, you can monitor the printk buffer remotely using
1604 firescope and access all memory below 4GB using fireproxy from gdb.
1605 Even controlling a kernel debugger is possible using remote DMA.
1606
1607 Usage:
1608
1609 If ohci1394_dma=early is used as boot parameter, it will initialize
1610 all OHCI1394 controllers which are found in the PCI config space.
1611
1612 As all changes to the FireWire bus such as enabling and disabling
1613 devices cause a bus reset and thereby disable remote DMA for all
1614 devices, be sure to have the cable plugged and FireWire enabled on
1615 the debugging host before booting the debug target for debugging.
1616
1617 This code (~1k) is freed after boot. By then, the firewire stack
1618 in charge of the OHCI-1394 controllers should be used instead.
1619
1620 See Documentation/debugging-via-ohci1394.txt for more information.
1621
1622config DMA_API_DEBUG
1623 bool "Enable debugging of DMA-API usage"
1624 depends on HAVE_DMA_API_DEBUG
1625 help
1626 Enable this option to debug the use of the DMA API by device drivers.
1627 With this option you will be able to detect common bugs in device
1628 drivers like double-freeing of DMA mappings or freeing mappings that
1629 were never allocated.
1630
1631 This also attempts to catch cases where a page owned by DMA is
1632 accessed by the cpu in a way that could cause data corruption. For
1633 example, this enables cow_user_page() to check that the source page is
1634 not undergoing DMA.
1635
1636 This option causes a performance degradation. Use only if you want to
1637 debug device drivers and dma interactions.
1638
1639 If unsure, say N.
1640
1593menu "Runtime Testing" 1641menu "Runtime Testing"
1594 1642
1595config LKDTM 1643config LKDTM
@@ -1749,56 +1797,6 @@ config TEST_PARMAN
1749 1797
1750 If unsure, say N. 1798 If unsure, say N.
1751 1799
1752endmenu # runtime tests
1753
1754config PROVIDE_OHCI1394_DMA_INIT
1755 bool "Remote debugging over FireWire early on boot"
1756 depends on PCI && X86
1757 help
1758 If you want to debug problems which hang or crash the kernel early
1759 on boot and the crashing machine has a FireWire port, you can use
1760 this feature to remotely access the memory of the crashed machine
1761 over FireWire. This employs remote DMA as part of the OHCI1394
1762 specification which is now the standard for FireWire controllers.
1763
1764 With remote DMA, you can monitor the printk buffer remotely using
1765 firescope and access all memory below 4GB using fireproxy from gdb.
1766 Even controlling a kernel debugger is possible using remote DMA.
1767
1768 Usage:
1769
1770 If ohci1394_dma=early is used as boot parameter, it will initialize
1771 all OHCI1394 controllers which are found in the PCI config space.
1772
1773 As all changes to the FireWire bus such as enabling and disabling
1774 devices cause a bus reset and thereby disable remote DMA for all
1775 devices, be sure to have the cable plugged and FireWire enabled on
1776 the debugging host before booting the debug target for debugging.
1777
1778 This code (~1k) is freed after boot. By then, the firewire stack
1779 in charge of the OHCI-1394 controllers should be used instead.
1780
1781 See Documentation/debugging-via-ohci1394.txt for more information.
1782
1783config DMA_API_DEBUG
1784 bool "Enable debugging of DMA-API usage"
1785 depends on HAVE_DMA_API_DEBUG
1786 help
1787 Enable this option to debug the use of the DMA API by device drivers.
1788 With this option you will be able to detect common bugs in device
1789 drivers like double-freeing of DMA mappings or freeing mappings that
1790 were never allocated.
1791
1792 This also attempts to catch cases where a page owned by DMA is
1793 accessed by the cpu in a way that could cause data corruption. For
1794 example, this enables cow_user_page() to check that the source page is
1795 not undergoing DMA.
1796
1797 This option causes a performance degradation. Use only if you want to
1798 debug device drivers and dma interactions.
1799
1800 If unsure, say N.
1801
1802config TEST_LKM 1800config TEST_LKM
1803 tristate "Test module loading with 'hello world' module" 1801 tristate "Test module loading with 'hello world' module"
1804 default n 1802 default n
@@ -1873,18 +1871,6 @@ config TEST_UDELAY
1873 1871
1874 If unsure, say N. 1872 If unsure, say N.
1875 1873
1876config MEMTEST
1877 bool "Memtest"
1878 depends on HAVE_MEMBLOCK
1879 ---help---
1880 This option adds a kernel parameter 'memtest', which allows memtest
1881 to be set.
1882 memtest=0, mean disabled; -- default
1883 memtest=1, mean do 1 test pattern;
1884 ...
1885 memtest=17, mean do 17 test patterns.
1886 If you are unsure how to answer this question, answer N.
1887
1888config TEST_STATIC_KEYS 1874config TEST_STATIC_KEYS
1889 tristate "Test static keys" 1875 tristate "Test static keys"
1890 default n 1876 default n
@@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS
1894 1880
1895 If unsure, say N. 1881 If unsure, say N.
1896 1882
1897config BUG_ON_DATA_CORRUPTION
1898 bool "Trigger a BUG when data corruption is detected"
1899 select DEBUG_LIST
1900 help
1901 Select this option if the kernel should BUG when it encounters
1902 data corruption in kernel memory structures when they get checked
1903 for validity.
1904
1905 If unsure, say N.
1906
1907config TEST_KMOD 1883config TEST_KMOD
1908 tristate "kmod stress tester" 1884 tristate "kmod stress tester"
1909 default n 1885 default n
@@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL
1941 1917
1942 If unsure, say N. 1918 If unsure, say N.
1943 1919
1920endmenu # runtime tests
1921
1922config MEMTEST
1923 bool "Memtest"
1924 depends on HAVE_MEMBLOCK
1925 ---help---
1926 This option adds a kernel parameter 'memtest', which allows memtest
1927 to be set.
1928 memtest=0, mean disabled; -- default
1929 memtest=1, mean do 1 test pattern;
1930 ...
1931 memtest=17, mean do 17 test patterns.
1932 If you are unsure how to answer this question, answer N.
1933
1934config BUG_ON_DATA_CORRUPTION
1935 bool "Trigger a BUG when data corruption is detected"
1936 select DEBUG_LIST
1937 help
1938 Select this option if the kernel should BUG when it encounters
1939 data corruption in kernel memory structures when they get checked
1940 for validity.
1941
1942 If unsure, say N.
1944 1943
1945source "samples/Kconfig" 1944source "samples/Kconfig"
1946 1945
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 155c55d8db5f..4e53be8bc590 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
598 if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0) 598 if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
599 goto all_leaves_cluster_together; 599 goto all_leaves_cluster_together;
600 600
601 /* Otherwise we can just insert a new node ahead of the old 601 /* Otherwise all the old leaves cluster in the same slot, but
602 * one. 602 * the new leaf wants to go into a different slot - so we
603 * create a new node (n0) to hold the new leaf and a pointer to
604 * a new node (n1) holding all the old leaves.
605 *
606 * This can be done by falling through to the node splitting
607 * path.
603 */ 608 */
604 goto present_leaves_cluster_but_not_new_leaf; 609 pr_devel("present leaves cluster but not new leaf\n");
605 } 610 }
606 611
607split_node: 612split_node:
608 pr_devel("split node\n"); 613 pr_devel("split node\n");
609 614
610 /* We need to split the current node; we know that the node doesn't 615 /* We need to split the current node. The node must contain anything
611 * simply contain a full set of leaves that cluster together (it 616 * from a single leaf (in the one leaf case, this leaf will cluster
612 * contains meta pointers and/or non-clustering leaves). 617 * with the new leaf) and the rest meta-pointers, to all leaves, some
618 * of which may cluster.
619 *
620 * It won't contain the case in which all the current leaves plus the
621 * new leaves want to cluster in the same slot.
613 * 622 *
614 * We need to expel at least two leaves out of a set consisting of the 623 * We need to expel at least two leaves out of a set consisting of the
615 * leaves in the node and the new leaf. 624 * leaves in the node and the new leaf. The current meta pointers can
625 * just be copied as they shouldn't cluster with any of the leaves.
616 * 626 *
617 * We need a new node (n0) to replace the current one and a new node to 627 * We need a new node (n0) to replace the current one and a new node to
618 * take the expelled nodes (n1). 628 * take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
717 pr_devel("<--%s() = ok [split node]\n", __func__); 727 pr_devel("<--%s() = ok [split node]\n", __func__);
718 return true; 728 return true;
719 729
720present_leaves_cluster_but_not_new_leaf:
721 /* All the old leaves cluster in the same slot, but the new leaf wants
722 * to go into a different slot, so we create a new node to hold the new
723 * leaf and a pointer to a new node holding all the old leaves.
724 */
725 pr_devel("present leaves cluster but not new leaf\n");
726
727 new_n0->back_pointer = node->back_pointer;
728 new_n0->parent_slot = node->parent_slot;
729 new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
730 new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
731 new_n1->parent_slot = edit->segment_cache[0];
732 new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
733 edit->adjust_count_on = new_n0;
734
735 for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
736 new_n1->slots[i] = node->slots[i];
737
738 new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
739 edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
740
741 edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
742 edit->set[0].to = assoc_array_node_to_ptr(new_n0);
743 edit->excised_meta[0] = assoc_array_node_to_ptr(node);
744 pr_devel("<--%s() = ok [insert node before]\n", __func__);
745 return true;
746
747all_leaves_cluster_together: 730all_leaves_cluster_together:
748 /* All the leaves, new and old, want to cluster together in this node 731 /* All the leaves, new and old, want to cluster together in this node
749 * in the same slot, so we have to replace this node with a shortcut to 732 * in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/digsig.c b/lib/digsig.c
index 03d7c63837ae..6ba6fcd92dd1 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
87 down_read(&key->sem); 87 down_read(&key->sem);
88 ukp = user_key_payload_locked(key); 88 ukp = user_key_payload_locked(key);
89 89
90 if (!ukp) {
91 /* key was revoked before we acquired its semaphore */
92 err = -EKEYREVOKED;
93 goto err1;
94 }
95
90 if (ukp->datalen < sizeof(*pkh)) 96 if (ukp->datalen < sizeof(*pkh))
91 goto err1; 97 goto err1;
92 98
diff --git a/lib/idr.c b/lib/idr.c
index f9adf4805fd7..edd9b2be1651 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(idr_get_next_ext);
146 * idr_alloc() and idr_remove() (as long as the ID being removed is not 146 * idr_alloc() and idr_remove() (as long as the ID being removed is not
147 * the one being replaced!). 147 * the one being replaced!).
148 * 148 *
149 * Returns: 0 on success. %-ENOENT indicates that @id was not found. 149 * Returns: the old value on success. %-ENOENT indicates that @id was not
150 * %-EINVAL indicates that @id or @ptr were not valid. 150 * found. %-EINVAL indicates that @id or @ptr were not valid.
151 */ 151 */
152void *idr_replace(struct idr *idr, void *ptr, int id) 152void *idr_replace(struct idr *idr, void *ptr, int id)
153{ 153{
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 4bb30206b942..c835f9080c43 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -161,6 +161,7 @@ int ioremap_page_range(unsigned long addr,
161 unsigned long next; 161 unsigned long next;
162 int err; 162 int err;
163 163
164 might_sleep();
164 BUG_ON(addr >= end); 165 BUG_ON(addr >= end);
165 166
166 start = addr; 167 start = addr;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index e590523ea476..f237a09a5862 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -294,6 +294,26 @@ static void cleanup_uevent_env(struct subprocess_info *info)
294} 294}
295#endif 295#endif
296 296
297static void zap_modalias_env(struct kobj_uevent_env *env)
298{
299 static const char modalias_prefix[] = "MODALIAS=";
300 int i;
301
302 for (i = 0; i < env->envp_idx;) {
303 if (strncmp(env->envp[i], modalias_prefix,
304 sizeof(modalias_prefix) - 1)) {
305 i++;
306 continue;
307 }
308
309 if (i != env->envp_idx - 1)
310 memmove(&env->envp[i], &env->envp[i + 1],
311 sizeof(env->envp[i]) * env->envp_idx - 1);
312
313 env->envp_idx--;
314 }
315}
316
297/** 317/**
298 * kobject_uevent_env - send an uevent with environmental data 318 * kobject_uevent_env - send an uevent with environmental data
299 * 319 *
@@ -409,16 +429,29 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
409 } 429 }
410 } 430 }
411 431
412 /* 432 switch (action) {
413 * Mark "add" and "remove" events in the object to ensure proper 433 case KOBJ_ADD:
414 * events to userspace during automatic cleanup. If the object did 434 /*
415 * send an "add" event, "remove" will automatically generated by 435 * Mark "add" event so we can make sure we deliver "remove"
416 * the core, if not already done by the caller. 436 * event to userspace during automatic cleanup. If
417 */ 437 * the object did send an "add" event, "remove" will
418 if (action == KOBJ_ADD) 438 * automatically generated by the core, if not already done
439 * by the caller.
440 */
419 kobj->state_add_uevent_sent = 1; 441 kobj->state_add_uevent_sent = 1;
420 else if (action == KOBJ_REMOVE) 442 break;
443
444 case KOBJ_REMOVE:
421 kobj->state_remove_uevent_sent = 1; 445 kobj->state_remove_uevent_sent = 1;
446 break;
447
448 case KOBJ_UNBIND:
449 zap_modalias_env(env);
450 break;
451
452 default:
453 break;
454 }
422 455
423 mutex_lock(&uevent_sock_mutex); 456 mutex_lock(&uevent_sock_mutex);
424 /* we will send an event, so request a new sequence number */ 457 /* we will send an event, so request a new sequence number */
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index cd0b5c964bd0..2b827b8a1d8c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
2031 print_testname("mixed read-lock/lock-write ABBA"); 2031 print_testname("mixed read-lock/lock-write ABBA");
2032 pr_cont(" |"); 2032 pr_cont(" |");
2033 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); 2033 dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
2034#ifdef CONFIG_PROVE_LOCKING
2034 /* 2035 /*
2035 * Lockdep does indeed fail here, but there's nothing we can do about 2036 * Lockdep does indeed fail here, but there's nothing we can do about
2036 * that now. Don't kill lockdep for it. 2037 * that now. Don't kill lockdep for it.
2037 */ 2038 */
2038 unexpected_testcase_failures--; 2039 unexpected_testcase_failures--;
2040#endif
2039 2041
2040 pr_cont(" |"); 2042 pr_cont(" |");
2041 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); 2043 dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index bd3574312b82..141734d255e4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -85,8 +85,8 @@ static FORCE_INLINE int LZ4_decompress_generic(
85 const BYTE * const lowLimit = lowPrefix - dictSize; 85 const BYTE * const lowLimit = lowPrefix - dictSize;
86 86
87 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; 87 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
88 const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; 88 static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
89 const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; 89 static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
90 90
91 const int safeDecode = (endOnInput == endOnInputSize); 91 const int safeDecode = (endOnInput == endOnInputSize);
92 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); 92 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 08f8043cac61..d01f47135239 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
48 if (time_is_before_jiffies(rs->begin + rs->interval)) { 48 if (time_is_before_jiffies(rs->begin + rs->interval)) {
49 if (rs->missed) { 49 if (rs->missed) {
50 if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { 50 if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
51 pr_warn("%s: %d callbacks suppressed\n", func, rs->missed); 51 printk_deferred(KERN_WARNING
52 "%s: %d callbacks suppressed\n",
53 func, rs->missed);
52 rs->missed = 0; 54 rs->missed = 0;
53 } 55 }
54 } 56 }
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 5696a35184e4..69557c74ef9f 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -11,7 +11,7 @@
11 * ========================================================================== 11 * ==========================================================================
12 * 12 *
13 * A finite state machine consists of n states (struct ts_fsm_token) 13 * A finite state machine consists of n states (struct ts_fsm_token)
14 * representing the pattern as a finite automation. The data is read 14 * representing the pattern as a finite automaton. The data is read
15 * sequentially on an octet basis. Every state token specifies the number 15 * sequentially on an octet basis. Every state token specifies the number
16 * of recurrences and the type of value accepted which can be either a 16 * of recurrences and the type of value accepted which can be either a
17 * specific character or ctype based set of characters. The available 17 * specific character or ctype based set of characters. The available
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 632f783e65f1..ffbe66cbb0ed 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -27,7 +27,7 @@
27 * 27 *
28 * [1] Cormen, Leiserson, Rivest, Stein 28 * [1] Cormen, Leiserson, Rivest, Stein
29 * Introdcution to Algorithms, 2nd Edition, MIT Press 29 * Introdcution to Algorithms, 2nd Edition, MIT Press
30 * [2] See finite automation theory 30 * [2] See finite automaton theory
31 */ 31 */
32 32
33#include <linux/module.h> 33#include <linux/module.h>
diff --git a/mm/cma.c b/mm/cma.c
index c0da318c020e..022e52bd8370 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
460 460
461 trace_cma_alloc(pfn, page, count, align); 461 trace_cma_alloc(pfn, page, count, align);
462 462
463 if (ret) { 463 if (ret && !(gfp_mask & __GFP_NOWARN)) {
464 pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", 464 pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
465 __func__, count, ret); 465 __func__, count, ret);
466 cma_debug_show_areas(cma); 466 cma_debug_show_areas(cma);
diff --git a/mm/compaction.c b/mm/compaction.c
index fb548e4c7bd4..03d31a875341 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1999,17 +1999,14 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1999 if (pgdat->kcompactd_max_order < order) 1999 if (pgdat->kcompactd_max_order < order)
2000 pgdat->kcompactd_max_order = order; 2000 pgdat->kcompactd_max_order = order;
2001 2001
2002 /*
2003 * Pairs with implicit barrier in wait_event_freezable()
2004 * such that wakeups are not missed in the lockless
2005 * waitqueue_active() call.
2006 */
2007 smp_acquire__after_ctrl_dep();
2008
2009 if (pgdat->kcompactd_classzone_idx > classzone_idx) 2002 if (pgdat->kcompactd_classzone_idx > classzone_idx)
2010 pgdat->kcompactd_classzone_idx = classzone_idx; 2003 pgdat->kcompactd_classzone_idx = classzone_idx;
2011 2004
2012 if (!waitqueue_active(&pgdat->kcompactd_wait)) 2005 /*
2006 * Pairs with implicit barrier in wait_event_freezable()
2007 * such that wakeups are not missed.
2008 */
2009 if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2013 return; 2010 return;
2014 2011
2015 if (!kcompactd_node_suitable(pgdat)) 2012 if (!kcompactd_node_suitable(pgdat))
diff --git a/mm/filemap.c b/mm/filemap.c
index 870971e20967..594d73fef8b4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -620,6 +620,14 @@ int file_check_and_advance_wb_err(struct file *file)
620 trace_file_check_and_advance_wb_err(file, old); 620 trace_file_check_and_advance_wb_err(file, old);
621 spin_unlock(&file->f_lock); 621 spin_unlock(&file->f_lock);
622 } 622 }
623
624 /*
625 * We're mostly using this function as a drop in replacement for
626 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
627 * that the legacy code would have had on these flags.
628 */
629 clear_bit(AS_EIO, &mapping->flags);
630 clear_bit(AS_ENOSPC, &mapping->flags);
623 return err; 631 return err;
624} 632}
625EXPORT_SYMBOL(file_check_and_advance_wb_err); 633EXPORT_SYMBOL(file_check_and_advance_wb_err);
@@ -2926,9 +2934,15 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
2926 * we're writing. Either one is a pretty crazy thing to do, 2934 * we're writing. Either one is a pretty crazy thing to do,
2927 * so we don't support it 100%. If this invalidation 2935 * so we don't support it 100%. If this invalidation
2928 * fails, tough, the write still worked... 2936 * fails, tough, the write still worked...
2937 *
2938 * Most of the time we do not need this since dio_complete() will do
2939 * the invalidation for us. However there are some file systems that
2940 * do not end up with dio_complete() being called, so let's not break
2941 * them by removing it completely
2929 */ 2942 */
2930 invalidate_inode_pages2_range(mapping, 2943 if (mapping->nrpages)
2931 pos >> PAGE_SHIFT, end); 2944 invalidate_inode_pages2_range(mapping,
2945 pos >> PAGE_SHIFT, end);
2932 2946
2933 if (written > 0) { 2947 if (written > 0) {
2934 pos += written; 2948 pos += written;
diff --git a/mm/ksm.c b/mm/ksm.c
index 15dd7415f7b3..6cb60f46cce5 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item,
1990 */ 1990 */
1991static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 1991static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1992{ 1992{
1993 struct mm_struct *mm = rmap_item->mm;
1993 struct rmap_item *tree_rmap_item; 1994 struct rmap_item *tree_rmap_item;
1994 struct page *tree_page = NULL; 1995 struct page *tree_page = NULL;
1995 struct stable_node *stable_node; 1996 struct stable_node *stable_node;
@@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2062 if (ksm_use_zero_pages && (checksum == zero_checksum)) { 2063 if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2063 struct vm_area_struct *vma; 2064 struct vm_area_struct *vma;
2064 2065
2065 vma = find_mergeable_vma(rmap_item->mm, rmap_item->address); 2066 down_read(&mm->mmap_sem);
2067 vma = find_mergeable_vma(mm, rmap_item->address);
2066 err = try_to_merge_one_page(vma, page, 2068 err = try_to_merge_one_page(vma, page,
2067 ZERO_PAGE(rmap_item->address)); 2069 ZERO_PAGE(rmap_item->address));
2070 up_read(&mm->mmap_sem);
2068 /* 2071 /*
2069 * In case of failure, the page was not really empty, so we 2072 * In case of failure, the page was not really empty, so we
2070 * need to continue. Otherwise we're done. 2073 * need to continue. Otherwise we're done.
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7a40fa2be858..f141f0c80ff3 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -325,12 +325,12 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
325{ 325{
326 int size = memcg_nr_cache_ids; 326 int size = memcg_nr_cache_ids;
327 327
328 nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL); 328 nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL);
329 if (!nlru->memcg_lrus) 329 if (!nlru->memcg_lrus)
330 return -ENOMEM; 330 return -ENOMEM;
331 331
332 if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { 332 if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
333 kfree(nlru->memcg_lrus); 333 kvfree(nlru->memcg_lrus);
334 return -ENOMEM; 334 return -ENOMEM;
335 } 335 }
336 336
@@ -340,7 +340,7 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
340static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) 340static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
341{ 341{
342 __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); 342 __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
343 kfree(nlru->memcg_lrus); 343 kvfree(nlru->memcg_lrus);
344} 344}
345 345
346static int memcg_update_list_lru_node(struct list_lru_node *nlru, 346static int memcg_update_list_lru_node(struct list_lru_node *nlru,
@@ -351,12 +351,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
351 BUG_ON(old_size > new_size); 351 BUG_ON(old_size > new_size);
352 352
353 old = nlru->memcg_lrus; 353 old = nlru->memcg_lrus;
354 new = kmalloc(new_size * sizeof(void *), GFP_KERNEL); 354 new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL);
355 if (!new) 355 if (!new)
356 return -ENOMEM; 356 return -ENOMEM;
357 357
358 if (__memcg_init_list_lru_node(new, old_size, new_size)) { 358 if (__memcg_init_list_lru_node(new, old_size, new_size)) {
359 kfree(new); 359 kvfree(new);
360 return -ENOMEM; 360 return -ENOMEM;
361 } 361 }
362 362
@@ -373,7 +373,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
373 nlru->memcg_lrus = new; 373 nlru->memcg_lrus = new;
374 spin_unlock_irq(&nlru->lock); 374 spin_unlock_irq(&nlru->lock);
375 375
376 kfree(old); 376 kvfree(old);
377 return 0; 377 return 0;
378} 378}
379 379
diff --git a/mm/madvise.c b/mm/madvise.c
index 21261ff0466f..fd70d6aabc3e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -625,18 +625,26 @@ static int madvise_inject_error(int behavior,
625{ 625{
626 struct page *page; 626 struct page *page;
627 struct zone *zone; 627 struct zone *zone;
628 unsigned int order;
628 629
629 if (!capable(CAP_SYS_ADMIN)) 630 if (!capable(CAP_SYS_ADMIN))
630 return -EPERM; 631 return -EPERM;
631 632
632 for (; start < end; start += PAGE_SIZE << 633
633 compound_order(compound_head(page))) { 634 for (; start < end; start += PAGE_SIZE << order) {
634 int ret; 635 int ret;
635 636
636 ret = get_user_pages_fast(start, 1, 0, &page); 637 ret = get_user_pages_fast(start, 1, 0, &page);
637 if (ret != 1) 638 if (ret != 1)
638 return ret; 639 return ret;
639 640
641 /*
642 * When soft offlining hugepages, after migrating the page
643 * we dissolve it, therefore in the second loop "page" will
644 * no longer be a compound page, and order will be 0.
645 */
646 order = compound_order(compound_head(page));
647
640 if (PageHWPoison(page)) { 648 if (PageHWPoison(page)) {
641 put_page(page); 649 put_page(page);
642 continue; 650 continue;
@@ -749,6 +757,9 @@ madvise_behavior_valid(int behavior)
749 * MADV_DONTFORK - omit this area from child's address space when forking: 757 * MADV_DONTFORK - omit this area from child's address space when forking:
750 * typically, to avoid COWing pages pinned by get_user_pages(). 758 * typically, to avoid COWing pages pinned by get_user_pages().
751 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. 759 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
760 * MADV_WIPEONFORK - present the child process with zero-filled memory in this
761 * range after a fork.
762 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
752 * MADV_HWPOISON - trigger memory error handler as if the given memory range 763 * MADV_HWPOISON - trigger memory error handler as if the given memory range
753 * were corrupted by unrecoverable hardware memory failure. 764 * were corrupted by unrecoverable hardware memory failure.
754 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. 765 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
@@ -769,7 +780,9 @@ madvise_behavior_valid(int behavior)
769 * zero - success 780 * zero - success
770 * -EINVAL - start + len < 0, start is not page-aligned, 781 * -EINVAL - start + len < 0, start is not page-aligned,
771 * "behavior" is not a valid value, or application 782 * "behavior" is not a valid value, or application
772 * is attempting to release locked or shared pages. 783 * is attempting to release locked or shared pages,
784 * or the specified address range includes file, Huge TLB,
785 * MAP_SHARED or VMPFNMAP range.
773 * -ENOMEM - addresses in the specified range are not currently 786 * -ENOMEM - addresses in the specified range are not currently
774 * mapped, or are outside the AS of the process. 787 * mapped, or are outside the AS of the process.
775 * -EIO - an I/O error occurred while paging in data. 788 * -EIO - an I/O error occurred while paging in data.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 15af3da5af02..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy)
1777 struct memcg_stock_pcp *stock; 1777 struct memcg_stock_pcp *stock;
1778 unsigned long flags; 1778 unsigned long flags;
1779 1779
1780 /*
1781 * The only protection from memory hotplug vs. drain_stock races is
1782 * that we always operate on local CPU stock here with IRQ disabled
1783 */
1780 local_irq_save(flags); 1784 local_irq_save(flags);
1781 1785
1782 stock = this_cpu_ptr(&memcg_stock); 1786 stock = this_cpu_ptr(&memcg_stock);
@@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
1821 /* If someone's already draining, avoid adding running more workers. */ 1825 /* If someone's already draining, avoid adding running more workers. */
1822 if (!mutex_trylock(&percpu_charge_mutex)) 1826 if (!mutex_trylock(&percpu_charge_mutex))
1823 return; 1827 return;
1824 /* Notify other cpus that system-wide "drain" is running */ 1828 /*
1825 get_online_cpus(); 1829 * Notify other cpus that system-wide "drain" is running
1830 * We do not care about races with the cpu hotplug because cpu down
1831 * as well as workers from this path always operate on the local
1832 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1833 */
1826 curcpu = get_cpu(); 1834 curcpu = get_cpu();
1827 for_each_online_cpu(cpu) { 1835 for_each_online_cpu(cpu) {
1828 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 1836 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1829 struct mem_cgroup *memcg; 1837 struct mem_cgroup *memcg;
1830 1838
1831 memcg = stock->cached; 1839 memcg = stock->cached;
1832 if (!memcg || !stock->nr_pages) 1840 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
1833 continue; 1841 continue;
1834 if (!mem_cgroup_is_descendant(memcg, root_memcg)) 1842 if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
1843 css_put(&memcg->css);
1835 continue; 1844 continue;
1845 }
1836 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { 1846 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1837 if (cpu == curcpu) 1847 if (cpu == curcpu)
1838 drain_local_stock(&stock->work); 1848 drain_local_stock(&stock->work);
1839 else 1849 else
1840 schedule_work_on(cpu, &stock->work); 1850 schedule_work_on(cpu, &stock->work);
1841 } 1851 }
1852 css_put(&memcg->css);
1842 } 1853 }
1843 put_cpu(); 1854 put_cpu();
1844 put_online_cpus();
1845 mutex_unlock(&percpu_charge_mutex); 1855 mutex_unlock(&percpu_charge_mutex);
1846} 1856}
1847 1857
@@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug)
5648static void uncharge_page(struct page *page, struct uncharge_gather *ug) 5658static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5649{ 5659{
5650 VM_BUG_ON_PAGE(PageLRU(page), page); 5660 VM_BUG_ON_PAGE(PageLRU(page), page);
5651 VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page); 5661 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5662 !PageHWPoison(page) , page);
5652 5663
5653 if (!page->mem_cgroup) 5664 if (!page->mem_cgroup)
5654 return; 5665 return;
@@ -5817,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
5817 if (!mem_cgroup_sockets_enabled) 5828 if (!mem_cgroup_sockets_enabled)
5818 return; 5829 return;
5819 5830
5820 /*
5821 * Socket cloning can throw us here with sk_memcg already
5822 * filled. It won't however, necessarily happen from
5823 * process context. So the test for root memcg given
5824 * the current task's memcg won't help us in this case.
5825 *
5826 * Respecting the original socket's memcg is a better
5827 * decision in this case.
5828 */
5829 if (sk->sk_memcg) {
5830 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5831 css_get(&sk->sk_memcg->css);
5832 return;
5833 }
5834
5835 rcu_read_lock(); 5831 rcu_read_lock();
5836 memcg = mem_cgroup_from_task(current); 5832 memcg = mem_cgroup_from_task(current);
5837 if (memcg == root_mem_cgroup) 5833 if (memcg == root_mem_cgroup)
diff --git a/mm/memory.c b/mm/memory.c
index ec4e15494901..a728bed16c20 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -845,7 +845,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
845 * vm_normal_page() so that we do not have to special case all 845 * vm_normal_page() so that we do not have to special case all
846 * call site of vm_normal_page(). 846 * call site of vm_normal_page().
847 */ 847 */
848 if (likely(pfn < highest_memmap_pfn)) { 848 if (likely(pfn <= highest_memmap_pfn)) {
849 struct page *page = pfn_to_page(pfn); 849 struct page *page = pfn_to_page(pfn);
850 850
851 if (is_device_public_page(page)) { 851 if (is_device_public_page(page)) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e882cb6da994..d4b5f29906b9 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -328,6 +328,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
328 if (err && (err != -EEXIST)) 328 if (err && (err != -EEXIST))
329 break; 329 break;
330 err = 0; 330 err = 0;
331 cond_resched();
331 } 332 }
332 vmemmap_populate_print_last(); 333 vmemmap_populate_print_last();
333out: 334out:
@@ -337,7 +338,7 @@ EXPORT_SYMBOL_GPL(__add_pages);
337 338
338#ifdef CONFIG_MEMORY_HOTREMOVE 339#ifdef CONFIG_MEMORY_HOTREMOVE
339/* find the smallest valid pfn in the range [start_pfn, end_pfn) */ 340/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
340static int find_smallest_section_pfn(int nid, struct zone *zone, 341static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
341 unsigned long start_pfn, 342 unsigned long start_pfn,
342 unsigned long end_pfn) 343 unsigned long end_pfn)
343{ 344{
@@ -362,7 +363,7 @@ static int find_smallest_section_pfn(int nid, struct zone *zone,
362} 363}
363 364
364/* find the biggest valid pfn in the range [start_pfn, end_pfn). */ 365/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
365static int find_biggest_section_pfn(int nid, struct zone *zone, 366static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
366 unsigned long start_pfn, 367 unsigned long start_pfn,
367 unsigned long end_pfn) 368 unsigned long end_pfn)
368{ 369{
@@ -550,7 +551,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms,
550 return ret; 551 return ret;
551 552
552 scn_nr = __section_nr(ms); 553 scn_nr = __section_nr(ms);
553 start_pfn = section_nr_to_pfn(scn_nr); 554 start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
554 __remove_zone(zone, start_pfn); 555 __remove_zone(zone, start_pfn);
555 556
556 sparse_remove_one_section(zone, ms, map_offset); 557 sparse_remove_one_section(zone, ms, map_offset);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 006ba625c0b8..a2af6d58a68f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1920 struct page *page; 1920 struct page *page;
1921 1921
1922 page = __alloc_pages(gfp, order, nid); 1922 page = __alloc_pages(gfp, order, nid);
1923 if (page && page_to_nid(page) == nid) 1923 if (page && page_to_nid(page) == nid) {
1924 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); 1924 preempt_disable();
1925 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1926 preempt_enable();
1927 }
1925 return page; 1928 return page;
1926} 1929}
1927 1930
diff --git a/mm/migrate.c b/mm/migrate.c
index 6954c1435833..e00814ca390e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start,
2146 unsigned long addr; 2146 unsigned long addr;
2147 2147
2148 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { 2148 for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2149 migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; 2149 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2150 migrate->dst[migrate->npages] = 0; 2150 migrate->dst[migrate->npages] = 0;
2151 migrate->npages++;
2151 migrate->cpages++; 2152 migrate->cpages++;
2152 } 2153 }
2153 2154
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 99736e026712..dee0f75c3013 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -40,6 +40,7 @@
40#include <linux/ratelimit.h> 40#include <linux/ratelimit.h>
41#include <linux/kthread.h> 41#include <linux/kthread.h>
42#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/mmu_notifier.h>
43 44
44#include <asm/tlb.h> 45#include <asm/tlb.h>
45#include "internal.h" 46#include "internal.h"
@@ -495,6 +496,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
495 } 496 }
496 497
497 /* 498 /*
499 * If the mm has notifiers then we would need to invalidate them around
500 * unmap_page_range and that is risky because notifiers can sleep and
501 * what they do is basically undeterministic. So let's have a short
502 * sleep to give the oom victim some more time.
503 * TODO: we really want to get rid of this ugly hack and make sure that
504 * notifiers cannot block for unbounded amount of time and add
505 * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
506 */
507 if (mm_has_notifiers(mm)) {
508 up_read(&mm->mmap_sem);
509 schedule_timeout_idle(HZ);
510 goto unlock_oom;
511 }
512
513 /*
498 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't 514 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
499 * work on the mm anymore. The check for MMF_OOM_SKIP must run 515 * work on the mm anymore. The check for MMF_OOM_SKIP must run
500 * under mmap_sem for reading because it serializes against the 516 * under mmap_sem for reading because it serializes against the
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c841af88836a..77e4d3c5c57b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1190,7 +1190,7 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
1190} 1190}
1191 1191
1192#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1192#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1193static void init_reserved_page(unsigned long pfn) 1193static void __meminit init_reserved_page(unsigned long pfn)
1194{ 1194{
1195 pg_data_t *pgdat; 1195 pg_data_t *pgdat;
1196 int nid, zid; 1196 int nid, zid;
@@ -5367,6 +5367,7 @@ not_early:
5367 5367
5368 __init_single_page(page, pfn, zone, nid); 5368 __init_single_page(page, pfn, zone, nid);
5369 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5369 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
5370 cond_resched();
5370 } else { 5371 } else {
5371 __init_single_pfn(pfn, zone, nid); 5372 __init_single_pfn(pfn, zone, nid);
5372 } 5373 }
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 6a03946469a9..53afbb919a1c 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -6,17 +6,6 @@
6 6
7#include "internal.h" 7#include "internal.h"
8 8
9static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
10{
11 pmd_t pmde;
12 /*
13 * Make sure we don't re-load pmd between present and !trans_huge check.
14 * We need a consistent view.
15 */
16 pmde = READ_ONCE(*pvmw->pmd);
17 return pmd_present(pmde) && !pmd_trans_huge(pmde);
18}
19
20static inline bool not_found(struct page_vma_mapped_walk *pvmw) 9static inline bool not_found(struct page_vma_mapped_walk *pvmw)
21{ 10{
22 page_vma_mapped_walk_done(pvmw); 11 page_vma_mapped_walk_done(pvmw);
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
116 pgd_t *pgd; 105 pgd_t *pgd;
117 p4d_t *p4d; 106 p4d_t *p4d;
118 pud_t *pud; 107 pud_t *pud;
108 pmd_t pmde;
119 109
120 /* The only possible pmd mapping has been handled on last iteration */ 110 /* The only possible pmd mapping has been handled on last iteration */
121 if (pvmw->pmd && !pvmw->pte) 111 if (pvmw->pmd && !pvmw->pte)
@@ -148,7 +138,13 @@ restart:
148 if (!pud_present(*pud)) 138 if (!pud_present(*pud))
149 return false; 139 return false;
150 pvmw->pmd = pmd_offset(pud, pvmw->address); 140 pvmw->pmd = pmd_offset(pud, pvmw->address);
151 if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { 141 /*
142 * Make sure the pmd value isn't cached in a register by the
143 * compiler and used as a stale value after we've observed a
144 * subsequent update.
145 */
146 pmde = READ_ONCE(*pvmw->pmd);
147 if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
152 pvmw->ptl = pmd_lock(mm, pvmw->pmd); 148 pvmw->ptl = pmd_lock(mm, pvmw->pmd);
153 if (likely(pmd_trans_huge(*pvmw->pmd))) { 149 if (likely(pmd_trans_huge(*pvmw->pmd))) {
154 if (pvmw->flags & PVMW_MIGRATION) 150 if (pvmw->flags & PVMW_MIGRATION)
@@ -167,17 +163,15 @@ restart:
167 return not_found(pvmw); 163 return not_found(pvmw);
168 return true; 164 return true;
169 } 165 }
170 } else 166 }
171 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
172 return not_found(pvmw); 167 return not_found(pvmw);
173 } else { 168 } else {
174 /* THP pmd was split under us: handle on pte level */ 169 /* THP pmd was split under us: handle on pte level */
175 spin_unlock(pvmw->ptl); 170 spin_unlock(pvmw->ptl);
176 pvmw->ptl = NULL; 171 pvmw->ptl = NULL;
177 } 172 }
178 } else { 173 } else if (!pmd_present(pmde)) {
179 if (!check_pmd(pvmw)) 174 return false;
180 return false;
181 } 175 }
182 if (!map_pte(pvmw)) 176 if (!map_pte(pvmw))
183 goto next_pte; 177 goto next_pte;
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index 6142484e88f7..7a58460bfd27 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -73,7 +73,7 @@ static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
73 last_alloc + 1 : 0; 73 last_alloc + 1 : 0;
74 74
75 as_len = 0; 75 as_len = 0;
76 start = chunk->start_offset; 76 start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
77 77
78 /* 78 /*
79 * If a bit is set in the allocation map, the bound_map identifies 79 * If a bit is set in the allocation map, the bound_map identifies
diff --git a/mm/percpu.c b/mm/percpu.c
index 59d44d61f5f1..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -353,6 +353,8 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
353 block->contig_hint_start); 353 block->contig_hint_start);
354 return; 354 return;
355 } 355 }
356 /* reset to satisfy the second predicate above */
357 block_off = 0;
356 358
357 *bits = block->right_free; 359 *bits = block->right_free;
358 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; 360 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
@@ -407,6 +409,8 @@ static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
407 *bit_off = pcpu_block_off_to_off(i, block->first_free); 409 *bit_off = pcpu_block_off_to_off(i, block->first_free);
408 return; 410 return;
409 } 411 }
412 /* reset to satisfy the second predicate above */
413 block_off = 0;
410 414
411 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, 415 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
412 align); 416 align);
@@ -1325,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1325 * @gfp: allocation flags 1329 * @gfp: allocation flags
1326 * 1330 *
1327 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 1331 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1328 * contain %GFP_KERNEL, the allocation is atomic. 1332 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1333 * then no warning will be triggered on invalid or failed allocation
1334 * requests.
1329 * 1335 *
1330 * RETURNS: 1336 * RETURNS:
1331 * Percpu pointer to the allocated area on success, NULL on failure. 1337 * Percpu pointer to the allocated area on success, NULL on failure.
@@ -1333,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1333static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 1339static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1334 gfp_t gfp) 1340 gfp_t gfp)
1335{ 1341{
1342 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1343 bool do_warn = !(gfp & __GFP_NOWARN);
1336 static int warn_limit = 10; 1344 static int warn_limit = 10;
1337 struct pcpu_chunk *chunk; 1345 struct pcpu_chunk *chunk;
1338 const char *err; 1346 const char *err;
1339 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1340 int slot, off, cpu, ret; 1347 int slot, off, cpu, ret;
1341 unsigned long flags; 1348 unsigned long flags;
1342 void __percpu *ptr; 1349 void __percpu *ptr;
@@ -1357,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1357 1364
1358 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 1365 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1359 !is_power_of_2(align))) { 1366 !is_power_of_2(align))) {
1360 WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1367 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1361 size, align); 1368 size, align);
1362 return NULL; 1369 return NULL;
1363 } 1370 }
@@ -1478,7 +1485,7 @@ fail_unlock:
1478fail: 1485fail:
1479 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1486 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1480 1487
1481 if (!is_atomic && warn_limit) { 1488 if (!is_atomic && do_warn && warn_limit) {
1482 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1489 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1483 size, align, is_atomic, err); 1490 size, align, is_atomic, err);
1484 dump_stack(); 1491 dump_stack();
@@ -1503,7 +1510,9 @@ fail:
1503 * 1510 *
1504 * Allocate zero-filled percpu area of @size bytes aligned at @align. If 1511 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1505 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 1512 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1506 * be called from any context but is a lot more likely to fail. 1513 * be called from any context but is a lot more likely to fail. If @gfp
1514 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1515 * allocation requests.
1507 * 1516 *
1508 * RETURNS: 1517 * RETURNS:
1509 * Percpu pointer to the allocated area on success, NULL on failure. 1518 * Percpu pointer to the allocated area on success, NULL on failure.
diff --git a/mm/rodata_test.c b/mm/rodata_test.c
index 6bb4deb12e78..d908c8769b48 100644
--- a/mm/rodata_test.c
+++ b/mm/rodata_test.c
@@ -14,7 +14,7 @@
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/sections.h> 15#include <asm/sections.h>
16 16
17const int rodata_test_data = 0xC3; 17static const int rodata_test_data = 0xC3;
18 18
19void rodata_test(void) 19void rodata_test(void)
20{ 20{
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 904a83be82de..80164599ca5d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -165,9 +165,9 @@ static int init_memcg_params(struct kmem_cache *s,
165 if (!memcg_nr_cache_ids) 165 if (!memcg_nr_cache_ids)
166 return 0; 166 return 0;
167 167
168 arr = kzalloc(sizeof(struct memcg_cache_array) + 168 arr = kvzalloc(sizeof(struct memcg_cache_array) +
169 memcg_nr_cache_ids * sizeof(void *), 169 memcg_nr_cache_ids * sizeof(void *),
170 GFP_KERNEL); 170 GFP_KERNEL);
171 if (!arr) 171 if (!arr)
172 return -ENOMEM; 172 return -ENOMEM;
173 173
@@ -178,15 +178,23 @@ static int init_memcg_params(struct kmem_cache *s,
178static void destroy_memcg_params(struct kmem_cache *s) 178static void destroy_memcg_params(struct kmem_cache *s)
179{ 179{
180 if (is_root_cache(s)) 180 if (is_root_cache(s))
181 kfree(rcu_access_pointer(s->memcg_params.memcg_caches)); 181 kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
182}
183
184static void free_memcg_params(struct rcu_head *rcu)
185{
186 struct memcg_cache_array *old;
187
188 old = container_of(rcu, struct memcg_cache_array, rcu);
189 kvfree(old);
182} 190}
183 191
184static int update_memcg_params(struct kmem_cache *s, int new_array_size) 192static int update_memcg_params(struct kmem_cache *s, int new_array_size)
185{ 193{
186 struct memcg_cache_array *old, *new; 194 struct memcg_cache_array *old, *new;
187 195
188 new = kzalloc(sizeof(struct memcg_cache_array) + 196 new = kvzalloc(sizeof(struct memcg_cache_array) +
189 new_array_size * sizeof(void *), GFP_KERNEL); 197 new_array_size * sizeof(void *), GFP_KERNEL);
190 if (!new) 198 if (!new)
191 return -ENOMEM; 199 return -ENOMEM;
192 200
@@ -198,7 +206,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size)
198 206
199 rcu_assign_pointer(s->memcg_params.memcg_caches, new); 207 rcu_assign_pointer(s->memcg_params.memcg_caches, new);
200 if (old) 208 if (old)
201 kfree_rcu(old, rcu); 209 call_rcu(&old->rcu, free_memcg_params);
202 return 0; 210 return 0;
203} 211}
204 212
diff --git a/mm/swap.c b/mm/swap.c
index 9295ae960d66..a77d68f2c1b6 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
575 void *arg) 575 void *arg)
576{ 576{
577 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && 577 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
578 !PageUnevictable(page)) { 578 !PageSwapCache(page) && !PageUnevictable(page)) {
579 bool active = PageActive(page); 579 bool active = PageActive(page);
580 580
581 del_page_from_lru_list(page, lruvec, 581 del_page_from_lru_list(page, lruvec,
@@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page)
665void mark_page_lazyfree(struct page *page) 665void mark_page_lazyfree(struct page *page)
666{ 666{
667 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && 667 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
668 !PageUnevictable(page)) { 668 !PageSwapCache(page) && !PageUnevictable(page)) {
669 struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); 669 struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);
670 670
671 get_page(page); 671 get_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 71ce2d1ccbf7..05b6803f0cce 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
39static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; 39static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
40bool swap_vma_readahead = true; 40bool swap_vma_readahead = true;
41 41
42#define SWAP_RA_MAX_ORDER_DEFAULT 3
43
44static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT;
45
46#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) 42#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
47#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) 43#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
48#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK 44#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
@@ -242,6 +238,17 @@ int add_to_swap(struct page *page)
242 * clear SWAP_HAS_CACHE flag. 238 * clear SWAP_HAS_CACHE flag.
243 */ 239 */
244 goto fail; 240 goto fail;
241 /*
242 * Normally the page will be dirtied in unmap because its pte should be
243 * dirty. A special case is MADV_FREE page. The page'e pte could have
244 * dirty bit cleared but the page's SwapBacked bit is still set because
245 * clearing the dirty bit and SwapBacked bit has no lock protected. For
246 * such page, unmap will not set dirty bit for it, so page reclaim will
247 * not write the page out. This can cause data corruption when the page
248 * is swap in later. Always setting the dirty bit for the page solves
249 * the problem.
250 */
251 set_page_dirty(page);
245 252
246 return 1; 253 return 1;
247 254
@@ -653,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
653 pte_t *tpte; 660 pte_t *tpte;
654#endif 661#endif
655 662
663 max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
664 SWAP_RA_ORDER_CEILING);
665 if (max_win == 1) {
666 swap_ra->win = 1;
667 return NULL;
668 }
669
656 faddr = vmf->address; 670 faddr = vmf->address;
657 entry = pte_to_swp_entry(vmf->orig_pte); 671 entry = pte_to_swp_entry(vmf->orig_pte);
658 if ((unlikely(non_swap_entry(entry)))) 672 if ((unlikely(non_swap_entry(entry))))
@@ -661,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
661 if (page) 675 if (page)
662 return page; 676 return page;
663 677
664 max_win = 1 << READ_ONCE(swap_ra_max_order);
665 if (max_win == 1) {
666 swap_ra->win = 1;
667 return NULL;
668 }
669
670 fpfn = PFN_DOWN(faddr); 678 fpfn = PFN_DOWN(faddr);
671 swap_ra_info = GET_SWAP_RA_VAL(vma); 679 swap_ra_info = GET_SWAP_RA_VAL(vma);
672 pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); 680 pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
@@ -775,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr =
775 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, 783 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
776 vma_ra_enabled_store); 784 vma_ra_enabled_store);
777 785
778static ssize_t vma_ra_max_order_show(struct kobject *kobj,
779 struct kobj_attribute *attr, char *buf)
780{
781 return sprintf(buf, "%d\n", swap_ra_max_order);
782}
783static ssize_t vma_ra_max_order_store(struct kobject *kobj,
784 struct kobj_attribute *attr,
785 const char *buf, size_t count)
786{
787 int err, v;
788
789 err = kstrtoint(buf, 10, &v);
790 if (err || v > SWAP_RA_ORDER_CEILING || v <= 0)
791 return -EINVAL;
792
793 swap_ra_max_order = v;
794
795 return count;
796}
797static struct kobj_attribute vma_ra_max_order_attr =
798 __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show,
799 vma_ra_max_order_store);
800
801static struct attribute *swap_attrs[] = { 786static struct attribute *swap_attrs[] = {
802 &vma_ra_enabled_attr.attr, 787 &vma_ra_enabled_attr.attr,
803 &vma_ra_max_order_attr.attr,
804 NULL, 788 NULL,
805}; 789};
806 790
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8a43db6284eb..673942094328 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1695 for (i = 0; i < area->nr_pages; i++) { 1695 for (i = 0; i < area->nr_pages; i++) {
1696 struct page *page; 1696 struct page *page;
1697 1697
1698 if (fatal_signal_pending(current)) {
1699 area->nr_pages = i;
1700 goto fail_no_warn;
1701 }
1702
1703 if (node == NUMA_NO_NODE) 1698 if (node == NUMA_NO_NODE)
1704 page = alloc_page(alloc_mask|highmem_mask); 1699 page = alloc_page(alloc_mask|highmem_mask);
1705 else 1700 else
@@ -1723,7 +1718,6 @@ fail:
1723 warn_alloc(gfp_mask, NULL, 1718 warn_alloc(gfp_mask, NULL,
1724 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1719 "vmalloc: allocation failure, allocated %ld of %ld bytes",
1725 (area->nr_pages*PAGE_SIZE), area->size); 1720 (area->nr_pages*PAGE_SIZE), area->size);
1726fail_no_warn:
1727 vfree(area->addr); 1721 vfree(area->addr);
1728 return NULL; 1722 return NULL;
1729} 1723}
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 486550df32be..b2ba2ba585f3 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
250 250
251 WARN_ON(!list_empty(&zhdr->buddy)); 251 WARN_ON(!list_empty(&zhdr->buddy));
252 set_bit(PAGE_STALE, &page->private); 252 set_bit(PAGE_STALE, &page->private);
253 clear_bit(NEEDS_COMPACTING, &page->private);
253 spin_lock(&pool->lock); 254 spin_lock(&pool->lock);
254 if (!list_empty(&page->lru)) 255 if (!list_empty(&page->lru))
255 list_del(&page->lru); 256 list_del(&page->lru);
@@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w)
303 list_del(&zhdr->buddy); 304 list_del(&zhdr->buddy);
304 if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) 305 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
305 continue; 306 continue;
306 clear_bit(NEEDS_COMPACTING, &page->private);
307 spin_unlock(&pool->stale_lock); 307 spin_unlock(&pool->stale_lock);
308 cancel_work_sync(&zhdr->work); 308 cancel_work_sync(&zhdr->work);
309 free_z3fold_page(page); 309 free_z3fold_page(page);
@@ -624,10 +624,8 @@ lookup:
624 * stale pages list. cancel_work_sync() can sleep so we must make 624 * stale pages list. cancel_work_sync() can sleep so we must make
625 * sure it won't be called in case we're in atomic context. 625 * sure it won't be called in case we're in atomic context.
626 */ 626 */
627 if (zhdr && (can_sleep || !work_pending(&zhdr->work) || 627 if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
628 !unlikely(work_busy(&zhdr->work)))) {
629 list_del(&zhdr->buddy); 628 list_del(&zhdr->buddy);
630 clear_bit(NEEDS_COMPACTING, &page->private);
631 spin_unlock(&pool->stale_lock); 629 spin_unlock(&pool->stale_lock);
632 if (can_sleep) 630 if (can_sleep)
633 cancel_work_sync(&zhdr->work); 631 cancel_work_sync(&zhdr->work);
@@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
875 goto next; 873 goto next;
876 } 874 }
877next: 875next:
876 spin_lock(&pool->lock);
878 if (test_bit(PAGE_HEADLESS, &page->private)) { 877 if (test_bit(PAGE_HEADLESS, &page->private)) {
879 if (ret == 0) { 878 if (ret == 0) {
879 spin_unlock(&pool->lock);
880 free_z3fold_page(page); 880 free_z3fold_page(page);
881 return 0; 881 return 0;
882 } 882 }
883 } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { 883 } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
884 atomic64_dec(&pool->pages_nr); 884 atomic64_dec(&pool->pages_nr);
885 spin_unlock(&pool->lock);
885 return 0; 886 return 0;
886 } 887 }
887 spin_lock(&pool->lock);
888 888
889 /* 889 /*
890 * Add to the beginning of LRU. 890 * Add to the beginning of LRU.
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e2ed69850489..0bc31de9071a 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp)
21 if (unlikely(!skb)) 21 if (unlikely(!skb))
22 return false; 22 return false;
23 23
24 if (unlikely(!(vlan_dev->flags & IFF_UP))) {
25 kfree_skb(skb);
26 *skbp = NULL;
27 return false;
28 }
29
24 skb->dev = vlan_dev; 30 skb->dev = vlan_dev;
25 if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { 31 if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
26 /* Our lower layer thinks this is not local, let's make sure. 32 /* Our lower layer thinks this is not local, let's make sure.
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index c18115d22f00..db82a40875e8 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -126,14 +126,4 @@ config BT_DEBUGFS
126 Provide extensive information about internal Bluetooth states 126 Provide extensive information about internal Bluetooth states
127 in debugfs. 127 in debugfs.
128 128
129config BT_LEGACY_IOCTL
130 bool "Enable legacy ioctl interfaces"
131 depends on BT && BT_BREDR
132 default y
133 help
134 Enable support for legacy ioctl interfaces. This is only needed
135 for old and deprecated applications using direct ioctl calls for
136 controller management. Since Linux 3.4 all configuration and
137 setup is done via mgmt interface and this is no longer needed.
138
139source "drivers/bluetooth/Kconfig" 129source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 0bad296fe0af..65d734c165bd 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -878,7 +878,6 @@ static int hci_sock_release(struct socket *sock)
878 return 0; 878 return 0;
879} 879}
880 880
881#ifdef CONFIG_BT_LEGACY_IOCTL
882static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg) 881static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
883{ 882{
884 bdaddr_t bdaddr; 883 bdaddr_t bdaddr;
@@ -1050,7 +1049,6 @@ done:
1050 release_sock(sk); 1049 release_sock(sk);
1051 return err; 1050 return err;
1052} 1051}
1053#endif
1054 1052
1055static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, 1053static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1056 int addr_len) 1054 int addr_len)
@@ -1971,11 +1969,7 @@ static const struct proto_ops hci_sock_ops = {
1971 .getname = hci_sock_getname, 1969 .getname = hci_sock_getname,
1972 .sendmsg = hci_sock_sendmsg, 1970 .sendmsg = hci_sock_sendmsg,
1973 .recvmsg = hci_sock_recvmsg, 1971 .recvmsg = hci_sock_recvmsg,
1974#ifdef CONFIG_BT_LEGACY_IOCTL
1975 .ioctl = hci_sock_ioctl, 1972 .ioctl = hci_sock_ioctl,
1976#else
1977 .ioctl = sock_no_ioctl,
1978#endif
1979 .poll = datagram_poll, 1973 .poll = datagram_poll,
1980 .listen = sock_no_listen, 1974 .listen = sock_no_listen,
1981 .shutdown = sock_no_shutdown, 1975 .shutdown = sock_no_shutdown,
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3bc890716c89..de2152730809 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
573 } 573 }
574 *vinfo_last = NULL; 574 *vinfo_last = NULL;
575 575
576 return 0; 576 return err;
577 } 577 }
578 578
579 return br_vlan_info(br, p, cmd, vinfo_curr); 579 return br_vlan_info(br, p, cmd, vinfo_curr);
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 2585b100ebbb..276b60262981 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb)
65 65
66static int __net_init broute_net_init(struct net *net) 66static int __net_init broute_net_init(struct net *net)
67{ 67{
68 net->xt.broute_table = ebt_register_table(net, &broute_table, NULL); 68 return ebt_register_table(net, &broute_table, NULL,
69 return PTR_ERR_OR_ZERO(net->xt.broute_table); 69 &net->xt.broute_table);
70} 70}
71 71
72static void __net_exit broute_net_exit(struct net *net) 72static void __net_exit broute_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 45a00dbdbcad..c41da5fac84f 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = {
93 93
94static int __net_init frame_filter_net_init(struct net *net) 94static int __net_init frame_filter_net_init(struct net *net)
95{ 95{
96 net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter); 96 return ebt_register_table(net, &frame_filter, ebt_ops_filter,
97 return PTR_ERR_OR_ZERO(net->xt.frame_filter); 97 &net->xt.frame_filter);
98} 98}
99 99
100static void __net_exit frame_filter_net_exit(struct net *net) 100static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 57cd5bb154e7..08df7406ecb3 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = {
93 93
94static int __net_init frame_nat_net_init(struct net *net) 94static int __net_init frame_nat_net_init(struct net *net)
95{ 95{
96 net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat); 96 return ebt_register_table(net, &frame_nat, ebt_ops_nat,
97 return PTR_ERR_OR_ZERO(net->xt.frame_nat); 97 &net->xt.frame_nat);
98} 98}
99 99
100static void __net_exit frame_nat_net_exit(struct net *net) 100static void __net_exit frame_nat_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 83951f978445..3b3dcf719e07 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table)
1169 kfree(table); 1169 kfree(table);
1170} 1170}
1171 1171
1172struct ebt_table * 1172int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1173ebt_register_table(struct net *net, const struct ebt_table *input_table, 1173 const struct nf_hook_ops *ops, struct ebt_table **res)
1174 const struct nf_hook_ops *ops)
1175{ 1174{
1176 struct ebt_table_info *newinfo; 1175 struct ebt_table_info *newinfo;
1177 struct ebt_table *t, *table; 1176 struct ebt_table *t, *table;
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
1183 repl->entries == NULL || repl->entries_size == 0 || 1182 repl->entries == NULL || repl->entries_size == 0 ||
1184 repl->counters != NULL || input_table->private != NULL) { 1183 repl->counters != NULL || input_table->private != NULL) {
1185 BUGPRINT("Bad table data for ebt_register_table!!!\n"); 1184 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1186 return ERR_PTR(-EINVAL); 1185 return -EINVAL;
1187 } 1186 }
1188 1187
1189 /* Don't add one table to multiple lists. */ 1188 /* Don't add one table to multiple lists. */
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table,
1252 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); 1251 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1253 mutex_unlock(&ebt_mutex); 1252 mutex_unlock(&ebt_mutex);
1254 1253
1254 WRITE_ONCE(*res, table);
1255
1255 if (!ops) 1256 if (!ops)
1256 return table; 1257 return 0;
1257 1258
1258 ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); 1259 ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
1259 if (ret) { 1260 if (ret) {
1260 __ebt_unregister_table(net, table); 1261 __ebt_unregister_table(net, table);
1261 return ERR_PTR(ret); 1262 *res = NULL;
1262 } 1263 }
1263 1264
1264 return table; 1265 return ret;
1265free_unlock: 1266free_unlock:
1266 mutex_unlock(&ebt_mutex); 1267 mutex_unlock(&ebt_mutex);
1267free_chainstack: 1268free_chainstack:
@@ -1276,7 +1277,7 @@ free_newinfo:
1276free_table: 1277free_table:
1277 kfree(table); 1278 kfree(table);
1278out: 1279out:
1279 return ERR_PTR(ret); 1280 return ret;
1280} 1281}
1281 1282
1282void ebt_unregister_table(struct net *net, struct ebt_table *table, 1283void ebt_unregister_table(struct net *net, struct ebt_table *table,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 88edac0f3e36..ecd5c703d11e 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
78static struct kmem_cache *rcv_cache __read_mostly; 78static struct kmem_cache *rcv_cache __read_mostly;
79 79
80/* table of registered CAN protocols */ 80/* table of registered CAN protocols */
81static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 81static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
82static DEFINE_MUTEX(proto_tab_lock); 82static DEFINE_MUTEX(proto_tab_lock);
83 83
84static atomic_t skbcounter = ATOMIC_INIT(0); 84static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
788 788
789 mutex_lock(&proto_tab_lock); 789 mutex_lock(&proto_tab_lock);
790 790
791 if (proto_tab[proto]) { 791 if (rcu_access_pointer(proto_tab[proto])) {
792 pr_err("can: protocol %d already registered\n", proto); 792 pr_err("can: protocol %d already registered\n", proto);
793 err = -EBUSY; 793 err = -EBUSY;
794 } else 794 } else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
812 int proto = cp->protocol; 812 int proto = cp->protocol;
813 813
814 mutex_lock(&proto_tab_lock); 814 mutex_lock(&proto_tab_lock);
815 BUG_ON(proto_tab[proto] != cp); 815 BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
816 RCU_INIT_POINTER(proto_tab[proto], NULL); 816 RCU_INIT_POINTER(proto_tab[proto], NULL);
817 mutex_unlock(&proto_tab_lock); 817 mutex_unlock(&proto_tab_lock);
818 818
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
875 spin_lock_init(&net->can.can_rcvlists_lock); 875 spin_lock_init(&net->can.can_rcvlists_lock);
876 net->can.can_rx_alldev_list = 876 net->can.can_rx_alldev_list =
877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
878 878 if (!net->can.can_rx_alldev_list)
879 goto out;
879 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); 880 net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
881 if (!net->can.can_stats)
882 goto out_free_alldev_list;
880 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); 883 net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
884 if (!net->can.can_pstats)
885 goto out_free_can_stats;
881 886
882 if (IS_ENABLED(CONFIG_PROC_FS)) { 887 if (IS_ENABLED(CONFIG_PROC_FS)) {
883 /* the statistics are updated every second (timer triggered) */ 888 /* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
892 } 897 }
893 898
894 return 0; 899 return 0;
900
901 out_free_can_stats:
902 kfree(net->can.can_stats);
903 out_free_alldev_list:
904 kfree(net->can.can_rx_alldev_list);
905 out:
906 return -ENOMEM;
895} 907}
896 908
897static void can_pernet_exit(struct net *net) 909static void can_pernet_exit(struct net *net)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 47a8748d953a..13690334efa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
1493static int bcm_release(struct socket *sock) 1493static int bcm_release(struct socket *sock)
1494{ 1494{
1495 struct sock *sk = sock->sk; 1495 struct sock *sk = sock->sk;
1496 struct net *net = sock_net(sk); 1496 struct net *net;
1497 struct bcm_sock *bo; 1497 struct bcm_sock *bo;
1498 struct bcm_op *op, *next; 1498 struct bcm_op *op, *next;
1499 1499
1500 if (sk == NULL) 1500 if (!sk)
1501 return 0; 1501 return 0;
1502 1502
1503 net = sock_net(sk);
1503 bo = bcm_sk(sk); 1504 bo = bcm_sk(sk);
1504 1505
1505 /* remove bcm_ops, timer, rx_unregister(), etc. */ 1506 /* remove bcm_ops, timer, rx_unregister(), etc. */
diff --git a/net/core/dev.c b/net/core/dev.c
index 588b473194a8..11596a302a26 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
1147 return ret; 1147 return ret;
1148} 1148}
1149 1149
1150static int dev_get_valid_name(struct net *net, 1150int dev_get_valid_name(struct net *net, struct net_device *dev,
1151 struct net_device *dev, 1151 const char *name)
1152 const char *name)
1153{ 1152{
1154 BUG_ON(!net); 1153 BUG_ON(!net);
1155 1154
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,
1165 1164
1166 return 0; 1165 return 0;
1167} 1166}
1167EXPORT_SYMBOL(dev_get_valid_name);
1168 1168
1169/** 1169/**
1170 * dev_change_name - change name of a device 1170 * dev_change_name - change name of a device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 709a4e6fb447..f9c7a88cd981 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
303 case SIOCSIFTXQLEN: 303 case SIOCSIFTXQLEN:
304 if (ifr->ifr_qlen < 0) 304 if (ifr->ifr_qlen < 0)
305 return -EINVAL; 305 return -EINVAL;
306 dev->tx_queue_len = ifr->ifr_qlen; 306 if (dev->tx_queue_len ^ ifr->ifr_qlen) {
307 unsigned int orig_len = dev->tx_queue_len;
308
309 dev->tx_queue_len = ifr->ifr_qlen;
310 err = call_netdevice_notifiers(
311 NETDEV_CHANGE_TX_QUEUE_LEN, dev);
312 err = notifier_to_errno(err);
313 if (err) {
314 dev->tx_queue_len = orig_len;
315 return err;
316 }
317 }
307 return 0; 318 return 0;
308 319
309 case SIOCSIFNAME: 320 case SIOCSIFNAME:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3228411ada0f..9a9a3d77e327 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
436EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); 436EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
437 437
438/* return false if legacy contained non-0 deprecated fields 438/* return false if legacy contained non-0 deprecated fields
439 * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated 439 * maxtxpkt/maxrxpkt. rest of ksettings always updated
440 */ 440 */
441static bool 441static bool
442convert_legacy_settings_to_link_ksettings( 442convert_legacy_settings_to_link_ksettings(
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
451 * deprecated legacy fields, and they should not use 451 * deprecated legacy fields, and they should not use
452 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS 452 * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
453 */ 453 */
454 if (legacy_settings->transceiver || 454 if (legacy_settings->maxtxpkt ||
455 legacy_settings->maxtxpkt ||
456 legacy_settings->maxrxpkt) 455 legacy_settings->maxrxpkt)
457 retval = false; 456 retval = false;
458 457
diff --git a/net/core/filter.c b/net/core/filter.c
index 82edad58d066..6ae94f825f72 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
989 989
990bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) 990bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
991{ 991{
992 bool ret = __sk_filter_charge(sk, fp); 992 if (!refcount_inc_not_zero(&fp->refcnt))
993 if (ret) 993 return false;
994 refcount_inc(&fp->refcnt); 994
995 return ret; 995 if (!__sk_filter_charge(sk, fp)) {
996 sk_filter_release(fp);
997 return false;
998 }
999 return true;
996} 1000}
997 1001
998static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) 1002static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
@@ -1835,31 +1839,32 @@ static const struct bpf_func_proto bpf_redirect_proto = {
1835 .arg2_type = ARG_ANYTHING, 1839 .arg2_type = ARG_ANYTHING,
1836}; 1840};
1837 1841
1838BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) 1842BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
1843 struct bpf_map *, map, u32, key, u64, flags)
1839{ 1844{
1840 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1845 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1841 1846
1847 /* If user passes invalid input drop the packet. */
1842 if (unlikely(flags)) 1848 if (unlikely(flags))
1843 return SK_ABORTED; 1849 return SK_DROP;
1844 1850
1845 ri->ifindex = key; 1851 tcb->bpf.key = key;
1846 ri->flags = flags; 1852 tcb->bpf.flags = flags;
1847 ri->map = map; 1853 tcb->bpf.map = map;
1848 1854
1849 return SK_REDIRECT; 1855 return SK_PASS;
1850} 1856}
1851 1857
1852struct sock *do_sk_redirect_map(void) 1858struct sock *do_sk_redirect_map(struct sk_buff *skb)
1853{ 1859{
1854 struct redirect_info *ri = this_cpu_ptr(&redirect_info); 1860 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
1855 struct sock *sk = NULL; 1861 struct sock *sk = NULL;
1856 1862
1857 if (ri->map) { 1863 if (tcb->bpf.map) {
1858 sk = __sock_map_lookup_elem(ri->map, ri->ifindex); 1864 sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
1859 1865
1860 ri->ifindex = 0; 1866 tcb->bpf.key = 0;
1861 ri->map = NULL; 1867 tcb->bpf.map = NULL;
1862 /* we do not clear flags for future lookup */
1863 } 1868 }
1864 1869
1865 return sk; 1870 return sk;
@@ -1869,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
1869 .func = bpf_sk_redirect_map, 1874 .func = bpf_sk_redirect_map,
1870 .gpl_only = false, 1875 .gpl_only = false,
1871 .ret_type = RET_INTEGER, 1876 .ret_type = RET_INTEGER,
1872 .arg1_type = ARG_CONST_MAP_PTR, 1877 .arg1_type = ARG_PTR_TO_CTX,
1873 .arg2_type = ARG_ANYTHING, 1878 .arg2_type = ARG_CONST_MAP_PTR,
1874 .arg3_type = ARG_ANYTHING, 1879 .arg3_type = ARG_ANYTHING,
1880 .arg4_type = ARG_ANYTHING,
1875}; 1881};
1876 1882
1877BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) 1883BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3679,7 +3685,6 @@ static bool sk_skb_is_valid_access(int off, int size,
3679{ 3685{
3680 if (type == BPF_WRITE) { 3686 if (type == BPF_WRITE) {
3681 switch (off) { 3687 switch (off) {
3682 case bpf_ctx_range(struct __sk_buff, mark):
3683 case bpf_ctx_range(struct __sk_buff, tc_index): 3688 case bpf_ctx_range(struct __sk_buff, tc_index):
3684 case bpf_ctx_range(struct __sk_buff, priority): 3689 case bpf_ctx_range(struct __sk_buff, priority):
3685 break; 3690 break;
@@ -3689,6 +3694,7 @@ static bool sk_skb_is_valid_access(int off, int size,
3689 } 3694 }
3690 3695
3691 switch (off) { 3696 switch (off) {
3697 case bpf_ctx_range(struct __sk_buff, mark):
3692 case bpf_ctx_range(struct __sk_buff, tc_classid): 3698 case bpf_ctx_range(struct __sk_buff, tc_classid):
3693 return false; 3699 return false;
3694 case bpf_ctx_range(struct __sk_buff, data): 3700 case bpf_ctx_range(struct __sk_buff, data):
@@ -4238,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
4238 return insn - insn_buf; 4244 return insn - insn_buf;
4239} 4245}
4240 4246
4247static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
4248 const struct bpf_insn *si,
4249 struct bpf_insn *insn_buf,
4250 struct bpf_prog *prog, u32 *target_size)
4251{
4252 struct bpf_insn *insn = insn_buf;
4253 int off;
4254
4255 switch (si->off) {
4256 case offsetof(struct __sk_buff, data_end):
4257 off = si->off;
4258 off -= offsetof(struct __sk_buff, data_end);
4259 off += offsetof(struct sk_buff, cb);
4260 off += offsetof(struct tcp_skb_cb, bpf.data_end);
4261 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
4262 si->src_reg, off);
4263 break;
4264 default:
4265 return bpf_convert_ctx_access(type, si, insn_buf, prog,
4266 target_size);
4267 }
4268
4269 return insn - insn_buf;
4270}
4271
4241const struct bpf_verifier_ops sk_filter_prog_ops = { 4272const struct bpf_verifier_ops sk_filter_prog_ops = {
4242 .get_func_proto = sk_filter_func_proto, 4273 .get_func_proto = sk_filter_func_proto,
4243 .is_valid_access = sk_filter_is_valid_access, 4274 .is_valid_access = sk_filter_is_valid_access,
@@ -4296,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = {
4296const struct bpf_verifier_ops sk_skb_prog_ops = { 4327const struct bpf_verifier_ops sk_skb_prog_ops = {
4297 .get_func_proto = sk_skb_func_proto, 4328 .get_func_proto = sk_skb_func_proto,
4298 .is_valid_access = sk_skb_is_valid_access, 4329 .is_valid_access = sk_skb_is_valid_access,
4299 .convert_ctx_access = bpf_convert_ctx_access, 4330 .convert_ctx_access = sk_skb_convert_ctx_access,
4300 .gen_prologue = sk_skb_prologue, 4331 .gen_prologue = sk_skb_prologue,
4301}; 4332};
4302 4333
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a78fd61da0ec..5ace48926b19 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1483 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1483 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1484 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1484 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1485 [IFLA_NET_NS_FD] = { .type = NLA_U32 }, 1485 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1486 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1486 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1487 * allow 0-length string (needed to remove an alias).
1488 */
1489 [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
1487 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1490 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1488 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1491 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
1489 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1492 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
2093 dev->tx_queue_len = orig_len; 2096 dev->tx_queue_len = orig_len;
2094 goto errout; 2097 goto errout;
2095 } 2098 }
2096 status |= DO_SETLINK_NOTIFY; 2099 status |= DO_SETLINK_MODIFIED;
2097 } 2100 }
2098 } 2101 }
2099 2102
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,
2248 2251
2249errout: 2252errout:
2250 if (status & DO_SETLINK_MODIFIED) { 2253 if (status & DO_SETLINK_MODIFIED) {
2251 if (status & DO_SETLINK_NOTIFY) 2254 if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
2252 netdev_state_change(dev); 2255 netdev_state_change(dev);
2253 2256
2254 if (err < 0) 2257 if (err < 0)
@@ -3854,6 +3857,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
3854 return -EMSGSIZE; 3857 return -EMSGSIZE;
3855 3858
3856 ifsm = nlmsg_data(nlh); 3859 ifsm = nlmsg_data(nlh);
3860 ifsm->family = PF_UNSPEC;
3861 ifsm->pad1 = 0;
3862 ifsm->pad2 = 0;
3857 ifsm->ifindex = dev->ifindex; 3863 ifsm->ifindex = dev->ifindex;
3858 ifsm->filter_mask = filter_mask; 3864 ifsm->filter_mask = filter_mask;
3859 3865
@@ -4276,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
4276 4282
4277 switch (event) { 4283 switch (event) {
4278 case NETDEV_REBOOT: 4284 case NETDEV_REBOOT:
4285 case NETDEV_CHANGEMTU:
4279 case NETDEV_CHANGEADDR: 4286 case NETDEV_CHANGEADDR:
4280 case NETDEV_CHANGENAME: 4287 case NETDEV_CHANGENAME:
4281 case NETDEV_FEAT_CHANGE: 4288 case NETDEV_FEAT_CHANGE:
4282 case NETDEV_BONDING_FAILOVER: 4289 case NETDEV_BONDING_FAILOVER:
4290 case NETDEV_POST_TYPE_CHANGE:
4283 case NETDEV_NOTIFY_PEERS: 4291 case NETDEV_NOTIFY_PEERS:
4292 case NETDEV_CHANGEUPPER:
4284 case NETDEV_RESEND_IGMP: 4293 case NETDEV_RESEND_IGMP:
4285 case NETDEV_CHANGEINFODATA: 4294 case NETDEV_CHANGEINFODATA:
4295 case NETDEV_CHANGE_TX_QUEUE_LEN:
4286 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), 4296 rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
4287 GFP_KERNEL); 4297 GFP_KERNEL);
4288 break; 4298 break;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 16982de649b9..24656076906d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1124 1124
1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); 1125 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 1126 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1127 struct sock *save_sk = skb->sk;
1128
1127 /* Streams do not free skb on error. Reset to prev state. */ 1129 /* Streams do not free skb on error. Reset to prev state. */
1128 msg->msg_iter = orig_iter; 1130 msg->msg_iter = orig_iter;
1131 skb->sk = sk;
1129 ___pskb_trim(skb, orig_len); 1132 ___pskb_trim(skb, orig_len);
1133 skb->sk = save_sk;
1130 return err; 1134 return err;
1131 } 1135 }
1132 1136
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1896 } 1900 }
1897 1901
1898 /* If we need update frag list, we are in troubles. 1902 /* If we need update frag list, we are in troubles.
1899 * Certainly, it possible to add an offset to skb data, 1903 * Certainly, it is possible to add an offset to skb data,
1900 * but taking into account that pulling is expected to 1904 * but taking into account that pulling is expected to
1901 * be very rare operation, it is worth to fight against 1905 * be very rare operation, it is worth to fight against
1902 * further bloating skb head and crucify ourselves here instead. 1906 * further bloating skb head and crucify ourselves here instead.
diff --git a/net/core/sock.c b/net/core/sock.c
index 9b7b6bbb2a23..415f441c63b9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1654 1654
1655 sock_copy(newsk, sk); 1655 sock_copy(newsk, sk);
1656 1656
1657 newsk->sk_prot_creator = sk->sk_prot;
1658
1657 /* SANITY */ 1659 /* SANITY */
1658 if (likely(newsk->sk_net_refcnt)) 1660 if (likely(newsk->sk_net_refcnt))
1659 get_net(sock_net(newsk)); 1661 get_net(sock_net(newsk));
@@ -1675,20 +1677,28 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1675 newsk->sk_dst_pending_confirm = 0; 1677 newsk->sk_dst_pending_confirm = 0;
1676 newsk->sk_wmem_queued = 0; 1678 newsk->sk_wmem_queued = 0;
1677 newsk->sk_forward_alloc = 0; 1679 newsk->sk_forward_alloc = 0;
1680
1681 /* sk->sk_memcg will be populated at accept() time */
1682 newsk->sk_memcg = NULL;
1683
1678 atomic_set(&newsk->sk_drops, 0); 1684 atomic_set(&newsk->sk_drops, 0);
1679 newsk->sk_send_head = NULL; 1685 newsk->sk_send_head = NULL;
1680 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1686 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1681 atomic_set(&newsk->sk_zckey, 0); 1687 atomic_set(&newsk->sk_zckey, 0);
1682 1688
1683 sock_reset_flag(newsk, SOCK_DONE); 1689 sock_reset_flag(newsk, SOCK_DONE);
1690 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1684 1691
1685 filter = rcu_dereference_protected(newsk->sk_filter, 1); 1692 rcu_read_lock();
1693 filter = rcu_dereference(sk->sk_filter);
1686 if (filter != NULL) 1694 if (filter != NULL)
1687 /* though it's an empty new sock, the charging may fail 1695 /* though it's an empty new sock, the charging may fail
1688 * if sysctl_optmem_max was changed between creation of 1696 * if sysctl_optmem_max was changed between creation of
1689 * original socket and cloning 1697 * original socket and cloning
1690 */ 1698 */
1691 is_charged = sk_filter_charge(newsk, filter); 1699 is_charged = sk_filter_charge(newsk, filter);
1700 RCU_INIT_POINTER(newsk->sk_filter, filter);
1701 rcu_read_unlock();
1692 1702
1693 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1703 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1694 /* We need to make sure that we don't uncharge the new 1704 /* We need to make sure that we don't uncharge the new
@@ -1709,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1709 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1719 newsk->sk_incoming_cpu = raw_smp_processor_id();
1710 atomic64_set(&newsk->sk_cookie, 0); 1720 atomic64_set(&newsk->sk_cookie, 0);
1711 1721
1712 mem_cgroup_sk_alloc(newsk);
1713 cgroup_sk_alloc(&newsk->sk_cgrp_data);
1714
1715 /* 1722 /*
1716 * Before updating sk_refcnt, we must commit prior changes to memory 1723 * Before updating sk_refcnt, we must commit prior changes to memory
1717 * (Documentation/RCU/rculist_nulls.txt for details) 1724 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
36 * soft irq of receive path or setsockopt from process context 36 * soft irq of receive path or setsockopt from process context
37 */ 37 */
38 spin_lock_bh(&reuseport_lock); 38 spin_lock_bh(&reuseport_lock);
39 WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb, 39
40 lockdep_is_held(&reuseport_lock)), 40 /* Allocation attempts can occur concurrently via the setsockopt path
41 "multiple allocations for the same socket"); 41 * and the bind/hash path. Nothing to do when we lose the race.
42 */
43 if (rcu_dereference_protected(sk->sk_reuseport_cb,
44 lockdep_is_held(&reuseport_lock)))
45 goto out;
46
42 reuse = __reuseport_alloc(INIT_SOCKS); 47 reuse = __reuseport_alloc(INIT_SOCKS);
43 if (!reuse) { 48 if (!reuse) {
44 spin_unlock_bh(&reuseport_lock); 49 spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
49 reuse->num_socks = 1; 54 reuse->num_socks = 1;
50 rcu_assign_pointer(sk->sk_reuseport_cb, reuse); 55 rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
51 56
57out:
52 spin_unlock_bh(&reuseport_lock); 58 spin_unlock_bh(&reuseport_lock);
53 59
54 return 0; 60 return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 001c08696334..e65fcb45c3f6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
414 sk_daddr_set(newsk, ireq->ir_rmt_addr); 414 sk_daddr_set(newsk, ireq->ir_rmt_addr);
415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 415 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
416 newinet->inet_saddr = ireq->ir_loc_addr; 416 newinet->inet_saddr = ireq->ir_loc_addr;
417 newinet->inet_opt = ireq->opt; 417 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
418 ireq->opt = NULL;
419 newinet->mc_index = inet_iif(skb); 418 newinet->mc_index = inet_iif(skb);
420 newinet->mc_ttl = ip_hdr(skb)->ttl; 419 newinet->mc_ttl = ip_hdr(skb)->ttl;
421 newinet->inet_id = jiffies; 420 newinet->inet_id = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
430 if (__inet_inherit_port(sk, newsk) < 0) 429 if (__inet_inherit_port(sk, newsk) < 0)
431 goto put_and_exit; 430 goto put_and_exit;
432 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 431 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
433 432 if (*own_req)
433 ireq->ireq_opt = NULL;
434 else
435 newinet->inet_opt = NULL;
434 return newsk; 436 return newsk;
435 437
436exit_overflow: 438exit_overflow:
@@ -441,6 +443,7 @@ exit:
441 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 443 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
442 return NULL; 444 return NULL;
443put_and_exit: 445put_and_exit:
446 newinet->inet_opt = NULL;
444 inet_csk_prepare_forced_close(newsk); 447 inet_csk_prepare_forced_close(newsk);
445 dccp_done(newsk); 448 dccp_done(newsk);
446 goto exit; 449 goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
492 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
493 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
494 ireq->ir_rmt_addr, 497 ireq->ir_rmt_addr,
495 ireq->opt); 498 ireq_opt_deref(ireq));
496 err = net_xmit_eval(err); 499 err = net_xmit_eval(err);
497 } 500 }
498 501
@@ -548,7 +551,7 @@ out:
548static void dccp_v4_reqsk_destructor(struct request_sock *req) 551static void dccp_v4_reqsk_destructor(struct request_sock *req)
549{ 552{
550 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); 553 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
551 kfree(inet_rsk(req)->opt); 554 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
552} 555}
553 556
554void dccp_syn_ack_timeout(const struct request_sock *req) 557void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412c7b27..e1d4d898a007 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
224static void dns_resolver_describe(const struct key *key, struct seq_file *m) 224static void dns_resolver_describe(const struct key *key, struct seq_file *m)
225{ 225{
226 seq_puts(m, key->description); 226 seq_puts(m, key->description);
227 if (key_is_instantiated(key)) { 227 if (key_is_positive(key)) {
228 int err = PTR_ERR(key->payload.data[dns_key_error]); 228 int err = PTR_ERR(key->payload.data[dns_key_error]);
229 229
230 if (err) 230 if (err)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 873af0108e24..045d8a176279 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
496 if (!ethernet) 496 if (!ethernet)
497 return -EINVAL; 497 return -EINVAL;
498 ethernet_dev = of_find_net_device_by_node(ethernet); 498 ethernet_dev = of_find_net_device_by_node(ethernet);
499 if (!ethernet_dev)
500 return -EPROBE_DEFER;
499 } else { 501 } else {
500 ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]); 502 ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
503 if (!ethernet_dev)
504 return -EPROBE_DEFER;
501 dev_put(ethernet_dev); 505 dev_put(ethernet_dev);
502 } 506 }
503 507
504 if (!ethernet_dev)
505 return -EPROBE_DEFER;
506
507 if (!dst->cpu_dp) { 508 if (!dst->cpu_dp) {
508 dst->cpu_dp = port; 509 dst->cpu_dp = port;
509 dst->cpu_dp->netdev = ethernet_dev; 510 dst->cpu_dp->netdev = ethernet_dev;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2afa99506f8b..865e29e62bad 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
1301 p->old_duplex = -1; 1301 p->old_duplex = -1;
1302 1302
1303 port->netdev = slave_dev; 1303 port->netdev = slave_dev;
1304 ret = register_netdev(slave_dev);
1305 if (ret) {
1306 netdev_err(master, "error %d registering interface %s\n",
1307 ret, slave_dev->name);
1308 port->netdev = NULL;
1309 free_percpu(p->stats64);
1310 free_netdev(slave_dev);
1311 return ret;
1312 }
1313 1304
1314 netif_carrier_off(slave_dev); 1305 netif_carrier_off(slave_dev);
1315 1306
1316 ret = dsa_slave_phy_setup(p, slave_dev); 1307 ret = dsa_slave_phy_setup(p, slave_dev);
1317 if (ret) { 1308 if (ret) {
1318 netdev_err(master, "error %d setting up slave phy\n", ret); 1309 netdev_err(master, "error %d setting up slave phy\n", ret);
1319 unregister_netdev(slave_dev); 1310 goto out_free;
1320 free_percpu(p->stats64); 1311 }
1321 free_netdev(slave_dev); 1312
1322 return ret; 1313 ret = register_netdev(slave_dev);
1314 if (ret) {
1315 netdev_err(master, "error %d registering interface %s\n",
1316 ret, slave_dev->name);
1317 goto out_phy;
1323 } 1318 }
1324 1319
1325 return 0; 1320 return 0;
1321
1322out_phy:
1323 phy_disconnect(p->phy);
1324 if (of_phy_is_fixed_link(p->dp->dn))
1325 of_phy_deregister_fixed_link(p->dp->dn);
1326out_free:
1327 free_percpu(p->stats64);
1328 free_netdev(slave_dev);
1329 port->netdev = NULL;
1330 return ret;
1326} 1331}
1327 1332
1328void dsa_slave_destroy(struct net_device *slave_dev) 1333void dsa_slave_destroy(struct net_device *slave_dev)
diff --git a/net/ife/ife.c b/net/ife/ife.c
index f360341c72eb..7d1ec76e7f43 100644
--- a/net/ife/ife.c
+++ b/net/ife/ife.c
@@ -137,6 +137,6 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
137EXPORT_SYMBOL_GPL(ife_tlv_meta_encode); 137EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
138 138
139MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>"); 139MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>");
140MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 140MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
141MODULE_DESCRIPTION("Inter-FE LFB action"); 141MODULE_DESCRIPTION("Inter-FE LFB action");
142MODULE_LICENSE("GPL"); 142MODULE_LICENSE("GPL");
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 91a2557942fa..f48fe6fc7e8c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
70 address into account. Furthermore, the TOS (Type-Of-Service) field 70 address into account. Furthermore, the TOS (Type-Of-Service) field
71 of the packet can be used for routing decisions as well. 71 of the packet can be used for routing decisions as well.
72 72
73 If you are interested in this, please see the preliminary 73 If you need more information, see the Linux Advanced
74 documentation at <http://www.compendium.com.ar/policy-routing.txt> 74 Routing and Traffic Control documentation at
75 and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>. 75 <http://lartc.org/howto/lartc.rpdb.html>
76 You will need supporting software from
77 <ftp://ftp.tux.org/pub/net/ip-routing/>.
78 76
79 If unsure, say N. 77 If unsure, say N.
80 78
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2ae8f54cb321..82178cc69c96 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1951 buf = NULL; 1951 buf = NULL;
1952 1952
1953 req_inet = inet_rsk(req); 1953 req_inet = inet_rsk(req);
1954 opt = xchg(&req_inet->opt, opt); 1954 opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
1955 if (opt) 1955 if (opt)
1956 kfree_rcu(opt, rcu); 1956 kfree_rcu(opt, rcu);
1957 1957
@@ -1973,11 +1973,13 @@ req_setattr_failure:
1973 * values on failure. 1973 * values on failure.
1974 * 1974 *
1975 */ 1975 */
1976static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) 1976static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
1977{ 1977{
1978 struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
1978 int hdr_delta = 0; 1979 int hdr_delta = 0;
1979 struct ip_options_rcu *opt = *opt_ptr;
1980 1980
1981 if (!opt || opt->opt.cipso == 0)
1982 return 0;
1981 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { 1983 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
1982 u8 cipso_len; 1984 u8 cipso_len;
1983 u8 cipso_off; 1985 u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2039 */ 2041 */
2040void cipso_v4_sock_delattr(struct sock *sk) 2042void cipso_v4_sock_delattr(struct sock *sk)
2041{ 2043{
2042 int hdr_delta;
2043 struct ip_options_rcu *opt;
2044 struct inet_sock *sk_inet; 2044 struct inet_sock *sk_inet;
2045 int hdr_delta;
2045 2046
2046 sk_inet = inet_sk(sk); 2047 sk_inet = inet_sk(sk);
2047 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2048 if (!opt || opt->opt.cipso == 0)
2049 return;
2050 2048
2051 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); 2049 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2052 if (sk_inet->is_icsk && hdr_delta > 0) { 2050 if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
2066 */ 2064 */
2067void cipso_v4_req_delattr(struct request_sock *req) 2065void cipso_v4_req_delattr(struct request_sock *req)
2068{ 2066{
2069 struct ip_options_rcu *opt; 2067 cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
2070 struct inet_request_sock *req_inet;
2071
2072 req_inet = inet_rsk(req);
2073 opt = req_inet->opt;
2074 if (!opt || opt->opt.cipso == 0)
2075 return;
2076
2077 cipso_v4_delopt(&req_inet->opt);
2078} 2068}
2079 2069
2080/** 2070/**
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 416bb304a281..1859c473b21a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
86 greh = (struct gre_base_hdr *)skb_transport_header(skb); 86 greh = (struct gre_base_hdr *)skb_transport_header(skb);
87 pcsum = (__sum16 *)(greh + 1); 87 pcsum = (__sum16 *)(greh + 1);
88 88
89 if (gso_partial) { 89 if (gso_partial && skb_is_gso(skb)) {
90 unsigned int partial_adj; 90 unsigned int partial_adj;
91 91
92 /* Adjust checksum to account for the fact that 92 /* Adjust checksum to account for the fact that
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c039c937ba90..b47a59cb3573 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
475 } 475 }
476 spin_unlock_bh(&queue->fastopenq.lock); 476 spin_unlock_bh(&queue->fastopenq.lock);
477 } 477 }
478 mem_cgroup_sk_alloc(newsk);
478out: 479out:
479 release_sock(sk); 480 release_sock(sk);
480 if (req) 481 if (req)
@@ -539,9 +540,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
539{ 540{
540 const struct inet_request_sock *ireq = inet_rsk(req); 541 const struct inet_request_sock *ireq = inet_rsk(req);
541 struct net *net = read_pnet(&ireq->ireq_net); 542 struct net *net = read_pnet(&ireq->ireq_net);
542 struct ip_options_rcu *opt = ireq->opt; 543 struct ip_options_rcu *opt;
543 struct rtable *rt; 544 struct rtable *rt;
544 545
546 opt = ireq_opt_deref(ireq);
547
545 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 548 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
546 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 549 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
547 sk->sk_protocol, inet_sk_flowi_flags(sk), 550 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -575,10 +578,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
575 struct flowi4 *fl4; 578 struct flowi4 *fl4;
576 struct rtable *rt; 579 struct rtable *rt;
577 580
581 opt = rcu_dereference(ireq->ireq_opt);
578 fl4 = &newinet->cork.fl.u.ip4; 582 fl4 = &newinet->cork.fl.u.ip4;
579 583
580 rcu_read_lock();
581 opt = rcu_dereference(newinet->inet_opt);
582 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 584 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
583 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 585 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
584 sk->sk_protocol, inet_sk_flowi_flags(sk), 586 sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -591,13 +593,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
591 goto no_route; 593 goto no_route;
592 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 594 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
593 goto route_err; 595 goto route_err;
594 rcu_read_unlock();
595 return &rt->dst; 596 return &rt->dst;
596 597
597route_err: 598route_err:
598 ip_rt_put(rt); 599 ip_rt_put(rt);
599no_route: 600no_route:
600 rcu_read_unlock();
601 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 601 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
602 return NULL; 602 return NULL;
603} 603}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 597bb4cfe805..e7d15fb0d94d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
456 return reuseport_add_sock(sk, sk2); 456 return reuseport_add_sock(sk, sk2);
457 } 457 }
458 458
459 /* Initial allocation may have already happened via setsockopt */ 459 return reuseport_alloc(sk);
460 if (!rcu_access_pointer(sk->sk_reuseport_cb))
461 return reuseport_alloc(sk);
462 return 0;
463} 460}
464 461
465int __inet_hash(struct sock *sk, struct sock *osk) 462int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e7eb590c86ce..b20c8ac64081 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
128 break; 128 break;
129 } 129 }
130 if (cmp == -1) 130 if (cmp == -1)
131 pp = &(*pp)->rb_left; 131 pp = &next->rb_left;
132 else 132 else
133 pp = &(*pp)->rb_right; 133 pp = &next->rb_right;
134 } 134 }
135 *parent_p = parent; 135 *parent_p = parent;
136 *pp_p = pp; 136 *pp_p = pp;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 0162fb955b33..467e44d7587d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
259 struct ip_tunnel *tunnel; 259 struct ip_tunnel *tunnel;
260 struct erspanhdr *ershdr; 260 struct erspanhdr *ershdr;
261 const struct iphdr *iph; 261 const struct iphdr *iph;
262 __be32 session_id;
263 __be32 index; 262 __be32 index;
264 int len; 263 int len;
265 264
@@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
275 /* The original GRE header does not have key field, 274 /* The original GRE header does not have key field,
276 * Use ERSPAN 10-bit session ID as key. 275 * Use ERSPAN 10-bit session ID as key.
277 */ 276 */
278 session_id = cpu_to_be32(ntohs(ershdr->session_id)); 277 tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
279 tpi->key = session_id;
280 index = ershdr->md.index; 278 index = ershdr->md.index;
281 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, 279 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
282 tpi->flags | TUNNEL_KEY, 280 tpi->flags | TUNNEL_KEY,
@@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
733 if (skb_cow_head(skb, dev->needed_headroom)) 731 if (skb_cow_head(skb, dev->needed_headroom))
734 goto free_skb; 732 goto free_skb;
735 733
736 if (skb->len > dev->mtu) { 734 if (skb->len - dev->hard_header_len > dev->mtu) {
737 pskb_trim(skb, dev->mtu); 735 pskb_trim(skb, dev->mtu);
738 truncate = true; 736 truncate = true;
739 } 737 }
@@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev)
1223{ 1221{
1224 __gre_tunnel_init(dev); 1222 __gre_tunnel_init(dev);
1225 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1223 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1224 netif_keep_dst(dev);
1226 1225
1227 return ip_tunnel_init(dev); 1226 return ip_tunnel_init(dev);
1228} 1227}
@@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev)
1246 1245
1247 tunnel->tun_hlen = 8; 1246 tunnel->tun_hlen = 8;
1248 tunnel->parms.iph.protocol = IPPROTO_GRE; 1247 tunnel->parms.iph.protocol = IPPROTO_GRE;
1249 t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr); 1248 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1249 sizeof(struct erspanhdr);
1250 t_hlen = tunnel->hlen + sizeof(struct iphdr);
1250 1251
1251 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; 1252 dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
1252 dev->mtu = ETH_DATA_LEN - t_hlen - 4; 1253 dev->mtu = ETH_DATA_LEN - t_hlen - 4;
1253 dev->features |= GRE_FEATURES; 1254 dev->features |= GRE_FEATURES;
1254 dev->hw_features |= GRE_FEATURES; 1255 dev->hw_features |= GRE_FEATURES;
1255 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1256 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1257 netif_keep_dst(dev);
1256 1258
1257 return ip_tunnel_init(dev); 1259 return ip_tunnel_init(dev);
1258} 1260}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fa2dc8f692c6..57fc13c6ab2b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -311,9 +311,10 @@ drop:
311static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 311static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
312{ 312{
313 const struct iphdr *iph = ip_hdr(skb); 313 const struct iphdr *iph = ip_hdr(skb);
314 struct rtable *rt; 314 int (*edemux)(struct sk_buff *skb);
315 struct net_device *dev = skb->dev; 315 struct net_device *dev = skb->dev;
316 void (*edemux)(struct sk_buff *skb); 316 struct rtable *rt;
317 int err;
317 318
318 /* if ingress device is enslaved to an L3 master device pass the 319 /* if ingress device is enslaved to an L3 master device pass the
319 * skb to its handler for processing 320 * skb to its handler for processing
@@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
331 332
332 ipprot = rcu_dereference(inet_protos[protocol]); 333 ipprot = rcu_dereference(inet_protos[protocol]);
333 if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { 334 if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
334 edemux(skb); 335 err = edemux(skb);
336 if (unlikely(err))
337 goto drop_error;
335 /* must reload iph, skb->head might have changed */ 338 /* must reload iph, skb->head might have changed */
336 iph = ip_hdr(skb); 339 iph = ip_hdr(skb);
337 } 340 }
@@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
342 * how the packet travels inside Linux networking. 345 * how the packet travels inside Linux networking.
343 */ 346 */
344 if (!skb_valid_dst(skb)) { 347 if (!skb_valid_dst(skb)) {
345 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, 348 err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
346 iph->tos, dev); 349 iph->tos, dev);
347 if (unlikely(err)) { 350 if (unlikely(err))
348 if (err == -EXDEV) 351 goto drop_error;
349 __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
350 goto drop;
351 }
352 } 352 }
353 353
354#ifdef CONFIG_IP_ROUTE_CLASSID 354#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
399drop: 399drop:
400 kfree_skb(skb); 400 kfree_skb(skb);
401 return NET_RX_DROP; 401 return NET_RX_DROP;
402
403drop_error:
404 if (err == -EXDEV)
405 __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
406 goto drop;
402} 407}
403 408
404/* 409/*
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 5ed63d250950..89453cf62158 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
168 struct ip_tunnel_parm *parms = &tunnel->parms; 168 struct ip_tunnel_parm *parms = &tunnel->parms;
169 struct dst_entry *dst = skb_dst(skb); 169 struct dst_entry *dst = skb_dst(skb);
170 struct net_device *tdev; /* Device to other host */ 170 struct net_device *tdev; /* Device to other host */
171 int pkt_len = skb->len;
171 int err; 172 int err;
172 int mtu; 173 int mtu;
173 174
@@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
229 230
230 err = dst_output(tunnel->net, skb->sk, skb); 231 err = dst_output(tunnel->net, skb->sk, skb);
231 if (net_xmit_eval(err) == 0) 232 if (net_xmit_eval(err) == 0)
232 err = skb->len; 233 err = pkt_len;
233 iptunnel_xmit_stats(dev, err); 234 iptunnel_xmit_stats(dev, err);
234 return NETDEV_TX_OK; 235 return NETDEV_TX_OK;
235 236
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index fb1ad22b5e29..cdd627355ed1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
128 128
129static int ipip_err(struct sk_buff *skb, u32 info) 129static int ipip_err(struct sk_buff *skb, u32 info)
130{ 130{
131 131 /* All the routers (except for Linux) return only
132/* All the routers (except for Linux) return only 132 * 8 bytes of packet payload. It means, that precise relaying of
133 8 bytes of packet payload. It means, that precise relaying of 133 * ICMP in the real Internet is absolutely infeasible.
134 ICMP in the real Internet is absolutely infeasible. 134 */
135 */
136 struct net *net = dev_net(skb->dev); 135 struct net *net = dev_net(skb->dev);
137 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); 136 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
138 const struct iphdr *iph = (const struct iphdr *)skb->data; 137 const struct iphdr *iph = (const struct iphdr *)skb->data;
139 struct ip_tunnel *t;
140 int err;
141 const int type = icmp_hdr(skb)->type; 138 const int type = icmp_hdr(skb)->type;
142 const int code = icmp_hdr(skb)->code; 139 const int code = icmp_hdr(skb)->code;
140 struct ip_tunnel *t;
141 int err = 0;
142
143 switch (type) {
144 case ICMP_DEST_UNREACH:
145 switch (code) {
146 case ICMP_SR_FAILED:
147 /* Impossible event. */
148 goto out;
149 default:
150 /* All others are translated to HOST_UNREACH.
151 * rfc2003 contains "deep thoughts" about NET_UNREACH,
152 * I believe they are just ether pollution. --ANK
153 */
154 break;
155 }
156 break;
157
158 case ICMP_TIME_EXCEEDED:
159 if (code != ICMP_EXC_TTL)
160 goto out;
161 break;
162
163 case ICMP_REDIRECT:
164 break;
165
166 default:
167 goto out;
168 }
143 169
144 err = -ENOENT;
145 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 170 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
146 iph->daddr, iph->saddr, 0); 171 iph->daddr, iph->saddr, 0);
147 if (!t) 172 if (!t) {
173 err = -ENOENT;
148 goto out; 174 goto out;
175 }
149 176
150 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 177 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
151 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 178 ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
152 t->parms.link, 0, iph->protocol, 0); 179 iph->protocol, 0);
153 err = 0;
154 goto out; 180 goto out;
155 } 181 }
156 182
157 if (type == ICMP_REDIRECT) { 183 if (type == ICMP_REDIRECT) {
158 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 184 ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
159 iph->protocol, 0);
160 err = 0;
161 goto out; 185 goto out;
162 } 186 }
163 187
164 if (t->parms.iph.daddr == 0) 188 if (t->parms.iph.daddr == 0) {
189 err = -ENOENT;
165 goto out; 190 goto out;
191 }
166 192
167 err = 0;
168 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 193 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
169 goto out; 194 goto out;
170 195
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 811689e523c3..f75fc6b53115 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv,
330 if (synproxy == NULL) 330 if (synproxy == NULL)
331 return NF_ACCEPT; 331 return NF_ACCEPT;
332 332
333 if (nf_is_loopback_packet(skb)) 333 if (nf_is_loopback_packet(skb) ||
334 ip_hdr(skb)->protocol != IPPROTO_TCP)
334 return NF_ACCEPT; 335 return NF_ACCEPT;
335 336
336 thoff = ip_hdrlen(skb); 337 thoff = ip_hdrlen(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 94d4cd2d5ea4..3d9f1c2f81c5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
1520EXPORT_SYMBOL(rt_dst_alloc); 1520EXPORT_SYMBOL(rt_dst_alloc);
1521 1521
1522/* called in rcu_read_lock() section */ 1522/* called in rcu_read_lock() section */
1523static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1523int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1524 u8 tos, struct net_device *dev, int our) 1524 u8 tos, struct net_device *dev,
1525 struct in_device *in_dev, u32 *itag)
1525{ 1526{
1526 struct rtable *rth;
1527 struct in_device *in_dev = __in_dev_get_rcu(dev);
1528 unsigned int flags = RTCF_MULTICAST;
1529 u32 itag = 0;
1530 int err; 1527 int err;
1531 1528
1532 /* Primary sanity checks. */ 1529 /* Primary sanity checks. */
1533
1534 if (!in_dev) 1530 if (!in_dev)
1535 return -EINVAL; 1531 return -EINVAL;
1536 1532
1537 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || 1533 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1538 skb->protocol != htons(ETH_P_IP)) 1534 skb->protocol != htons(ETH_P_IP))
1539 goto e_inval; 1535 return -EINVAL;
1540 1536
1541 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) 1537 if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1542 goto e_inval; 1538 return -EINVAL;
1543 1539
1544 if (ipv4_is_zeronet(saddr)) { 1540 if (ipv4_is_zeronet(saddr)) {
1545 if (!ipv4_is_local_multicast(daddr)) 1541 if (!ipv4_is_local_multicast(daddr))
1546 goto e_inval; 1542 return -EINVAL;
1547 } else { 1543 } else {
1548 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, 1544 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1549 in_dev, &itag); 1545 in_dev, itag);
1550 if (err < 0) 1546 if (err < 0)
1551 goto e_err; 1547 return err;
1552 } 1548 }
1549 return 0;
1550}
1551
1552/* called in rcu_read_lock() section */
1553static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1554 u8 tos, struct net_device *dev, int our)
1555{
1556 struct in_device *in_dev = __in_dev_get_rcu(dev);
1557 unsigned int flags = RTCF_MULTICAST;
1558 struct rtable *rth;
1559 u32 itag = 0;
1560 int err;
1561
1562 err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1563 if (err)
1564 return err;
1565
1553 if (our) 1566 if (our)
1554 flags |= RTCF_LOCAL; 1567 flags |= RTCF_LOCAL;
1555 1568
1556 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, 1569 rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1557 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); 1570 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1558 if (!rth) 1571 if (!rth)
1559 goto e_nobufs; 1572 return -ENOBUFS;
1560 1573
1561#ifdef CONFIG_IP_ROUTE_CLASSID 1574#ifdef CONFIG_IP_ROUTE_CLASSID
1562 rth->dst.tclassid = itag; 1575 rth->dst.tclassid = itag;
@@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1572 1585
1573 skb_dst_set(skb, &rth->dst); 1586 skb_dst_set(skb, &rth->dst);
1574 return 0; 1587 return 0;
1575
1576e_nobufs:
1577 return -ENOBUFS;
1578e_inval:
1579 return -EINVAL;
1580e_err:
1581 return err;
1582} 1588}
1583 1589
1584 1590
@@ -2507,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2507 struct rtable *ort = (struct rtable *) dst_orig; 2513 struct rtable *ort = (struct rtable *) dst_orig;
2508 struct rtable *rt; 2514 struct rtable *rt;
2509 2515
2510 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); 2516 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2511 if (rt) { 2517 if (rt) {
2512 struct dst_entry *new = &rt->dst; 2518 struct dst_entry *new = &rt->dst;
2513 2519
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b1bb1b3a1082..77cf32a80952 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
355 /* We throwed the options of the initial SYN away, so we hope 355 /* We throwed the options of the initial SYN away, so we hope
356 * the ACK carries the same options again (see RFC1122 4.2.3.8) 356 * the ACK carries the same options again (see RFC1122 4.2.3.8)
357 */ 357 */
358 ireq->opt = tcp_v4_save_options(sock_net(sk), skb); 358 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
359 359
360 if (security_inet_conn_request(sk, skb, req)) { 360 if (security_inet_conn_request(sk, skb, req)) {
361 reqsk_free(req); 361 reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c5d7656beeee..7eec3383702b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
6196 struct inet_request_sock *ireq = inet_rsk(req); 6196 struct inet_request_sock *ireq = inet_rsk(req);
6197 6197
6198 kmemcheck_annotate_bitfield(ireq, flags); 6198 kmemcheck_annotate_bitfield(ireq, flags);
6199 ireq->opt = NULL; 6199 ireq->ireq_opt = NULL;
6200#if IS_ENABLED(CONFIG_IPV6) 6200#if IS_ENABLED(CONFIG_IPV6)
6201 ireq->pktopts = NULL; 6201 ireq->pktopts = NULL;
6202#endif 6202#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d9416b5162bc..5b027c69cbc5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
877 877
878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 878 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
879 ireq->ir_rmt_addr, 879 ireq->ir_rmt_addr,
880 ireq->opt); 880 ireq_opt_deref(ireq));
881 err = net_xmit_eval(err); 881 err = net_xmit_eval(err);
882 } 882 }
883 883
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
889 */ 889 */
890static void tcp_v4_reqsk_destructor(struct request_sock *req) 890static void tcp_v4_reqsk_destructor(struct request_sock *req)
891{ 891{
892 kfree(inet_rsk(req)->opt); 892 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
893} 893}
894 894
895#ifdef CONFIG_TCP_MD5SIG 895#ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
1265 struct sk_buff *skb) 1265 struct sk_buff *skb)
1266{ 1266{
1267 struct inet_request_sock *ireq = inet_rsk(req); 1267 struct inet_request_sock *ireq = inet_rsk(req);
1268 struct net *net = sock_net(sk_listener);
1268 1269
1269 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); 1270 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1270 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); 1271 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1271 ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); 1272 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1272} 1273}
1273 1274
1274static struct dst_entry *tcp_v4_route_req(const struct sock *sk, 1275static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1355 sk_daddr_set(newsk, ireq->ir_rmt_addr); 1356 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1356 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); 1357 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1357 newsk->sk_bound_dev_if = ireq->ir_iif; 1358 newsk->sk_bound_dev_if = ireq->ir_iif;
1358 newinet->inet_saddr = ireq->ir_loc_addr; 1359 newinet->inet_saddr = ireq->ir_loc_addr;
1359 inet_opt = ireq->opt; 1360 inet_opt = rcu_dereference(ireq->ireq_opt);
1360 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1361 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1361 ireq->opt = NULL;
1362 newinet->mc_index = inet_iif(skb); 1362 newinet->mc_index = inet_iif(skb);
1363 newinet->mc_ttl = ip_hdr(skb)->ttl; 1363 newinet->mc_ttl = ip_hdr(skb)->ttl;
1364 newinet->rcv_tos = ip_hdr(skb)->tos; 1364 newinet->rcv_tos = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1403 if (__inet_inherit_port(sk, newsk) < 0) 1403 if (__inet_inherit_port(sk, newsk) < 0)
1404 goto put_and_exit; 1404 goto put_and_exit;
1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); 1405 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1406 if (*own_req) 1406 if (likely(*own_req)) {
1407 tcp_move_syn(newtp, req); 1407 tcp_move_syn(newtp, req);
1408 1408 ireq->ireq_opt = NULL;
1409 } else {
1410 newinet->inet_opt = NULL;
1411 }
1409 return newsk; 1412 return newsk;
1410 1413
1411exit_overflow: 1414exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
1416 tcp_listendrop(sk); 1419 tcp_listendrop(sk);
1417 return NULL; 1420 return NULL;
1418put_and_exit: 1421put_and_exit:
1422 newinet->inet_opt = NULL;
1419 inet_csk_prepare_forced_close(newsk); 1423 inet_csk_prepare_forced_close(newsk);
1420 tcp_done(newsk); 1424 tcp_done(newsk);
1421 goto exit; 1425 goto exit;
@@ -1503,23 +1507,23 @@ csum_err:
1503} 1507}
1504EXPORT_SYMBOL(tcp_v4_do_rcv); 1508EXPORT_SYMBOL(tcp_v4_do_rcv);
1505 1509
1506void tcp_v4_early_demux(struct sk_buff *skb) 1510int tcp_v4_early_demux(struct sk_buff *skb)
1507{ 1511{
1508 const struct iphdr *iph; 1512 const struct iphdr *iph;
1509 const struct tcphdr *th; 1513 const struct tcphdr *th;
1510 struct sock *sk; 1514 struct sock *sk;
1511 1515
1512 if (skb->pkt_type != PACKET_HOST) 1516 if (skb->pkt_type != PACKET_HOST)
1513 return; 1517 return 0;
1514 1518
1515 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) 1519 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1516 return; 1520 return 0;
1517 1521
1518 iph = ip_hdr(skb); 1522 iph = ip_hdr(skb);
1519 th = tcp_hdr(skb); 1523 th = tcp_hdr(skb);
1520 1524
1521 if (th->doff < sizeof(struct tcphdr) / 4) 1525 if (th->doff < sizeof(struct tcphdr) / 4)
1522 return; 1526 return 0;
1523 1527
1524 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, 1528 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1525 iph->saddr, th->source, 1529 iph->saddr, th->source,
@@ -1538,6 +1542,7 @@ void tcp_v4_early_demux(struct sk_buff *skb)
1538 skb_dst_set_noref(skb, dst); 1542 skb_dst_set_noref(skb, dst);
1539 } 1543 }
1540 } 1544 }
1545 return 0;
1541} 1546}
1542 1547
1543bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) 1548bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0bc9e46a5369..823003eef3a2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
739 struct tcp_sock *tp = tcp_sk(sk); 739 struct tcp_sock *tp = tcp_sk(sk);
740 740
741 if (tp->lost_out > tp->retrans_out && 741 if (tp->lost_out > tp->retrans_out &&
742 tp->snd_cwnd > tcp_packets_in_flight(tp)) 742 tp->snd_cwnd > tcp_packets_in_flight(tp)) {
743 tcp_mstamp_refresh(tp);
743 tcp_xmit_retransmit_queue(sk); 744 tcp_xmit_retransmit_queue(sk);
745 }
744 746
745 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, 747 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
746 0, GFP_ATOMIC); 748 0, GFP_ATOMIC);
@@ -2060,6 +2062,7 @@ static int tcp_mtu_probe(struct sock *sk)
2060 nskb->ip_summed = skb->ip_summed; 2062 nskb->ip_summed = skb->ip_summed;
2061 2063
2062 tcp_insert_write_queue_before(nskb, skb, sk); 2064 tcp_insert_write_queue_before(nskb, skb, sk);
2065 tcp_highest_sack_replace(sk, skb, nskb);
2063 2066
2064 len = 0; 2067 len = 0;
2065 tcp_for_write_queue_from_safe(skb, next, sk) { 2068 tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2237,6 +2240,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2237 2240
2238 sent_pkts = 0; 2241 sent_pkts = 0;
2239 2242
2243 tcp_mstamp_refresh(tp);
2240 if (!push_one) { 2244 if (!push_one) {
2241 /* Do MTU probing. */ 2245 /* Do MTU probing. */
2242 result = tcp_mtu_probe(sk); 2246 result = tcp_mtu_probe(sk);
@@ -2248,7 +2252,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2248 } 2252 }
2249 2253
2250 max_segs = tcp_tso_segs(sk, mss_now); 2254 max_segs = tcp_tso_segs(sk, mss_now);
2251 tcp_mstamp_refresh(tp);
2252 while ((skb = tcp_send_head(sk))) { 2255 while ((skb = tcp_send_head(sk))) {
2253 unsigned int limit; 2256 unsigned int limit;
2254 2257
@@ -2663,7 +2666,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2663 else if (!skb_shift(skb, next_skb, next_skb_size)) 2666 else if (!skb_shift(skb, next_skb, next_skb_size))
2664 return false; 2667 return false;
2665 } 2668 }
2666 tcp_highest_sack_combine(sk, next_skb, skb); 2669 tcp_highest_sack_replace(sk, next_skb, skb);
2667 2670
2668 tcp_unlink_write_queue(next_skb, sk); 2671 tcp_unlink_write_queue(next_skb, sk);
2669 2672
@@ -2841,8 +2844,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2841 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2844 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2842 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2845 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2843 -ENOBUFS; 2846 -ENOBUFS;
2844 if (!err) 2847 if (!err) {
2845 skb->skb_mstamp = tp->tcp_mstamp; 2848 skb->skb_mstamp = tp->tcp_mstamp;
2849 tcp_rate_skb_sent(sk, skb);
2850 }
2846 } else { 2851 } else {
2847 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2852 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2848 } 2853 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ef29df8648e4..ebfbccae62fd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
231 } 231 }
232 } 232 }
233 233
234 /* Initial allocation may have already happened via setsockopt */ 234 return reuseport_alloc(sk);
235 if (!rcu_access_pointer(sk->sk_reuseport_cb))
236 return reuseport_alloc(sk);
237 return 0;
238} 235}
239 236
240/** 237/**
@@ -1061,7 +1058,7 @@ back_from_confirm:
1061 /* ... which is an evident application bug. --ANK */ 1058 /* ... which is an evident application bug. --ANK */
1062 release_sock(sk); 1059 release_sock(sk);
1063 1060
1064 net_dbg_ratelimited("cork app bug 2\n"); 1061 net_dbg_ratelimited("socket already corked\n");
1065 err = -EINVAL; 1062 err = -EINVAL;
1066 goto out; 1063 goto out;
1067 } 1064 }
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1144 if (unlikely(!up->pending)) { 1141 if (unlikely(!up->pending)) {
1145 release_sock(sk); 1142 release_sock(sk);
1146 1143
1147 net_dbg_ratelimited("udp cork app bug 3\n"); 1144 net_dbg_ratelimited("cork failed\n");
1148 return -EINVAL; 1145 return -EINVAL;
1149 } 1146 }
1150 1147
@@ -2221,9 +2218,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
2221 return NULL; 2218 return NULL;
2222} 2219}
2223 2220
2224void udp_v4_early_demux(struct sk_buff *skb) 2221int udp_v4_early_demux(struct sk_buff *skb)
2225{ 2222{
2226 struct net *net = dev_net(skb->dev); 2223 struct net *net = dev_net(skb->dev);
2224 struct in_device *in_dev = NULL;
2227 const struct iphdr *iph; 2225 const struct iphdr *iph;
2228 const struct udphdr *uh; 2226 const struct udphdr *uh;
2229 struct sock *sk = NULL; 2227 struct sock *sk = NULL;
@@ -2234,25 +2232,21 @@ void udp_v4_early_demux(struct sk_buff *skb)
2234 2232
2235 /* validate the packet */ 2233 /* validate the packet */
2236 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) 2234 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
2237 return; 2235 return 0;
2238 2236
2239 iph = ip_hdr(skb); 2237 iph = ip_hdr(skb);
2240 uh = udp_hdr(skb); 2238 uh = udp_hdr(skb);
2241 2239
2242 if (skb->pkt_type == PACKET_BROADCAST || 2240 if (skb->pkt_type == PACKET_MULTICAST) {
2243 skb->pkt_type == PACKET_MULTICAST) { 2241 in_dev = __in_dev_get_rcu(skb->dev);
2244 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
2245 2242
2246 if (!in_dev) 2243 if (!in_dev)
2247 return; 2244 return 0;
2248 2245
2249 /* we are supposed to accept bcast packets */ 2246 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
2250 if (skb->pkt_type == PACKET_MULTICAST) { 2247 iph->protocol);
2251 ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, 2248 if (!ours)
2252 iph->protocol); 2249 return 0;
2253 if (!ours)
2254 return;
2255 }
2256 2250
2257 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, 2251 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
2258 uh->source, iph->saddr, 2252 uh->source, iph->saddr,
@@ -2263,7 +2257,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
2263 } 2257 }
2264 2258
2265 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 2259 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
2266 return; 2260 return 0;
2267 2261
2268 skb->sk = sk; 2262 skb->sk = sk;
2269 skb->destructor = sock_efree; 2263 skb->destructor = sock_efree;
@@ -2272,12 +2266,23 @@ void udp_v4_early_demux(struct sk_buff *skb)
2272 if (dst) 2266 if (dst)
2273 dst = dst_check(dst, 0); 2267 dst = dst_check(dst, 0);
2274 if (dst) { 2268 if (dst) {
2269 u32 itag = 0;
2270
2275 /* set noref for now. 2271 /* set noref for now.
2276 * any place which wants to hold dst has to call 2272 * any place which wants to hold dst has to call
2277 * dst_hold_safe() 2273 * dst_hold_safe()
2278 */ 2274 */
2279 skb_dst_set_noref(skb, dst); 2275 skb_dst_set_noref(skb, dst);
2276
2277 /* for unconnected multicast sockets we need to validate
2278 * the source on each packet
2279 */
2280 if (!inet_sk(sk)->inet_daddr && in_dev)
2281 return ip_mc_validate_source(skb, iph->daddr,
2282 iph->saddr, iph->tos,
2283 skb->dev, in_dev, &itag);
2280 } 2284 }
2285 return 0;
2281} 2286}
2282 2287
2283int udp_rcv(struct sk_buff *skb) 2288int udp_rcv(struct sk_buff *skb)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 97658bfc1b58..e360d55be555 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
120 * will be using a length value equal to only one MSS sized 120 * will be using a length value equal to only one MSS sized
121 * segment instead of the entire frame. 121 * segment instead of the entire frame.
122 */ 122 */
123 if (gso_partial) { 123 if (gso_partial && skb_is_gso(skb)) {
124 uh->len = htons(skb_shinfo(skb)->gso_size + 124 uh->len = htons(skb_shinfo(skb)->gso_size +
125 SKB_GSO_CB(skb)->data_offset + 125 SKB_GSO_CB(skb)->data_offset +
126 skb->head - (unsigned char *)uh); 126 skb->head - (unsigned char *)uh);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 96861c702c06..8a1c846d3df9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3335,6 +3335,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
3335 if ((ifp->flags & IFA_F_PERMANENT) && 3335 if ((ifp->flags & IFA_F_PERMANENT) &&
3336 fixup_permanent_addr(idev, ifp) < 0) { 3336 fixup_permanent_addr(idev, ifp) < 0) {
3337 write_unlock_bh(&idev->lock); 3337 write_unlock_bh(&idev->lock);
3338 in6_ifa_hold(ifp);
3338 ipv6_del_addr(ifp); 3339 ipv6_del_addr(ifp);
3339 write_lock_bh(&idev->lock); 3340 write_lock_bh(&idev->lock);
3340 3341
@@ -3820,8 +3821,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3820 goto out; 3821 goto out;
3821 3822
3822 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 3823 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3823 dev_net(dev)->ipv6.devconf_all->accept_dad < 1 || 3824 (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 &&
3824 idev->cnf.accept_dad < 1 || 3825 idev->cnf.accept_dad < 1) ||
3825 !(ifp->flags&IFA_F_TENTATIVE) || 3826 !(ifp->flags&IFA_F_TENTATIVE) ||
3826 ifp->flags & IFA_F_NODAD) { 3827 ifp->flags & IFA_F_NODAD) {
3827 bump_id = ifp->flags & IFA_F_TENTATIVE; 3828 bump_id = ifp->flags & IFA_F_TENTATIVE;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 8081bafe441b..15535ee327c5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
315 } 315 }
316 opt_space->dst1opt = fopt->dst1opt; 316 opt_space->dst1opt = fopt->dst1opt;
317 opt_space->opt_flen = fopt->opt_flen; 317 opt_space->opt_flen = fopt->opt_flen;
318 opt_space->tot_len = fopt->tot_len;
318 return opt_space; 319 return opt_space;
319} 320}
320EXPORT_SYMBOL_GPL(fl6_merge_options); 321EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 20f66f4c9460..59c121b932ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
408 case ICMPV6_DEST_UNREACH: 408 case ICMPV6_DEST_UNREACH:
409 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", 409 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
410 t->parms.name); 410 t->parms.name);
411 break; 411 if (code != ICMPV6_PORT_UNREACH)
412 break;
413 return;
412 case ICMPV6_TIME_EXCEED: 414 case ICMPV6_TIME_EXCEED:
413 if (code == ICMPV6_EXC_HOPLIMIT) { 415 if (code == ICMPV6_EXC_HOPLIMIT) {
414 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 416 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
415 t->parms.name); 417 t->parms.name);
418 break;
416 } 419 }
417 break; 420 return;
418 case ICMPV6_PARAMPROB: 421 case ICMPV6_PARAMPROB:
419 teli = 0; 422 teli = 0;
420 if (code == ICMPV6_HDR_FIELD) 423 if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 433 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
431 t->parms.name); 434 t->parms.name);
432 } 435 }
433 break; 436 return;
434 case ICMPV6_PKT_TOOBIG: 437 case ICMPV6_PKT_TOOBIG:
435 mtu = be32_to_cpu(info) - offset - t->tun_hlen; 438 mtu = be32_to_cpu(info) - offset - t->tun_hlen;
436 if (t->dev->type == ARPHRD_ETHER) 439 if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
438 if (mtu < IPV6_MIN_MTU) 441 if (mtu < IPV6_MIN_MTU)
439 mtu = IPV6_MIN_MTU; 442 mtu = IPV6_MIN_MTU;
440 t->dev->mtu = mtu; 443 t->dev->mtu = mtu;
441 break; 444 return;
442 } 445 }
443 446
444 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) 447 if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
500 __u32 *pmtu, __be16 proto) 503 __u32 *pmtu, __be16 proto)
501{ 504{
502 struct ip6_tnl *tunnel = netdev_priv(dev); 505 struct ip6_tnl *tunnel = netdev_priv(dev);
503 __be16 protocol = (dev->type == ARPHRD_ETHER) ? 506 struct dst_entry *dst = skb_dst(skb);
504 htons(ETH_P_TEB) : proto; 507 __be16 protocol;
505 508
506 if (dev->type == ARPHRD_ETHER) 509 if (dev->type == ARPHRD_ETHER)
507 IPCB(skb)->flags = 0; 510 IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
515 tunnel->o_seqno++; 518 tunnel->o_seqno++;
516 519
517 /* Push GRE header. */ 520 /* Push GRE header. */
521 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
518 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, 522 gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
519 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); 523 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
520 524
525 /* TooBig packet may have updated dst->dev's mtu */
526 if (dst && dst_mtu(dst) > dst->dev->mtu)
527 dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
528
521 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, 529 return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
522 NEXTHDR_GRE); 530 NEXTHDR_GRE);
523} 531}
@@ -1311,6 +1319,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
1311 dev->features |= NETIF_F_NETNS_LOCAL; 1319 dev->features |= NETIF_F_NETNS_LOCAL;
1312 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1320 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1313 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1321 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1322 netif_keep_dst(dev);
1314} 1323}
1315 1324
1316static bool ip6gre_netlink_encap_parms(struct nlattr *data[], 1325static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index cdb3728faca7..4a87f9428ca5 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
105 105
106 for (skb = segs; skb; skb = skb->next) { 106 for (skb = segs; skb; skb = skb->next) {
107 ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); 107 ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
108 if (gso_partial) 108 if (gso_partial && skb_is_gso(skb))
109 payload_len = skb_shinfo(skb)->gso_size + 109 payload_len = skb_shinfo(skb)->gso_size +
110 SKB_GSO_CB(skb)->data_offset + 110 SKB_GSO_CB(skb)->data_offset +
111 skb->head - (unsigned char *)(ipv6h + 1); 111 skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 43ca864327c7..5110a418cc4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1161 if (WARN_ON(v6_cork->opt)) 1161 if (WARN_ON(v6_cork->opt))
1162 return -EINVAL; 1162 return -EINVAL;
1163 1163
1164 v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); 1164 v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1165 if (unlikely(!v6_cork->opt)) 1165 if (unlikely(!v6_cork->opt))
1166 return -ENOBUFS; 1166 return -ENOBUFS;
1167 1167
1168 v6_cork->opt->tot_len = opt->tot_len; 1168 v6_cork->opt->tot_len = sizeof(*opt);
1169 v6_cork->opt->opt_flen = opt->opt_flen; 1169 v6_cork->opt->opt_flen = opt->opt_flen;
1170 v6_cork->opt->opt_nflen = opt->opt_nflen; 1170 v6_cork->opt->opt_nflen = opt->opt_nflen;
1171 1171
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f2f21c24915f..a1c24443cd9e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1043 struct dst_entry *dst = NULL, *ndst = NULL; 1043 struct dst_entry *dst = NULL, *ndst = NULL;
1044 struct net_device *tdev; 1044 struct net_device *tdev;
1045 int mtu; 1045 int mtu;
1046 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1046 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1047 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1047 unsigned int max_headroom = psh_hlen; 1048 unsigned int max_headroom = psh_hlen;
1048 bool use_cache = false; 1049 bool use_cache = false;
@@ -1124,7 +1125,7 @@ route_lookup:
1124 t->parms.name); 1125 t->parms.name);
1125 goto tx_err_dst_release; 1126 goto tx_err_dst_release;
1126 } 1127 }
1127 mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen; 1128 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1128 if (encap_limit >= 0) { 1129 if (encap_limit >= 0) {
1129 max_headroom += 8; 1130 max_headroom += 8;
1130 mtu -= 8; 1131 mtu -= 8;
@@ -1133,7 +1134,7 @@ route_lookup:
1133 mtu = IPV6_MIN_MTU; 1134 mtu = IPV6_MIN_MTU;
1134 if (skb_dst(skb) && !t->parms.collect_md) 1135 if (skb_dst(skb) && !t->parms.collect_md)
1135 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); 1136 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1136 if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) { 1137 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1137 *pmtu = mtu; 1138 *pmtu = mtu;
1138 err = -EMSGSIZE; 1139 err = -EMSGSIZE;
1139 goto tx_err_dst_release; 1140 goto tx_err_dst_release;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 79444a4bfd6d..bcdc2d557de1 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
445 struct dst_entry *dst = skb_dst(skb); 445 struct dst_entry *dst = skb_dst(skb);
446 struct net_device *tdev; 446 struct net_device *tdev;
447 struct xfrm_state *x; 447 struct xfrm_state *x;
448 int pkt_len = skb->len;
448 int err = -1; 449 int err = -1;
449 int mtu; 450 int mtu;
450 451
@@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
502 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 503 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
503 504
504 u64_stats_update_begin(&tstats->syncp); 505 u64_stats_update_begin(&tstats->syncp);
505 tstats->tx_bytes += skb->len; 506 tstats->tx_bytes += pkt_len;
506 tstats->tx_packets++; 507 tstats->tx_packets++;
507 u64_stats_update_end(&tstats->syncp); 508 u64_stats_update_end(&tstats->syncp);
508 } else { 509 } else {
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index a5cd43d75393..437af8c95277 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv,
353 nexthdr = ipv6_hdr(skb)->nexthdr; 353 nexthdr = ipv6_hdr(skb)->nexthdr;
354 thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, 354 thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
355 &frag_off); 355 &frag_off);
356 if (thoff < 0) 356 if (thoff < 0 || nexthdr != IPPROTO_TCP)
357 return NF_ACCEPT; 357 return NF_ACCEPT;
358 358
359 th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); 359 th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 26cc9f483b6d..a96d5b385d8f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
1325 struct dst_entry *new = NULL; 1325 struct dst_entry *new = NULL;
1326 1326
1327 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, 1327 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
1328 DST_OBSOLETE_NONE, 0); 1328 DST_OBSOLETE_DEAD, 0);
1329 if (rt) { 1329 if (rt) {
1330 rt6_info_init(rt); 1330 rt6_info_init(rt);
1331 1331
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ee485df73ccd..02d61101b108 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1314,6 +1314,9 @@ again:
1314 1314
1315 hlist_del_init(&session->hlist); 1315 hlist_del_init(&session->hlist);
1316 1316
1317 if (test_and_set_bit(0, &session->dead))
1318 goto again;
1319
1317 if (session->ref != NULL) 1320 if (session->ref != NULL)
1318 (*session->ref)(session); 1321 (*session->ref)(session);
1319 1322
@@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1685 1688
1686/* This function is used by the netlink TUNNEL_DELETE command. 1689/* This function is used by the netlink TUNNEL_DELETE command.
1687 */ 1690 */
1688int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) 1691void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1689{ 1692{
1690 l2tp_tunnel_inc_refcount(tunnel); 1693 if (!test_and_set_bit(0, &tunnel->dead)) {
1691 if (false == queue_work(l2tp_wq, &tunnel->del_work)) { 1694 l2tp_tunnel_inc_refcount(tunnel);
1692 l2tp_tunnel_dec_refcount(tunnel); 1695 queue_work(l2tp_wq, &tunnel->del_work);
1693 return 1;
1694 } 1696 }
1695 return 0;
1696} 1697}
1697EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); 1698EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1698 1699
@@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
1750 */ 1751 */
1751int l2tp_session_delete(struct l2tp_session *session) 1752int l2tp_session_delete(struct l2tp_session *session)
1752{ 1753{
1754 if (test_and_set_bit(0, &session->dead))
1755 return 0;
1756
1753 if (session->ref) 1757 if (session->ref)
1754 (*session->ref)(session); 1758 (*session->ref)(session);
1755 __l2tp_session_unhash(session); 1759 __l2tp_session_unhash(session);
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a305e0c5925a..67c79d9b5c6c 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -76,6 +76,7 @@ struct l2tp_session_cfg {
76struct l2tp_session { 76struct l2tp_session {
77 int magic; /* should be 77 int magic; /* should be
78 * L2TP_SESSION_MAGIC */ 78 * L2TP_SESSION_MAGIC */
79 long dead;
79 80
80 struct l2tp_tunnel *tunnel; /* back pointer to tunnel 81 struct l2tp_tunnel *tunnel; /* back pointer to tunnel
81 * context */ 82 * context */
@@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg {
160 161
161struct l2tp_tunnel { 162struct l2tp_tunnel {
162 int magic; /* Should be L2TP_TUNNEL_MAGIC */ 163 int magic; /* Should be L2TP_TUNNEL_MAGIC */
164
165 unsigned long dead;
166
163 struct rcu_head rcu; 167 struct rcu_head rcu;
164 rwlock_t hlist_lock; /* protect session_hlist */ 168 rwlock_t hlist_lock; /* protect session_hlist */
165 bool acpt_newsess; /* Indicates whether this 169 bool acpt_newsess; /* Indicates whether this
@@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
254 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, 258 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
255 struct l2tp_tunnel **tunnelp); 259 struct l2tp_tunnel **tunnelp);
256void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); 260void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
257int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); 261void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
258struct l2tp_session *l2tp_session_create(int priv_size, 262struct l2tp_session *l2tp_session_create(int priv_size,
259 struct l2tp_tunnel *tunnel, 263 struct l2tp_tunnel *tunnel,
260 u32 session_id, u32 peer_session_id, 264 u32 session_id, u32 peer_session_id,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 87da9ef61860..014a7bc2a872 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -44,7 +44,6 @@ struct l2tp_eth {
44 struct net_device *dev; 44 struct net_device *dev;
45 struct sock *tunnel_sock; 45 struct sock *tunnel_sock;
46 struct l2tp_session *session; 46 struct l2tp_session *session;
47 struct list_head list;
48 atomic_long_t tx_bytes; 47 atomic_long_t tx_bytes;
49 atomic_long_t tx_packets; 48 atomic_long_t tx_packets;
50 atomic_long_t tx_dropped; 49 atomic_long_t tx_dropped;
@@ -58,17 +57,6 @@ struct l2tp_eth_sess {
58 struct net_device *dev; 57 struct net_device *dev;
59}; 58};
60 59
61/* per-net private data for this module */
62static unsigned int l2tp_eth_net_id;
63struct l2tp_eth_net {
64 struct list_head l2tp_eth_dev_list;
65 spinlock_t l2tp_eth_lock;
66};
67
68static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
69{
70 return net_generic(net, l2tp_eth_net_id);
71}
72 60
73static int l2tp_eth_dev_init(struct net_device *dev) 61static int l2tp_eth_dev_init(struct net_device *dev)
74{ 62{
@@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
84 72
85static void l2tp_eth_dev_uninit(struct net_device *dev) 73static void l2tp_eth_dev_uninit(struct net_device *dev)
86{ 74{
87 struct l2tp_eth *priv = netdev_priv(dev);
88 struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
89
90 spin_lock(&pn->l2tp_eth_lock);
91 list_del_init(&priv->list);
92 spin_unlock(&pn->l2tp_eth_lock);
93 dev_put(dev); 75 dev_put(dev);
94} 76}
95 77
@@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
273 struct l2tp_eth *priv; 255 struct l2tp_eth *priv;
274 struct l2tp_eth_sess *spriv; 256 struct l2tp_eth_sess *spriv;
275 int rc; 257 int rc;
276 struct l2tp_eth_net *pn;
277 258
278 if (cfg->ifname) { 259 if (cfg->ifname) {
279 strlcpy(name, cfg->ifname, IFNAMSIZ); 260 strlcpy(name, cfg->ifname, IFNAMSIZ);
@@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
305 priv = netdev_priv(dev); 286 priv = netdev_priv(dev);
306 priv->dev = dev; 287 priv->dev = dev;
307 priv->session = session; 288 priv->session = session;
308 INIT_LIST_HEAD(&priv->list);
309 289
310 priv->tunnel_sock = tunnel->sock; 290 priv->tunnel_sock = tunnel->sock;
311 session->recv_skb = l2tp_eth_dev_recv; 291 session->recv_skb = l2tp_eth_dev_recv;
@@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
326 strlcpy(session->ifname, dev->name, IFNAMSIZ); 306 strlcpy(session->ifname, dev->name, IFNAMSIZ);
327 307
328 dev_hold(dev); 308 dev_hold(dev);
329 pn = l2tp_eth_pernet(dev_net(dev));
330 spin_lock(&pn->l2tp_eth_lock);
331 list_add(&priv->list, &pn->l2tp_eth_dev_list);
332 spin_unlock(&pn->l2tp_eth_lock);
333 309
334 return 0; 310 return 0;
335 311
@@ -342,22 +318,6 @@ out:
342 return rc; 318 return rc;
343} 319}
344 320
345static __net_init int l2tp_eth_init_net(struct net *net)
346{
347 struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);
348
349 INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
350 spin_lock_init(&pn->l2tp_eth_lock);
351
352 return 0;
353}
354
355static struct pernet_operations l2tp_eth_net_ops = {
356 .init = l2tp_eth_init_net,
357 .id = &l2tp_eth_net_id,
358 .size = sizeof(struct l2tp_eth_net),
359};
360
361 321
362static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { 322static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
363 .session_create = l2tp_eth_create, 323 .session_create = l2tp_eth_create,
@@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void)
371 331
372 err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); 332 err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
373 if (err) 333 if (err)
374 goto out; 334 goto err;
375
376 err = register_pernet_device(&l2tp_eth_net_ops);
377 if (err)
378 goto out_unreg;
379 335
380 pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); 336 pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");
381 337
382 return 0; 338 return 0;
383 339
384out_unreg: 340err:
385 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
386out:
387 return err; 341 return err;
388} 342}
389 343
390static void __exit l2tp_eth_exit(void) 344static void __exit l2tp_eth_exit(void)
391{ 345{
392 unregister_pernet_device(&l2tp_eth_net_ops);
393 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); 346 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
394} 347}
395 348
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 50e3ee9a9d61..0c2738349442 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
437 437
438 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 438 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
439 439
440 if (sock) { 440 if (sock)
441 inet_shutdown(sock, SEND_SHUTDOWN); 441 inet_shutdown(sock, SEND_SHUTDOWN);
442 /* Don't let the session go away before our socket does */ 442
443 l2tp_session_inc_refcount(session); 443 /* Don't let the session go away before our socket does */
444 } 444 l2tp_session_inc_refcount(session);
445} 445}
446 446
447/* Really kill the session socket. (Called from sock_put() if 447/* Really kill the session socket. (Called from sock_put() if
@@ -584,6 +584,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
584 u32 tunnel_id, peer_tunnel_id; 584 u32 tunnel_id, peer_tunnel_id;
585 u32 session_id, peer_session_id; 585 u32 session_id, peer_session_id;
586 bool drop_refcnt = false; 586 bool drop_refcnt = false;
587 bool drop_tunnel = false;
587 int ver = 2; 588 int ver = 2;
588 int fd; 589 int fd;
589 590
@@ -652,7 +653,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
652 if (tunnel_id == 0) 653 if (tunnel_id == 0)
653 goto end; 654 goto end;
654 655
655 tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id); 656 tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
657 if (tunnel)
658 drop_tunnel = true;
656 659
657 /* Special case: create tunnel context if session_id and 660 /* Special case: create tunnel context if session_id and
658 * peer_session_id is 0. Otherwise look up tunnel using supplied 661 * peer_session_id is 0. Otherwise look up tunnel using supplied
@@ -781,6 +784,8 @@ out_no_ppp:
781end: 784end:
782 if (drop_refcnt) 785 if (drop_refcnt)
783 l2tp_session_dec_refcount(session); 786 l2tp_session_dec_refcount(session);
787 if (drop_tunnel)
788 l2tp_tunnel_dec_refcount(tunnel);
784 release_sock(sk); 789 release_sock(sk);
785 790
786 return error; 791 return error;
@@ -988,6 +993,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
988 session->name, cmd, arg); 993 session->name, cmd, arg);
989 994
990 sk = ps->sock; 995 sk = ps->sock;
996 if (!sk)
997 return -EBADR;
998
991 sock_hold(sk); 999 sock_hold(sk);
992 1000
993 switch (cmd) { 1001 switch (cmd) {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a354f1939e49..fb15d3b97cb2 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2727 if (!ieee80211_sdata_running(sdata)) 2727 if (!ieee80211_sdata_running(sdata))
2728 return -ENETDOWN; 2728 return -ENETDOWN;
2729 2729
2730 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
2731 ret = drv_set_bitrate_mask(local, sdata, mask);
2732 if (ret)
2733 return ret;
2734 }
2735
2736 /* 2730 /*
2737 * If active validate the setting and reject it if it doesn't leave 2731 * If active validate the setting and reject it if it doesn't leave
2738 * at least one basic rate usable, since we really have to be able 2732 * at least one basic rate usable, since we really have to be able
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2748 return -EINVAL; 2742 return -EINVAL;
2749 } 2743 }
2750 2744
2745 if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
2746 ret = drv_set_bitrate_mask(local, sdata, mask);
2747 if (ret)
2748 return ret;
2749 }
2750
2751 for (i = 0; i < NUM_NL80211_BANDS; i++) { 2751 for (i = 0; i < NUM_NL80211_BANDS; i++) {
2752 struct ieee80211_supported_band *sband = wiphy->bands[i]; 2752 struct ieee80211_supported_band *sband = wiphy->bands[i];
2753 int j; 2753 int j;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a98fc2b5e0dc..938049395f90 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,7 +4,7 @@
4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> 5 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
6 * Copyright 2013-2014 Intel Mobile Communications GmbH 6 * Copyright 2013-2014 Intel Mobile Communications GmbH
7 * Copyright 2015 Intel Deutschland GmbH 7 * Copyright 2015-2017 Intel Deutschland GmbH
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/export.h> 20#include <linux/export.h>
21#include <net/mac80211.h> 21#include <net/mac80211.h>
22#include <crypto/algapi.h>
22#include <asm/unaligned.h> 23#include <asm/unaligned.h>
23#include "ieee80211_i.h" 24#include "ieee80211_i.h"
24#include "driver-ops.h" 25#include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
609 ieee80211_key_free_common(key); 610 ieee80211_key_free_common(key);
610} 611}
611 612
613static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
614 struct ieee80211_key *old,
615 struct ieee80211_key *new)
616{
617 u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
618 u8 *tk_old, *tk_new;
619
620 if (!old || new->conf.keylen != old->conf.keylen)
621 return false;
622
623 tk_old = old->conf.key;
624 tk_new = new->conf.key;
625
626 /*
627 * In station mode, don't compare the TX MIC key, as it's never used
628 * and offloaded rekeying may not care to send it to the host. This
629 * is the case in iwlwifi, for example.
630 */
631 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
632 new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
633 new->conf.keylen == WLAN_KEY_LEN_TKIP &&
634 !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
635 memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
636 memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
637 memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
638 memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
639 tk_old = tkip_old;
640 tk_new = tkip_new;
641 }
642
643 return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
644}
645
612int ieee80211_key_link(struct ieee80211_key *key, 646int ieee80211_key_link(struct ieee80211_key *key,
613 struct ieee80211_sub_if_data *sdata, 647 struct ieee80211_sub_if_data *sdata,
614 struct sta_info *sta) 648 struct sta_info *sta)
@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
620 654
621 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; 655 pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
622 idx = key->conf.keyidx; 656 idx = key->conf.keyidx;
623 key->local = sdata->local;
624 key->sdata = sdata;
625 key->sta = sta;
626 657
627 mutex_lock(&sdata->local->key_mtx); 658 mutex_lock(&sdata->local->key_mtx);
628 659
@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
633 else 664 else
634 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 665 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
635 666
667 /*
668 * Silently accept key re-installation without really installing the
669 * new version of the key to avoid nonce reuse or replay issues.
670 */
671 if (ieee80211_key_identical(sdata, old_key, key)) {
672 ieee80211_key_free_unused(key);
673 ret = 0;
674 goto out;
675 }
676
677 key->local = sdata->local;
678 key->sdata = sdata;
679 key->sta = sta;
680
636 increment_tailroom_need_count(sdata); 681 increment_tailroom_need_count(sdata);
637 682
638 ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 683 ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
648 ret = 0; 693 ret = 0;
649 } 694 }
650 695
696 out:
651 mutex_unlock(&sdata->local->key_mtx); 697 mutex_unlock(&sdata->local->key_mtx);
652 698
653 return ret; 699 return ret;
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index af3d636534ef..d30f7bd741d0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
286 struct work_struct work; /* For channel management */ 286 struct work_struct work; /* For channel management */
287 struct packet_type ptype; /* NCSI packet Rx handler */ 287 struct packet_type ptype; /* NCSI packet Rx handler */
288 struct list_head node; /* Form NCSI device list */ 288 struct list_head node; /* Form NCSI device list */
289#define NCSI_MAX_VLAN_VIDS 15
289 struct list_head vlan_vids; /* List of active VLAN IDs */ 290 struct list_head vlan_vids; /* List of active VLAN IDs */
290}; 291};
291 292
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 6898e7229285..f135938bf781 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
187} ncsi_aen_handlers[] = { 187} ncsi_aen_handlers[] = {
188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc }, 188 { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc },
189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr }, 189 { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr },
190 { NCSI_PKT_AEN_HNCDSC, 4, ncsi_aen_handler_hncdsc } 190 { NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc }
191}; 191};
192 192
193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb) 193int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 3fd3c39e6278..28c42b22b748 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
189 struct ncsi_channel *nc = (struct ncsi_channel *)data; 189 struct ncsi_channel *nc = (struct ncsi_channel *)data;
190 struct ncsi_package *np = nc->package; 190 struct ncsi_package *np = nc->package;
191 struct ncsi_dev_priv *ndp = np->ndp; 191 struct ncsi_dev_priv *ndp = np->ndp;
192 struct ncsi_channel_mode *ncm;
192 struct ncsi_cmd_arg nca; 193 struct ncsi_cmd_arg nca;
193 bool enabled, chained; 194 bool enabled, chained;
194 unsigned int monitor_state; 195 unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
202 monitor_state = nc->monitor.state; 203 monitor_state = nc->monitor.state;
203 spin_unlock_irqrestore(&nc->lock, flags); 204 spin_unlock_irqrestore(&nc->lock, flags);
204 205
205 if (!enabled || chained) 206 if (!enabled || chained) {
207 ncsi_stop_channel_monitor(nc);
206 return; 208 return;
209 }
207 if (state != NCSI_CHANNEL_INACTIVE && 210 if (state != NCSI_CHANNEL_INACTIVE &&
208 state != NCSI_CHANNEL_ACTIVE) 211 state != NCSI_CHANNEL_ACTIVE) {
212 ncsi_stop_channel_monitor(nc);
209 return; 213 return;
214 }
210 215
211 switch (monitor_state) { 216 switch (monitor_state) {
212 case NCSI_CHANNEL_MONITOR_START: 217 case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
217 nca.type = NCSI_PKT_CMD_GLS; 222 nca.type = NCSI_PKT_CMD_GLS;
218 nca.req_flags = 0; 223 nca.req_flags = 0;
219 ret = ncsi_xmit_cmd(&nca); 224 ret = ncsi_xmit_cmd(&nca);
220 if (ret) { 225 if (ret)
221 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", 226 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
222 ret); 227 ret);
223 return;
224 }
225
226 break; 228 break;
227 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: 229 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
228 break; 230 break;
229 default: 231 default:
230 if (!(ndp->flags & NCSI_DEV_HWA) && 232 if (!(ndp->flags & NCSI_DEV_HWA)) {
231 state == NCSI_CHANNEL_ACTIVE) {
232 ncsi_report_link(ndp, true); 233 ncsi_report_link(ndp, true);
233 ndp->flags |= NCSI_DEV_RESHUFFLE; 234 ndp->flags |= NCSI_DEV_RESHUFFLE;
234 } 235 }
235 236
237 ncsi_stop_channel_monitor(nc);
238
239 ncm = &nc->modes[NCSI_MODE_LINK];
236 spin_lock_irqsave(&nc->lock, flags); 240 spin_lock_irqsave(&nc->lock, flags);
237 nc->state = NCSI_CHANNEL_INVISIBLE; 241 nc->state = NCSI_CHANNEL_INVISIBLE;
242 ncm->data[2] &= ~0x1;
238 spin_unlock_irqrestore(&nc->lock, flags); 243 spin_unlock_irqrestore(&nc->lock, flags);
239 244
240 spin_lock_irqsave(&ndp->lock, flags); 245 spin_lock_irqsave(&ndp->lock, flags);
241 nc->state = NCSI_CHANNEL_INACTIVE; 246 nc->state = NCSI_CHANNEL_ACTIVE;
242 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 247 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
243 spin_unlock_irqrestore(&ndp->lock, flags); 248 spin_unlock_irqrestore(&ndp->lock, flags);
244 ncsi_process_next_channel(ndp); 249 ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
732 if (index < 0) { 737 if (index < 0) {
733 netdev_err(ndp->ndev.dev, 738 netdev_err(ndp->ndev.dev,
734 "Failed to add new VLAN tag, error %d\n", index); 739 "Failed to add new VLAN tag, error %d\n", index);
740 if (index == -ENOSPC)
741 netdev_err(ndp->ndev.dev,
742 "Channel %u already has all VLAN filters set\n",
743 nc->id);
735 return -1; 744 return -1;
736 } 745 }
737 746
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
998 struct ncsi_package *np; 1007 struct ncsi_package *np;
999 struct ncsi_channel *nc; 1008 struct ncsi_channel *nc;
1000 unsigned int cap; 1009 unsigned int cap;
1010 bool has_channel = false;
1001 1011
1002 /* The hardware arbitration is disabled if any one channel 1012 /* The hardware arbitration is disabled if any one channel
1003 * doesn't support explicitly. 1013 * doesn't support explicitly.
1004 */ 1014 */
1005 NCSI_FOR_EACH_PACKAGE(ndp, np) { 1015 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1006 NCSI_FOR_EACH_CHANNEL(np, nc) { 1016 NCSI_FOR_EACH_CHANNEL(np, nc) {
1017 has_channel = true;
1018
1007 cap = nc->caps[NCSI_CAP_GENERIC].cap; 1019 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1008 if (!(cap & NCSI_CAP_GENERIC_HWA) || 1020 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1009 (cap & NCSI_CAP_GENERIC_HWA_MASK) != 1021 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1014 } 1026 }
1015 } 1027 }
1016 1028
1017 ndp->flags |= NCSI_DEV_HWA; 1029 if (has_channel) {
1018 return true; 1030 ndp->flags |= NCSI_DEV_HWA;
1031 return true;
1032 }
1033
1034 ndp->flags &= ~NCSI_DEV_HWA;
1035 return false;
1019} 1036}
1020 1037
1021static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) 1038static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1403 1420
1404int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) 1421int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1405{ 1422{
1406 struct ncsi_channel_filter *ncf;
1407 struct ncsi_dev_priv *ndp; 1423 struct ncsi_dev_priv *ndp;
1408 unsigned int n_vids = 0; 1424 unsigned int n_vids = 0;
1409 struct vlan_vid *vlan; 1425 struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1420 } 1436 }
1421 1437
1422 ndp = TO_NCSI_DEV_PRIV(nd); 1438 ndp = TO_NCSI_DEV_PRIV(nd);
1423 ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
1424 1439
1425 /* Add the VLAN id to our internal list */ 1440 /* Add the VLAN id to our internal list */
1426 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { 1441 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1431 return 0; 1446 return 0;
1432 } 1447 }
1433 } 1448 }
1434 1449 if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1435 if (n_vids >= ncf->total) { 1450 netdev_warn(dev,
1436 netdev_info(dev, 1451 "tried to add vlan id %u but NCSI max already registered (%u)\n",
1437 "NCSI Channel supports up to %u VLAN tags but %u are already set\n", 1452 vid, NCSI_MAX_VLAN_VIDS);
1438 ncf->total, n_vids); 1453 return -ENOSPC;
1439 return -EINVAL;
1440 } 1454 }
1441 1455
1442 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 1456 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 265b9a892d41..927dad4759d1 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf }, 959 { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf },
960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf }, 960 { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf },
961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc }, 961 { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc },
962 { NCSI_PKT_RSP_GVI, 36, ncsi_rsp_handler_gvi }, 962 { NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi },
963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc }, 963 { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc },
964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp }, 964 { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp },
965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps }, 965 { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps },
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e495b5e484b1..cf84f7b37cd9 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1191 from->family == to->family)) 1191 from->family == to->family))
1192 return -IPSET_ERR_TYPE_MISMATCH; 1192 return -IPSET_ERR_TYPE_MISMATCH;
1193 1193
1194 if (from->ref_netlink || to->ref_netlink) 1194 write_lock_bh(&ip_set_ref_lock);
1195
1196 if (from->ref_netlink || to->ref_netlink) {
1197 write_unlock_bh(&ip_set_ref_lock);
1195 return -EBUSY; 1198 return -EBUSY;
1199 }
1196 1200
1197 strncpy(from_name, from->name, IPSET_MAXNAMELEN); 1201 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
1198 strncpy(from->name, to->name, IPSET_MAXNAMELEN); 1202 strncpy(from->name, to->name, IPSET_MAXNAMELEN);
1199 strncpy(to->name, from_name, IPSET_MAXNAMELEN); 1203 strncpy(to->name, from_name, IPSET_MAXNAMELEN);
1200 1204
1201 write_lock_bh(&ip_set_ref_lock);
1202 swap(from->ref, to->ref); 1205 swap(from->ref, to->ref);
1203 ip_set(inst, from_id) = to; 1206 ip_set(inst, from_id) = to;
1204 ip_set(inst, to_id) = from; 1207 ip_set(inst, to_id) = from;
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = {
2072static int __init 2075static int __init
2073ip_set_init(void) 2076ip_set_init(void)
2074{ 2077{
2075 int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); 2078 int ret = register_pernet_subsys(&ip_set_net_ops);
2079
2080 if (ret) {
2081 pr_err("ip_set: cannot register pernet_subsys.\n");
2082 return ret;
2083 }
2076 2084
2085 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
2077 if (ret != 0) { 2086 if (ret != 0) {
2078 pr_err("ip_set: cannot register with nfnetlink.\n"); 2087 pr_err("ip_set: cannot register with nfnetlink.\n");
2088 unregister_pernet_subsys(&ip_set_net_ops);
2079 return ret; 2089 return ret;
2080 } 2090 }
2091
2081 ret = nf_register_sockopt(&so_set); 2092 ret = nf_register_sockopt(&so_set);
2082 if (ret != 0) { 2093 if (ret != 0) {
2083 pr_err("SO_SET registry failed: %d\n", ret); 2094 pr_err("SO_SET registry failed: %d\n", ret);
2084 nfnetlink_subsys_unregister(&ip_set_netlink_subsys); 2095 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2096 unregister_pernet_subsys(&ip_set_net_ops);
2085 return ret; 2097 return ret;
2086 } 2098 }
2087 ret = register_pernet_subsys(&ip_set_net_ops); 2099
2088 if (ret) {
2089 pr_err("ip_set: cannot register pernet_subsys.\n");
2090 nf_unregister_sockopt(&so_set);
2091 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2092 return ret;
2093 }
2094 pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL); 2100 pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
2095 return 0; 2101 return 0;
2096} 2102}
@@ -2098,9 +2104,10 @@ ip_set_init(void)
2098static void __exit 2104static void __exit
2099ip_set_fini(void) 2105ip_set_fini(void)
2100{ 2106{
2101 unregister_pernet_subsys(&ip_set_net_ops);
2102 nf_unregister_sockopt(&so_set); 2107 nf_unregister_sockopt(&so_set);
2103 nfnetlink_subsys_unregister(&ip_set_netlink_subsys); 2108 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2109
2110 unregister_pernet_subsys(&ip_set_net_ops);
2104 pr_debug("these are the famous last words\n"); 2111 pr_debug("these are the famous last words\n");
2105} 2112}
2106 2113
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 20bfbd315f61..613eb212cb48 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
123 return ret; 123 return ret;
124 124
125 ip &= ip_set_hostmask(h->netmask); 125 ip &= ip_set_hostmask(h->netmask);
126 e.ip = htonl(ip);
127 if (e.ip == 0)
128 return -IPSET_ERR_HASH_ELEM;
126 129
127 if (adt == IPSET_TEST) { 130 if (adt == IPSET_TEST)
128 e.ip = htonl(ip);
129 if (e.ip == 0)
130 return -IPSET_ERR_HASH_ELEM;
131 return adtfn(set, &e, &ext, &ext, flags); 131 return adtfn(set, &e, &ext, &ext, flags);
132 }
133 132
134 ip_to = ip; 133 ip_to = ip;
135 if (tb[IPSET_ATTR_IP_TO]) { 134 if (tb[IPSET_ATTR_IP_TO]) {
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
148 147
149 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); 148 hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
150 149
151 if (retried) 150 if (retried) {
152 ip = ntohl(h->next.ip); 151 ip = ntohl(h->next.ip);
153 for (; !before(ip_to, ip); ip += hosts) {
154 e.ip = htonl(ip); 152 e.ip = htonl(ip);
155 if (e.ip == 0) 153 }
156 return -IPSET_ERR_HASH_ELEM; 154 for (; ip <= ip_to;) {
157 ret = adtfn(set, &e, &ext, &ext, flags); 155 ret = adtfn(set, &e, &ext, &ext, flags);
158
159 if (ret && !ip_set_eexist(ret, flags)) 156 if (ret && !ip_set_eexist(ret, flags))
160 return ret; 157 return ret;
161 158
159 ip += hosts;
160 e.ip = htonl(ip);
161 if (e.ip == 0)
162 return 0;
163
162 ret = 0; 164 ret = 0;
163 } 165 }
164 return ret; 166 return ret;
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index b64cf14e8352..f3ba8348cf9d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
149 149
150 if (retried) 150 if (retried)
151 ip = ntohl(h->next.ip); 151 ip = ntohl(h->next.ip);
152 for (; !before(ip_to, ip); ip++) { 152 for (; ip <= ip_to; ip++) {
153 e.ip = htonl(ip); 153 e.ip = htonl(ip);
154 ret = adtfn(set, &e, &ext, &ext, flags); 154 ret = adtfn(set, &e, &ext, &ext, flags);
155 155
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index f438740e6c6a..ddb8039ec1d2 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
178 178
179 if (retried) 179 if (retried)
180 ip = ntohl(h->next.ip); 180 ip = ntohl(h->next.ip);
181 for (; !before(ip_to, ip); ip++) { 181 for (; ip <= ip_to; ip++) {
182 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) 182 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
183 : port; 183 : port;
184 for (; p <= port_to; p++) { 184 for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 6215fb898c50..a7f4d7a85420 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
185 185
186 if (retried) 186 if (retried)
187 ip = ntohl(h->next.ip); 187 ip = ntohl(h->next.ip);
188 for (; !before(ip_to, ip); ip++) { 188 for (; ip <= ip_to; ip++) {
189 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) 189 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
190 : port; 190 : port;
191 for (; p <= port_to; p++) { 191 for (; p <= port_to; p++) {
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5ab1b99a53c2..a2f19b9906e9 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
271 271
272 if (retried) 272 if (retried)
273 ip = ntohl(h->next.ip); 273 ip = ntohl(h->next.ip);
274 for (; !before(ip_to, ip); ip++) { 274 for (; ip <= ip_to; ip++) {
275 e.ip = htonl(ip); 275 e.ip = htonl(ip);
276 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) 276 p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
277 : port; 277 : port;
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
281 ip == ntohl(h->next.ip) && 281 ip == ntohl(h->next.ip) &&
282 p == ntohs(h->next.port) 282 p == ntohs(h->next.port)
283 ? ntohl(h->next.ip2) : ip2_from; 283 ? ntohl(h->next.ip2) : ip2_from;
284 while (!after(ip2, ip2_to)) { 284 while (ip2 <= ip2_to) {
285 e.ip2 = htonl(ip2); 285 e.ip2 = htonl(ip2);
286 ip2_last = ip_set_range_to_cidr(ip2, ip2_to, 286 ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
287 &cidr); 287 &cidr);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 5d9e895452e7..1c67a1761e45 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
193 } 193 }
194 if (retried) 194 if (retried)
195 ip = ntohl(h->next.ip); 195 ip = ntohl(h->next.ip);
196 while (!after(ip, ip_to)) { 196 while (ip <= ip_to) {
197 e.ip = htonl(ip); 197 e.ip = htonl(ip);
198 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); 198 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
199 ret = adtfn(set, &e, &ext, &ext, flags); 199 ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 44cf11939c91..d417074f1c1a 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
255 255
256 if (retried) 256 if (retried)
257 ip = ntohl(h->next.ip); 257 ip = ntohl(h->next.ip);
258 while (!after(ip, ip_to)) { 258 while (ip <= ip_to) {
259 e.ip = htonl(ip); 259 e.ip = htonl(ip);
260 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); 260 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
261 ret = adtfn(set, &e, &ext, &ext, flags); 261 ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index db614e13b193..7f9ae2e9645b 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
250 if (retried) 250 if (retried)
251 ip = ntohl(h->next.ip[0]); 251 ip = ntohl(h->next.ip[0]);
252 252
253 while (!after(ip, ip_to)) { 253 while (ip <= ip_to) {
254 e.ip[0] = htonl(ip); 254 e.ip[0] = htonl(ip);
255 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); 255 last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
256 ip2 = (retried && 256 ip2 = (retried &&
257 ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) 257 ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
258 : ip2_from; 258 : ip2_from;
259 while (!after(ip2, ip2_to)) { 259 while (ip2 <= ip2_to) {
260 e.ip[1] = htonl(ip2); 260 e.ip[1] = htonl(ip2);
261 last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); 261 last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
262 ret = adtfn(set, &e, &ext, &ext, flags); 262 ret = adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 54b64b6cd0cd..e6ef382febe4 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
241 241
242 if (retried) 242 if (retried)
243 ip = ntohl(h->next.ip); 243 ip = ntohl(h->next.ip);
244 while (!after(ip, ip_to)) { 244 while (ip <= ip_to) {
245 e.ip = htonl(ip); 245 e.ip = htonl(ip);
246 last = ip_set_range_to_cidr(ip, ip_to, &cidr); 246 last = ip_set_range_to_cidr(ip, ip_to, &cidr);
247 e.cidr = cidr - 1; 247 e.cidr = cidr - 1;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index aff846960ac4..8602f2595a1a 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
291 if (retried) 291 if (retried)
292 ip = ntohl(h->next.ip[0]); 292 ip = ntohl(h->next.ip[0]);
293 293
294 while (!after(ip, ip_to)) { 294 while (ip <= ip_to) {
295 e.ip[0] = htonl(ip); 295 e.ip[0] = htonl(ip);
296 ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); 296 ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
297 p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) 297 p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
301 ip2 = (retried && ip == ntohl(h->next.ip[0]) && 301 ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
302 p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) 302 p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
303 : ip2_from; 303 : ip2_from;
304 while (!after(ip2, ip2_to)) { 304 while (ip2 <= ip2_to) {
305 e.ip[1] = htonl(ip2); 305 e.ip[1] = htonl(ip2);
306 ip2_last = ip_set_range_to_cidr(ip2, ip2_to, 306 ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
307 &e.cidr[1]); 307 &e.cidr[1]);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 90d396814798..4527921b1c3a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
921{ 921{
922 struct sk_buff *new_skb = NULL; 922 struct sk_buff *new_skb = NULL;
923 struct iphdr *old_iph = NULL; 923 struct iphdr *old_iph = NULL;
924 __u8 old_dsfield;
924#ifdef CONFIG_IP_VS_IPV6 925#ifdef CONFIG_IP_VS_IPV6
925 struct ipv6hdr *old_ipv6h = NULL; 926 struct ipv6hdr *old_ipv6h = NULL;
926#endif 927#endif
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
945 *payload_len = 946 *payload_len =
946 ntohs(old_ipv6h->payload_len) + 947 ntohs(old_ipv6h->payload_len) +
947 sizeof(*old_ipv6h); 948 sizeof(*old_ipv6h);
948 *dsfield = ipv6_get_dsfield(old_ipv6h); 949 old_dsfield = ipv6_get_dsfield(old_ipv6h);
949 *ttl = old_ipv6h->hop_limit; 950 *ttl = old_ipv6h->hop_limit;
950 if (df) 951 if (df)
951 *df = 0; 952 *df = 0;
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
960 961
961 /* fix old IP header checksum */ 962 /* fix old IP header checksum */
962 ip_send_check(old_iph); 963 ip_send_check(old_iph);
963 *dsfield = ipv4_get_dsfield(old_iph); 964 old_dsfield = ipv4_get_dsfield(old_iph);
964 *ttl = old_iph->ttl; 965 *ttl = old_iph->ttl;
965 if (payload_len) 966 if (payload_len)
966 *payload_len = ntohs(old_iph->tot_len); 967 *payload_len = ntohs(old_iph->tot_len);
967 } 968 }
968 969
970 /* Implement full-functionality option for ECN encapsulation */
971 *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield);
972
969 return skb; 973 return skb;
970error: 974error:
971 kfree_skb(skb); 975 kfree_skb(skb);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 929927171426..64e1ee091225 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
1048 if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) 1048 if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
1049 goto nla_put_failure; 1049 goto nla_put_failure;
1050 1050
1051 if (nft_dump_stats(skb, nft_base_chain(chain)->stats)) 1051 if (basechain->stats && nft_dump_stats(skb, basechain->stats))
1052 goto nla_put_failure; 1052 goto nla_put_failure;
1053 } 1053 }
1054 1054
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1487 1487
1488 chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], 1488 chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME],
1489 genmask); 1489 genmask);
1490 if (IS_ERR(chain2)) 1490 if (!IS_ERR(chain2))
1491 return PTR_ERR(chain2); 1491 return -EEXIST;
1492 } 1492 }
1493 1493
1494 if (nla[NFTA_CHAIN_COUNTERS]) { 1494 if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2741,8 +2741,10 @@ cont:
2741 list_for_each_entry(i, &ctx->table->sets, list) { 2741 list_for_each_entry(i, &ctx->table->sets, list) {
2742 if (!nft_is_active_next(ctx->net, i)) 2742 if (!nft_is_active_next(ctx->net, i))
2743 continue; 2743 continue;
2744 if (!strcmp(set->name, i->name)) 2744 if (!strcmp(set->name, i->name)) {
2745 kfree(set->name);
2745 return -ENFILE; 2746 return -ENFILE;
2747 }
2746 } 2748 }
2747 return 0; 2749 return 0;
2748} 2750}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c83a3b5e1c6c..d8571f414208 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
892 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) 892 if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
893 return ERR_PTR(-EFAULT); 893 return ERR_PTR(-EFAULT);
894 894
895 strlcpy(info->name, compat_tmp.name, sizeof(info->name)); 895 memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
896 info->num_counters = compat_tmp.num_counters; 896 info->num_counters = compat_tmp.num_counters;
897 user += sizeof(compat_tmp); 897 user += sizeof(compat_tmp);
898 } else 898 } else
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
905 if (copy_from_user(info, user, sizeof(*info)) != 0) 905 if (copy_from_user(info, user, sizeof(*info)) != 0)
906 return ERR_PTR(-EFAULT); 906 return ERR_PTR(-EFAULT);
907 907
908 info->name[sizeof(info->name) - 1] = '\0';
909 user += sizeof(*info); 908 user += sizeof(*info);
910 } 909 }
910 info->name[sizeof(info->name) - 1] = '\0';
911 911
912 size = sizeof(struct xt_counters); 912 size = sizeof(struct xt_counters);
913 size *= info->num_counters; 913 size *= info->num_counters;
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 38986a95216c..29123934887b 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/syscalls.h>
11#include <linux/skbuff.h> 12#include <linux/skbuff.h>
12#include <linux/filter.h> 13#include <linux/filter.h>
13#include <linux/bpf.h> 14#include <linux/bpf.h>
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
49 return 0; 50 return 0;
50} 51}
51 52
53static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
54{
55 mm_segment_t oldfs = get_fs();
56 int retval, fd;
57
58 set_fs(KERNEL_DS);
59 fd = bpf_obj_get_user(path);
60 set_fs(oldfs);
61 if (fd < 0)
62 return fd;
63
64 retval = __bpf_mt_check_fd(fd, ret);
65 sys_close(fd);
66 return retval;
67}
68
52static int bpf_mt_check(const struct xt_mtchk_param *par) 69static int bpf_mt_check(const struct xt_mtchk_param *par)
53{ 70{
54 struct xt_bpf_info *info = par->matchinfo; 71 struct xt_bpf_info *info = par->matchinfo;
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
66 return __bpf_mt_check_bytecode(info->bpf_program, 83 return __bpf_mt_check_bytecode(info->bpf_program,
67 info->bpf_program_num_elem, 84 info->bpf_program_num_elem,
68 &info->filter); 85 &info->filter);
69 else if (info->mode == XT_BPF_MODE_FD_PINNED || 86 else if (info->mode == XT_BPF_MODE_FD_ELF)
70 info->mode == XT_BPF_MODE_FD_ELF)
71 return __bpf_mt_check_fd(info->fd, &info->filter); 87 return __bpf_mt_check_fd(info->fd, &info->filter);
88 else if (info->mode == XT_BPF_MODE_PATH_PINNED)
89 return __bpf_mt_check_path(info->path, &info->filter);
72 else 90 else
73 return -EINVAL; 91 return -EINVAL;
74} 92}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index e75ef39669c5..575d2153e3b8 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
76 transparent = nf_sk_is_transparent(sk); 76 transparent = nf_sk_is_transparent(sk);
77 77
78 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && 78 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
79 transparent) 79 transparent && sk_fullsock(sk))
80 pskb->mark = sk->sk_mark; 80 pskb->mark = sk->sk_mark;
81 81
82 if (sk != skb->sk) 82 if (sk != skb->sk)
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
133 transparent = nf_sk_is_transparent(sk); 133 transparent = nf_sk_is_transparent(sk);
134 134
135 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && 135 if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
136 transparent) 136 transparent && sk_fullsock(sk))
137 pskb->mark = sk->sk_mark; 137 pskb->mark = sk->sk_mark;
138 138
139 if (sk != skb->sk) 139 if (sk != skb->sk)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 327807731b44..b93148e8e9fb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2266,14 +2266,18 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2266 cb->min_dump_alloc = control->min_dump_alloc; 2266 cb->min_dump_alloc = control->min_dump_alloc;
2267 cb->skb = skb; 2267 cb->skb = skb;
2268 2268
2269 if (cb->start) {
2270 ret = cb->start(cb);
2271 if (ret)
2272 goto error_unlock;
2273 }
2274
2269 nlk->cb_running = true; 2275 nlk->cb_running = true;
2270 2276
2271 mutex_unlock(nlk->cb_mutex); 2277 mutex_unlock(nlk->cb_mutex);
2272 2278
2273 if (cb->start)
2274 cb->start(cb);
2275
2276 ret = netlink_dump(sk); 2279 ret = netlink_dump(sk);
2280
2277 sock_put(sk); 2281 sock_put(sk);
2278 2282
2279 if (ret) 2283 if (ret)
@@ -2303,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2303 size_t tlvlen = 0; 2307 size_t tlvlen = 0;
2304 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); 2308 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2305 unsigned int flags = 0; 2309 unsigned int flags = 0;
2310 bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
2306 2311
2307 /* Error messages get the original request appened, unless the user 2312 /* Error messages get the original request appened, unless the user
2308 * requests to cap the error message, and get extra error data if 2313 * requests to cap the error message, and get extra error data if
@@ -2313,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2313 payload += nlmsg_len(nlh); 2318 payload += nlmsg_len(nlh);
2314 else 2319 else
2315 flags |= NLM_F_CAPPED; 2320 flags |= NLM_F_CAPPED;
2316 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2321 if (nlk_has_extack && extack) {
2317 if (extack->_msg) 2322 if (extack->_msg)
2318 tlvlen += nla_total_size(strlen(extack->_msg) + 1); 2323 tlvlen += nla_total_size(strlen(extack->_msg) + 1);
2319 if (extack->bad_attr) 2324 if (extack->bad_attr)
@@ -2322,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2322 } else { 2327 } else {
2323 flags |= NLM_F_CAPPED; 2328 flags |= NLM_F_CAPPED;
2324 2329
2325 if (nlk->flags & NETLINK_F_EXT_ACK && 2330 if (nlk_has_extack && extack && extack->cookie_len)
2326 extack && extack->cookie_len)
2327 tlvlen += nla_total_size(extack->cookie_len); 2331 tlvlen += nla_total_size(extack->cookie_len);
2328 } 2332 }
2329 2333
@@ -2351,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
2351 errmsg->error = err; 2355 errmsg->error = err;
2352 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); 2356 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2353 2357
2354 if (nlk->flags & NETLINK_F_EXT_ACK && extack) { 2358 if (nlk_has_extack && extack) {
2355 if (err) { 2359 if (err) {
2356 if (extack->_msg) 2360 if (extack->_msg)
2357 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, 2361 WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d288f52c53f7..2986941164b1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1769,7 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1769 1769
1770out: 1770out:
1771 if (err && rollover) { 1771 if (err && rollover) {
1772 kfree(rollover); 1772 kfree_rcu(rollover, rcu);
1773 po->rollover = NULL; 1773 po->rollover = NULL;
1774 } 1774 }
1775 mutex_unlock(&fanout_mutex); 1775 mutex_unlock(&fanout_mutex);
@@ -1796,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
1796 else 1796 else
1797 f = NULL; 1797 f = NULL;
1798 1798
1799 if (po->rollover) 1799 if (po->rollover) {
1800 kfree_rcu(po->rollover, rcu); 1800 kfree_rcu(po->rollover, rcu);
1801 po->rollover = NULL;
1802 }
1801 } 1803 }
1802 mutex_unlock(&fanout_mutex); 1804 mutex_unlock(&fanout_mutex);
1803 1805
@@ -2840,6 +2842,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2840 struct virtio_net_hdr vnet_hdr = { 0 }; 2842 struct virtio_net_hdr vnet_hdr = { 0 };
2841 int offset = 0; 2843 int offset = 0;
2842 struct packet_sock *po = pkt_sk(sk); 2844 struct packet_sock *po = pkt_sk(sk);
2845 bool has_vnet_hdr = false;
2843 int hlen, tlen, linear; 2846 int hlen, tlen, linear;
2844 int extra_len = 0; 2847 int extra_len = 0;
2845 2848
@@ -2883,6 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2883 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); 2886 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2884 if (err) 2887 if (err)
2885 goto out_unlock; 2888 goto out_unlock;
2889 has_vnet_hdr = true;
2886 } 2890 }
2887 2891
2888 if (unlikely(sock_flag(sk, SOCK_NOFCS))) { 2892 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2941,7 +2945,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2941 skb->priority = sk->sk_priority; 2945 skb->priority = sk->sk_priority;
2942 skb->mark = sockc.mark; 2946 skb->mark = sockc.mark;
2943 2947
2944 if (po->has_vnet_hdr) { 2948 if (has_vnet_hdr) {
2945 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); 2949 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2946 if (err) 2950 if (err)
2947 goto out_free; 2951 goto out_free;
@@ -3069,13 +3073,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3069 int ret = 0; 3073 int ret = 0;
3070 bool unlisted = false; 3074 bool unlisted = false;
3071 3075
3072 if (po->fanout)
3073 return -EINVAL;
3074
3075 lock_sock(sk); 3076 lock_sock(sk);
3076 spin_lock(&po->bind_lock); 3077 spin_lock(&po->bind_lock);
3077 rcu_read_lock(); 3078 rcu_read_lock();
3078 3079
3080 if (po->fanout) {
3081 ret = -EINVAL;
3082 goto out_unlock;
3083 }
3084
3079 if (name) { 3085 if (name) {
3080 dev = dev_get_by_name_rcu(sock_net(sk), name); 3086 dev = dev_get_by_name_rcu(sock_net(sk), name);
3081 if (!dev) { 3087 if (!dev) {
@@ -3847,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3847 void *data = &val; 3853 void *data = &val;
3848 union tpacket_stats_u st; 3854 union tpacket_stats_u st;
3849 struct tpacket_rollover_stats rstats; 3855 struct tpacket_rollover_stats rstats;
3856 struct packet_rollover *rollover;
3850 3857
3851 if (level != SOL_PACKET) 3858 if (level != SOL_PACKET)
3852 return -ENOPROTOOPT; 3859 return -ENOPROTOOPT;
@@ -3925,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3925 0); 3932 0);
3926 break; 3933 break;
3927 case PACKET_ROLLOVER_STATS: 3934 case PACKET_ROLLOVER_STATS:
3928 if (!po->rollover) 3935 rcu_read_lock();
3936 rollover = rcu_dereference(po->rollover);
3937 if (rollover) {
3938 rstats.tp_all = atomic_long_read(&rollover->num);
3939 rstats.tp_huge = atomic_long_read(&rollover->num_huge);
3940 rstats.tp_failed = atomic_long_read(&rollover->num_failed);
3941 data = &rstats;
3942 lv = sizeof(rstats);
3943 }
3944 rcu_read_unlock();
3945 if (!rollover)
3929 return -EINVAL; 3946 return -EINVAL;
3930 rstats.tp_all = atomic_long_read(&po->rollover->num);
3931 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3932 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3933 data = &rstats;
3934 lv = sizeof(rstats);
3935 break; 3947 break;
3936 case PACKET_TX_HAS_OFF: 3948 case PACKET_TX_HAS_OFF:
3937 val = po->tp_tx_has_off; 3949 val = po->tp_tx_has_off;
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 3a6ad0f438dc..64f95624f219 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -296,6 +296,6 @@ static void __exit psample_module_exit(void)
296module_init(psample_module_init); 296module_init(psample_module_init);
297module_exit(psample_module_exit); 297module_exit(psample_module_exit);
298 298
299MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 299MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
300MODULE_DESCRIPTION("netlink channel for packet sampling"); 300MODULE_DESCRIPTION("netlink channel for packet sampling");
301MODULE_LICENSE("GPL v2"); 301MODULE_LICENSE("GPL v2");
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 6ab39dbcca01..8557a1cae041 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
661 } 661 }
662 } 662 }
663 663
664 rds_ib_set_wr_signal_state(ic, send, 0); 664 rds_ib_set_wr_signal_state(ic, send, false);
665 665
666 /* 666 /*
667 * Always signal the last one if we're stopping due to flow control. 667 * Always signal the last one if we're stopping due to flow control.
668 */ 668 */
669 if (ic->i_flowctl && flow_controlled && i == (work_alloc-1)) 669 if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
670 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; 670 rds_ib_set_wr_signal_state(ic, send, true);
671 send->s_wr.send_flags |= IB_SEND_SOLICITED;
672 }
671 673
672 if (send->s_wr.send_flags & IB_SEND_SIGNALED) 674 if (send->s_wr.send_flags & IB_SEND_SIGNALED)
673 nr_sig++; 675 nr_sig++;
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
705 if (scat == &rm->data.op_sg[rm->data.op_count]) { 707 if (scat == &rm->data.op_sg[rm->data.op_count]) {
706 prev->s_op = ic->i_data_op; 708 prev->s_op = ic->i_data_op;
707 prev->s_wr.send_flags |= IB_SEND_SOLICITED; 709 prev->s_wr.send_flags |= IB_SEND_SOLICITED;
708 if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) { 710 if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
709 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs; 711 nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
710 prev->s_wr.send_flags |= IB_SEND_SIGNALED;
711 nr_sig++;
712 }
713 ic->i_data_op = NULL; 712 ic->i_data_op = NULL;
714 } 713 }
715 714
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
792 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; 791 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
793 send->s_atomic_wr.swap_mask = 0; 792 send->s_atomic_wr.swap_mask = 0;
794 } 793 }
794 send->s_wr.send_flags = 0;
795 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); 795 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
796 send->s_atomic_wr.wr.num_sge = 1; 796 send->s_atomic_wr.wr.num_sge = 1;
797 send->s_atomic_wr.wr.next = NULL; 797 send->s_atomic_wr.wr.next = NULL;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index fb17552fd292..4b0a8288c98a 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
308 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, 308 call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
309 gfp); 309 gfp);
310 /* The socket has been unlocked. */ 310 /* The socket has been unlocked. */
311 if (!IS_ERR(call)) 311 if (!IS_ERR(call)) {
312 call->notify_rx = notify_rx; 312 call->notify_rx = notify_rx;
313 mutex_unlock(&call->user_mutex);
314 }
313 315
314 mutex_unlock(&call->user_mutex);
315 _leave(" = %p", call); 316 _leave(" = %p", call);
316 return call; 317 return call;
317} 318}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ec986ae52808..8b5abcd2f32f 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -264,12 +264,13 @@ static int __init sample_init_module(void)
264 264
265static void __exit sample_cleanup_module(void) 265static void __exit sample_cleanup_module(void)
266{ 266{
267 rcu_barrier();
267 tcf_unregister_action(&act_sample_ops, &sample_net_ops); 268 tcf_unregister_action(&act_sample_ops, &sample_net_ops);
268} 269}
269 270
270module_init(sample_init_module); 271module_init(sample_init_module);
271module_exit(sample_cleanup_module); 272module_exit(sample_cleanup_module);
272 273
273MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>"); 274MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
274MODULE_DESCRIPTION("Packet sampling action"); 275MODULE_DESCRIPTION("Packet sampling action");
275MODULE_LICENSE("GPL v2"); 276MODULE_LICENSE("GPL v2");
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b2219adf520..b2d310745487 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
77} 77}
78EXPORT_SYMBOL(register_tcf_proto_ops); 78EXPORT_SYMBOL(register_tcf_proto_ops);
79 79
80static struct workqueue_struct *tc_filter_wq;
81
80int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 82int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
81{ 83{
82 struct tcf_proto_ops *t; 84 struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
86 * tcf_proto_ops's destroy() handler. 88 * tcf_proto_ops's destroy() handler.
87 */ 89 */
88 rcu_barrier(); 90 rcu_barrier();
91 flush_workqueue(tc_filter_wq);
89 92
90 write_lock(&cls_mod_lock); 93 write_lock(&cls_mod_lock);
91 list_for_each_entry(t, &tcf_proto_base, head) { 94 list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
100} 103}
101EXPORT_SYMBOL(unregister_tcf_proto_ops); 104EXPORT_SYMBOL(unregister_tcf_proto_ops);
102 105
106bool tcf_queue_work(struct work_struct *work)
107{
108 return queue_work(tc_filter_wq, work);
109}
110EXPORT_SYMBOL(tcf_queue_work);
111
103/* Select new prio value from the range, managed by kernel. */ 112/* Select new prio value from the range, managed by kernel. */
104 113
105static inline u32 tcf_auto_prio(struct tcf_proto *tp) 114static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@@ -266,6 +275,23 @@ err_chain_create:
266} 275}
267EXPORT_SYMBOL(tcf_block_get); 276EXPORT_SYMBOL(tcf_block_get);
268 277
278static void tcf_block_put_final(struct work_struct *work)
279{
280 struct tcf_block *block = container_of(work, struct tcf_block, work);
281 struct tcf_chain *chain, *tmp;
282
283 rtnl_lock();
284 /* Only chain 0 should be still here. */
285 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
286 tcf_chain_put(chain);
287 rtnl_unlock();
288 kfree(block);
289}
290
291/* XXX: Standalone actions are not allowed to jump to any chain, and bound
292 * actions should be all removed after flushing. However, filters are now
293 * destroyed in tc filter workqueue with RTNL lock, they can not race here.
294 */
269void tcf_block_put(struct tcf_block *block) 295void tcf_block_put(struct tcf_block *block)
270{ 296{
271 struct tcf_chain *chain, *tmp; 297 struct tcf_chain *chain, *tmp;
@@ -273,32 +299,15 @@ void tcf_block_put(struct tcf_block *block)
273 if (!block) 299 if (!block)
274 return; 300 return;
275 301
276 /* XXX: Standalone actions are not allowed to jump to any chain, and 302 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
277 * bound actions should be all removed after flushing. However,
278 * filters are destroyed in RCU callbacks, we have to hold the chains
279 * first, otherwise we would always race with RCU callbacks on this list
280 * without proper locking.
281 */
282
283 /* Wait for existing RCU callbacks to cool down. */
284 rcu_barrier();
285
286 /* Hold a refcnt for all chains, except 0, in case they are gone. */
287 list_for_each_entry(chain, &block->chain_list, list)
288 if (chain->index)
289 tcf_chain_hold(chain);
290
291 /* No race on the list, because no chain could be destroyed. */
292 list_for_each_entry(chain, &block->chain_list, list)
293 tcf_chain_flush(chain); 303 tcf_chain_flush(chain);
294 304
295 /* Wait for RCU callbacks to release the reference count. */ 305 INIT_WORK(&block->work, tcf_block_put_final);
306 /* Wait for RCU callbacks to release the reference count and make
307 * sure their works have been queued before this.
308 */
296 rcu_barrier(); 309 rcu_barrier();
297 310 tcf_queue_work(&block->work);
298 /* At this point, all the chains should have refcnt == 1. */
299 list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
300 tcf_chain_put(chain);
301 kfree(block);
302} 311}
303EXPORT_SYMBOL(tcf_block_put); 312EXPORT_SYMBOL(tcf_block_put);
304 313
@@ -879,6 +888,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
879#ifdef CONFIG_NET_CLS_ACT 888#ifdef CONFIG_NET_CLS_ACT
880 LIST_HEAD(actions); 889 LIST_HEAD(actions);
881 890
891 ASSERT_RTNL();
882 tcf_exts_to_list(exts, &actions); 892 tcf_exts_to_list(exts, &actions);
883 tcf_action_destroy(&actions, TCA_ACT_UNBIND); 893 tcf_action_destroy(&actions, TCA_ACT_UNBIND);
884 kfree(exts->actions); 894 kfree(exts->actions);
@@ -1030,6 +1040,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
1030 1040
1031static int __init tc_filter_init(void) 1041static int __init tc_filter_init(void)
1032{ 1042{
1043 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
1044 if (!tc_filter_wq)
1045 return -ENOMEM;
1046
1033 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); 1047 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
1034 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); 1048 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
1035 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, 1049 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d89ebafd2239..f177649a2419 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -34,7 +34,10 @@ struct basic_filter {
34 struct tcf_result res; 34 struct tcf_result res;
35 struct tcf_proto *tp; 35 struct tcf_proto *tp;
36 struct list_head link; 36 struct list_head link;
37 struct rcu_head rcu; 37 union {
38 struct work_struct work;
39 struct rcu_head rcu;
40 };
38}; 41};
39 42
40static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, 43static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -82,15 +85,26 @@ static int basic_init(struct tcf_proto *tp)
82 return 0; 85 return 0;
83} 86}
84 87
85static void basic_delete_filter(struct rcu_head *head) 88static void basic_delete_filter_work(struct work_struct *work)
86{ 89{
87 struct basic_filter *f = container_of(head, struct basic_filter, rcu); 90 struct basic_filter *f = container_of(work, struct basic_filter, work);
88 91
92 rtnl_lock();
89 tcf_exts_destroy(&f->exts); 93 tcf_exts_destroy(&f->exts);
90 tcf_em_tree_destroy(&f->ematches); 94 tcf_em_tree_destroy(&f->ematches);
95 rtnl_unlock();
96
91 kfree(f); 97 kfree(f);
92} 98}
93 99
100static void basic_delete_filter(struct rcu_head *head)
101{
102 struct basic_filter *f = container_of(head, struct basic_filter, rcu);
103
104 INIT_WORK(&f->work, basic_delete_filter_work);
105 tcf_queue_work(&f->work);
106}
107
94static void basic_destroy(struct tcf_proto *tp) 108static void basic_destroy(struct tcf_proto *tp)
95{ 109{
96 struct basic_head *head = rtnl_dereference(tp->root); 110 struct basic_head *head = rtnl_dereference(tp->root);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 520c5027646a..037a3ae86829 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
49 struct sock_filter *bpf_ops; 49 struct sock_filter *bpf_ops;
50 const char *bpf_name; 50 const char *bpf_name;
51 struct tcf_proto *tp; 51 struct tcf_proto *tp;
52 struct rcu_head rcu; 52 union {
53 struct work_struct work;
54 struct rcu_head rcu;
55 };
53}; 56};
54 57
55static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { 58static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
257 kfree(prog); 260 kfree(prog);
258} 261}
259 262
263static void cls_bpf_delete_prog_work(struct work_struct *work)
264{
265 struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
266
267 rtnl_lock();
268 __cls_bpf_delete_prog(prog);
269 rtnl_unlock();
270}
271
260static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu) 272static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
261{ 273{
262 __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu)); 274 struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
275
276 INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
277 tcf_queue_work(&prog->work);
263} 278}
264 279
265static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) 280static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d48452f87975..a97e069bee89 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
23 struct tcf_exts exts; 23 struct tcf_exts exts;
24 struct tcf_ematch_tree ematches; 24 struct tcf_ematch_tree ematches;
25 struct tcf_proto *tp; 25 struct tcf_proto *tp;
26 struct rcu_head rcu; 26 union {
27 struct work_struct work;
28 struct rcu_head rcu;
29 };
27}; 30};
28 31
29static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, 32static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
57 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, 60 [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
58}; 61};
59 62
63static void cls_cgroup_destroy_work(struct work_struct *work)
64{
65 struct cls_cgroup_head *head = container_of(work,
66 struct cls_cgroup_head,
67 work);
68 rtnl_lock();
69 tcf_exts_destroy(&head->exts);
70 tcf_em_tree_destroy(&head->ematches);
71 kfree(head);
72 rtnl_unlock();
73}
74
60static void cls_cgroup_destroy_rcu(struct rcu_head *root) 75static void cls_cgroup_destroy_rcu(struct rcu_head *root)
61{ 76{
62 struct cls_cgroup_head *head = container_of(root, 77 struct cls_cgroup_head *head = container_of(root,
63 struct cls_cgroup_head, 78 struct cls_cgroup_head,
64 rcu); 79 rcu);
65 80
66 tcf_exts_destroy(&head->exts); 81 INIT_WORK(&head->work, cls_cgroup_destroy_work);
67 tcf_em_tree_destroy(&head->ematches); 82 tcf_queue_work(&head->work);
68 kfree(head);
69} 83}
70 84
71static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, 85static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2a3a60ec5b86..67f3a2af6aab 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -57,7 +57,10 @@ struct flow_filter {
57 u32 divisor; 57 u32 divisor;
58 u32 baseclass; 58 u32 baseclass;
59 u32 hashrnd; 59 u32 hashrnd;
60 struct rcu_head rcu; 60 union {
61 struct work_struct work;
62 struct rcu_head rcu;
63 };
61}; 64};
62 65
63static inline u32 addr_fold(void *addr) 66static inline u32 addr_fold(void *addr)
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
369 [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, 372 [TCA_FLOW_PERTURB] = { .type = NLA_U32 },
370}; 373};
371 374
372static void flow_destroy_filter(struct rcu_head *head) 375static void flow_destroy_filter_work(struct work_struct *work)
373{ 376{
374 struct flow_filter *f = container_of(head, struct flow_filter, rcu); 377 struct flow_filter *f = container_of(work, struct flow_filter, work);
375 378
379 rtnl_lock();
376 del_timer_sync(&f->perturb_timer); 380 del_timer_sync(&f->perturb_timer);
377 tcf_exts_destroy(&f->exts); 381 tcf_exts_destroy(&f->exts);
378 tcf_em_tree_destroy(&f->ematches); 382 tcf_em_tree_destroy(&f->ematches);
379 kfree(f); 383 kfree(f);
384 rtnl_unlock();
385}
386
387static void flow_destroy_filter(struct rcu_head *head)
388{
389 struct flow_filter *f = container_of(head, struct flow_filter, rcu);
390
391 INIT_WORK(&f->work, flow_destroy_filter_work);
392 tcf_queue_work(&f->work);
380} 393}
381 394
382static int flow_change(struct net *net, struct sk_buff *in_skb, 395static int flow_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d230cb4c8094..5b5722c8b32c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -87,7 +87,10 @@ struct cls_fl_filter {
87 struct list_head list; 87 struct list_head list;
88 u32 handle; 88 u32 handle;
89 u32 flags; 89 u32 flags;
90 struct rcu_head rcu; 90 union {
91 struct work_struct work;
92 struct rcu_head rcu;
93 };
91 struct net_device *hw_dev; 94 struct net_device *hw_dev;
92}; 95};
93 96
@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
215 return 0; 218 return 0;
216} 219}
217 220
218static void fl_destroy_filter(struct rcu_head *head) 221static void fl_destroy_filter_work(struct work_struct *work)
219{ 222{
220 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); 223 struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
221 224
225 rtnl_lock();
222 tcf_exts_destroy(&f->exts); 226 tcf_exts_destroy(&f->exts);
223 kfree(f); 227 kfree(f);
228 rtnl_unlock();
229}
230
231static void fl_destroy_filter(struct rcu_head *head)
232{
233 struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
234
235 INIT_WORK(&f->work, fl_destroy_filter_work);
236 tcf_queue_work(&f->work);
224} 237}
225 238
226static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) 239static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -234,6 +247,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
234 tc_cls_common_offload_init(&cls_flower.common, tp); 247 tc_cls_common_offload_init(&cls_flower.common, tp);
235 cls_flower.command = TC_CLSFLOWER_DESTROY; 248 cls_flower.command = TC_CLSFLOWER_DESTROY;
236 cls_flower.cookie = (unsigned long) f; 249 cls_flower.cookie = (unsigned long) f;
250 cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
237 251
238 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); 252 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
239} 253}
@@ -289,6 +303,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
289 cls_flower.command = TC_CLSFLOWER_STATS; 303 cls_flower.command = TC_CLSFLOWER_STATS;
290 cls_flower.cookie = (unsigned long) f; 304 cls_flower.cookie = (unsigned long) f;
291 cls_flower.exts = &f->exts; 305 cls_flower.exts = &f->exts;
306 cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
292 307
293 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, 308 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
294 &cls_flower); 309 &cls_flower);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 941245ad07fd..99183b8621ec 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -46,7 +46,10 @@ struct fw_filter {
46#endif /* CONFIG_NET_CLS_IND */ 46#endif /* CONFIG_NET_CLS_IND */
47 struct tcf_exts exts; 47 struct tcf_exts exts;
48 struct tcf_proto *tp; 48 struct tcf_proto *tp;
49 struct rcu_head rcu; 49 union {
50 struct work_struct work;
51 struct rcu_head rcu;
52 };
50}; 53};
51 54
52static u32 fw_hash(u32 handle) 55static u32 fw_hash(u32 handle)
@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
119 return 0; 122 return 0;
120} 123}
121 124
122static void fw_delete_filter(struct rcu_head *head) 125static void fw_delete_filter_work(struct work_struct *work)
123{ 126{
124 struct fw_filter *f = container_of(head, struct fw_filter, rcu); 127 struct fw_filter *f = container_of(work, struct fw_filter, work);
125 128
129 rtnl_lock();
126 tcf_exts_destroy(&f->exts); 130 tcf_exts_destroy(&f->exts);
127 kfree(f); 131 kfree(f);
132 rtnl_unlock();
133}
134
135static void fw_delete_filter(struct rcu_head *head)
136{
137 struct fw_filter *f = container_of(head, struct fw_filter, rcu);
138
139 INIT_WORK(&f->work, fw_delete_filter_work);
140 tcf_queue_work(&f->work);
128} 141}
129 142
130static void fw_destroy(struct tcf_proto *tp) 143static void fw_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index eeac606c95ab..c33f711b9019 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,7 +21,10 @@ struct cls_mall_head {
21 struct tcf_result res; 21 struct tcf_result res;
22 u32 handle; 22 u32 handle;
23 u32 flags; 23 u32 flags;
24 struct rcu_head rcu; 24 union {
25 struct work_struct work;
26 struct rcu_head rcu;
27 };
25}; 28};
26 29
27static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, 30static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
41 return 0; 44 return 0;
42} 45}
43 46
47static void mall_destroy_work(struct work_struct *work)
48{
49 struct cls_mall_head *head = container_of(work, struct cls_mall_head,
50 work);
51 rtnl_lock();
52 tcf_exts_destroy(&head->exts);
53 kfree(head);
54 rtnl_unlock();
55}
56
44static void mall_destroy_rcu(struct rcu_head *rcu) 57static void mall_destroy_rcu(struct rcu_head *rcu)
45{ 58{
46 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, 59 struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
47 rcu); 60 rcu);
48 61
49 tcf_exts_destroy(&head->exts); 62 INIT_WORK(&head->work, mall_destroy_work);
50 kfree(head); 63 tcf_queue_work(&head->work);
51} 64}
52 65
53static int mall_replace_hw_filter(struct tcf_proto *tp, 66static int mall_replace_hw_filter(struct tcf_proto *tp,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 9ddde65915d2..4b14ccd8b8f2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -57,7 +57,10 @@ struct route4_filter {
57 u32 handle; 57 u32 handle;
58 struct route4_bucket *bkt; 58 struct route4_bucket *bkt;
59 struct tcf_proto *tp; 59 struct tcf_proto *tp;
60 struct rcu_head rcu; 60 union {
61 struct work_struct work;
62 struct rcu_head rcu;
63 };
61}; 64};
62 65
63#define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) 66#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
254 return 0; 257 return 0;
255} 258}
256 259
257static void route4_delete_filter(struct rcu_head *head) 260static void route4_delete_filter_work(struct work_struct *work)
258{ 261{
259 struct route4_filter *f = container_of(head, struct route4_filter, rcu); 262 struct route4_filter *f = container_of(work, struct route4_filter, work);
260 263
264 rtnl_lock();
261 tcf_exts_destroy(&f->exts); 265 tcf_exts_destroy(&f->exts);
262 kfree(f); 266 kfree(f);
267 rtnl_unlock();
268}
269
270static void route4_delete_filter(struct rcu_head *head)
271{
272 struct route4_filter *f = container_of(head, struct route4_filter, rcu);
273
274 INIT_WORK(&f->work, route4_delete_filter_work);
275 tcf_queue_work(&f->work);
263} 276}
264 277
265static void route4_destroy(struct tcf_proto *tp) 278static void route4_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b1f6ed48bc72..bdbc541787f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -97,7 +97,10 @@ struct rsvp_filter {
97 97
98 u32 handle; 98 u32 handle;
99 struct rsvp_session *sess; 99 struct rsvp_session *sess;
100 struct rcu_head rcu; 100 union {
101 struct work_struct work;
102 struct rcu_head rcu;
103 };
101}; 104};
102 105
103static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) 106static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
282 return -ENOBUFS; 285 return -ENOBUFS;
283} 286}
284 287
285static void rsvp_delete_filter_rcu(struct rcu_head *head) 288static void rsvp_delete_filter_work(struct work_struct *work)
286{ 289{
287 struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu); 290 struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
288 291
292 rtnl_lock();
289 tcf_exts_destroy(&f->exts); 293 tcf_exts_destroy(&f->exts);
290 kfree(f); 294 kfree(f);
295 rtnl_unlock();
296}
297
298static void rsvp_delete_filter_rcu(struct rcu_head *head)
299{
300 struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
301
302 INIT_WORK(&f->work, rsvp_delete_filter_work);
303 tcf_queue_work(&f->work);
291} 304}
292 305
293static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) 306static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 14a7e08b2fa9..beaa95e09c25 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,14 +27,20 @@
27struct tcindex_filter_result { 27struct tcindex_filter_result {
28 struct tcf_exts exts; 28 struct tcf_exts exts;
29 struct tcf_result res; 29 struct tcf_result res;
30 struct rcu_head rcu; 30 union {
31 struct work_struct work;
32 struct rcu_head rcu;
33 };
31}; 34};
32 35
33struct tcindex_filter { 36struct tcindex_filter {
34 u16 key; 37 u16 key;
35 struct tcindex_filter_result result; 38 struct tcindex_filter_result result;
36 struct tcindex_filter __rcu *next; 39 struct tcindex_filter __rcu *next;
37 struct rcu_head rcu; 40 union {
41 struct work_struct work;
42 struct rcu_head rcu;
43 };
38}; 44};
39 45
40 46
@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
133 return 0; 139 return 0;
134} 140}
135 141
142static void tcindex_destroy_rexts_work(struct work_struct *work)
143{
144 struct tcindex_filter_result *r;
145
146 r = container_of(work, struct tcindex_filter_result, work);
147 rtnl_lock();
148 tcf_exts_destroy(&r->exts);
149 rtnl_unlock();
150}
151
136static void tcindex_destroy_rexts(struct rcu_head *head) 152static void tcindex_destroy_rexts(struct rcu_head *head)
137{ 153{
138 struct tcindex_filter_result *r; 154 struct tcindex_filter_result *r;
139 155
140 r = container_of(head, struct tcindex_filter_result, rcu); 156 r = container_of(head, struct tcindex_filter_result, rcu);
141 tcf_exts_destroy(&r->exts); 157 INIT_WORK(&r->work, tcindex_destroy_rexts_work);
158 tcf_queue_work(&r->work);
159}
160
161static void tcindex_destroy_fexts_work(struct work_struct *work)
162{
163 struct tcindex_filter *f = container_of(work, struct tcindex_filter,
164 work);
165
166 rtnl_lock();
167 tcf_exts_destroy(&f->result.exts);
168 kfree(f);
169 rtnl_unlock();
142} 170}
143 171
144static void tcindex_destroy_fexts(struct rcu_head *head) 172static void tcindex_destroy_fexts(struct rcu_head *head)
@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
146 struct tcindex_filter *f = container_of(head, struct tcindex_filter, 174 struct tcindex_filter *f = container_of(head, struct tcindex_filter,
147 rcu); 175 rcu);
148 176
149 tcf_exts_destroy(&f->result.exts); 177 INIT_WORK(&f->work, tcindex_destroy_fexts_work);
150 kfree(f); 178 tcf_queue_work(&f->work);
151} 179}
152 180
153static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last) 181static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 10b8d851fc6b..dadd1b344497 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -68,7 +68,10 @@ struct tc_u_knode {
68 u32 __percpu *pcpu_success; 68 u32 __percpu *pcpu_success;
69#endif 69#endif
70 struct tcf_proto *tp; 70 struct tcf_proto *tp;
71 struct rcu_head rcu; 71 union {
72 struct work_struct work;
73 struct rcu_head rcu;
74 };
72 /* The 'sel' field MUST be the last field in structure to allow for 75 /* The 'sel' field MUST be the last field in structure to allow for
73 * tc_u32_keys allocated at end of structure. 76 * tc_u32_keys allocated at end of structure.
74 */ 77 */
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
418 * this the u32_delete_key_rcu variant does not free the percpu 421 * this the u32_delete_key_rcu variant does not free the percpu
419 * statistics. 422 * statistics.
420 */ 423 */
424static void u32_delete_key_work(struct work_struct *work)
425{
426 struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
427
428 rtnl_lock();
429 u32_destroy_key(key->tp, key, false);
430 rtnl_unlock();
431}
432
421static void u32_delete_key_rcu(struct rcu_head *rcu) 433static void u32_delete_key_rcu(struct rcu_head *rcu)
422{ 434{
423 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 435 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
424 436
425 u32_destroy_key(key->tp, key, false); 437 INIT_WORK(&key->work, u32_delete_key_work);
438 tcf_queue_work(&key->work);
426} 439}
427 440
428/* u32_delete_key_freepf_rcu is the rcu callback variant 441/* u32_delete_key_freepf_rcu is the rcu callback variant
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
432 * for the variant that should be used with keys return from 445 * for the variant that should be used with keys return from
433 * u32_init_knode() 446 * u32_init_knode()
434 */ 447 */
448static void u32_delete_key_freepf_work(struct work_struct *work)
449{
450 struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
451
452 rtnl_lock();
453 u32_destroy_key(key->tp, key, true);
454 rtnl_unlock();
455}
456
435static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) 457static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
436{ 458{
437 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); 459 struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
438 460
439 u32_destroy_key(key->tp, key, true); 461 INIT_WORK(&key->work, u32_delete_key_freepf_work);
462 tcf_queue_work(&key->work);
440} 463}
441 464
442static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) 465static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c6deb74e3d2f..22bc6fc48311 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
301{ 301{
302 struct Qdisc *q; 302 struct Qdisc *q;
303 303
304 if (!handle)
305 return NULL;
304 q = qdisc_match_from_root(dev->qdisc, handle); 306 q = qdisc_match_from_root(dev->qdisc, handle);
305 if (q) 307 if (q)
306 goto out; 308 goto out;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 92a07141fd07..621b5ca3fd1c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
421{ 421{
422 struct dst_entry *dst; 422 struct dst_entry *dst;
423 423
424 if (!t) 424 if (sock_owned_by_user(sk) || !t)
425 return; 425 return;
426 dst = sctp_transport_dst_check(t); 426 dst = sctp_transport_dst_check(t);
427 if (dst) 427 if (dst)
@@ -794,7 +794,7 @@ hit:
794struct sctp_hash_cmp_arg { 794struct sctp_hash_cmp_arg {
795 const union sctp_addr *paddr; 795 const union sctp_addr *paddr;
796 const struct net *net; 796 const struct net *net;
797 u16 lport; 797 __be16 lport;
798}; 798};
799 799
800static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, 800static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -820,37 +820,37 @@ out:
820 return err; 820 return err;
821} 821}
822 822
823static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed) 823static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
824{ 824{
825 const struct sctp_transport *t = data; 825 const struct sctp_transport *t = data;
826 const union sctp_addr *paddr = &t->ipaddr; 826 const union sctp_addr *paddr = &t->ipaddr;
827 const struct net *net = sock_net(t->asoc->base.sk); 827 const struct net *net = sock_net(t->asoc->base.sk);
828 u16 lport = htons(t->asoc->base.bind_addr.port); 828 __be16 lport = htons(t->asoc->base.bind_addr.port);
829 u32 addr; 829 __u32 addr;
830 830
831 if (paddr->sa.sa_family == AF_INET6) 831 if (paddr->sa.sa_family == AF_INET6)
832 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 832 addr = jhash(&paddr->v6.sin6_addr, 16, seed);
833 else 833 else
834 addr = paddr->v4.sin_addr.s_addr; 834 addr = (__force __u32)paddr->v4.sin_addr.s_addr;
835 835
836 return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 836 return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
837 (__force __u32)lport, net_hash_mix(net), seed); 837 (__force __u32)lport, net_hash_mix(net), seed);
838} 838}
839 839
840static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed) 840static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
841{ 841{
842 const struct sctp_hash_cmp_arg *x = data; 842 const struct sctp_hash_cmp_arg *x = data;
843 const union sctp_addr *paddr = x->paddr; 843 const union sctp_addr *paddr = x->paddr;
844 const struct net *net = x->net; 844 const struct net *net = x->net;
845 u16 lport = x->lport; 845 __be16 lport = x->lport;
846 u32 addr; 846 __u32 addr;
847 847
848 if (paddr->sa.sa_family == AF_INET6) 848 if (paddr->sa.sa_family == AF_INET6)
849 addr = jhash(&paddr->v6.sin6_addr, 16, seed); 849 addr = jhash(&paddr->v6.sin6_addr, 16, seed);
850 else 850 else
851 addr = paddr->v4.sin_addr.s_addr; 851 addr = (__force __u32)paddr->v4.sin_addr.s_addr;
852 852
853 return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | 853 return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
854 (__force __u32)lport, net_hash_mix(net), seed); 854 (__force __u32)lport, net_hash_mix(net), seed);
855} 855}
856 856
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 51c488769590..a6dfa86c0201 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
738/* Was this packet marked by Explicit Congestion Notification? */ 738/* Was this packet marked by Explicit Congestion Notification? */
739static int sctp_v6_is_ce(const struct sk_buff *skb) 739static int sctp_v6_is_ce(const struct sk_buff *skb)
740{ 740{
741 return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); 741 return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
742} 742}
743 743
744/* Dump the v6 addr to the seq file. */ 744/* Dump the v6 addr to the seq file. */
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
882 net = sock_net(&opt->inet.sk); 882 net = sock_net(&opt->inet.sk);
883 rcu_read_lock(); 883 rcu_read_lock();
884 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); 884 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
885 if (!dev || 885 if (!dev || !(opt->inet.freebind ||
886 !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) { 886 net->ipv6.sysctl.ip_nonlocal_bind ||
887 ipv6_chk_addr(net, &addr->v6.sin6_addr,
888 dev, 0))) {
887 rcu_read_unlock(); 889 rcu_read_unlock();
888 return 0; 890 return 0;
889 } 891 }
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 22ed01a76b19..a72a7d925d46 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
463 .r = r, 463 .r = r,
464 .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN), 464 .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
465 }; 465 };
466 int pos = cb->args[2];
466 467
467 /* eps hashtable dumps 468 /* eps hashtable dumps
468 * args: 469 * args:
@@ -493,7 +494,8 @@ skip:
493 goto done; 494 goto done;
494 495
495 sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, 496 sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
496 net, (int *)&cb->args[2], &commp); 497 net, &pos, &commp);
498 cb->args[2] = pos;
497 499
498done: 500done:
499 cb->args[1] = cb->args[4]; 501 cb->args[1] = cb->args[4];
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ca8f196b6c6c..514465b03829 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2854 addr_param_len = af->to_addr_param(addr, &addr_param); 2854 addr_param_len = af->to_addr_param(addr, &addr_param);
2855 param.param_hdr.type = flags; 2855 param.param_hdr.type = flags;
2856 param.param_hdr.length = htons(paramlen + addr_param_len); 2856 param.param_hdr.length = htons(paramlen + addr_param_len);
2857 param.crr_id = i; 2857 param.crr_id = htonl(i);
2858 2858
2859 sctp_addto_chunk(retval, paramlen, &param); 2859 sctp_addto_chunk(retval, paramlen, &param);
2860 sctp_addto_chunk(retval, addr_param_len, &addr_param); 2860 sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2867 addr_param_len = af->to_addr_param(addr, &addr_param); 2867 addr_param_len = af->to_addr_param(addr, &addr_param);
2868 param.param_hdr.type = SCTP_PARAM_DEL_IP; 2868 param.param_hdr.type = SCTP_PARAM_DEL_IP;
2869 param.param_hdr.length = htons(paramlen + addr_param_len); 2869 param.param_hdr.length = htons(paramlen + addr_param_len);
2870 param.crr_id = i; 2870 param.crr_id = htonl(i);
2871 2871
2872 sctp_addto_chunk(retval, paramlen, &param); 2872 sctp_addto_chunk(retval, paramlen, &param);
2873 sctp_addto_chunk(retval, addr_param_len, &addr_param); 2873 sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
3591 */ 3591 */
3592struct sctp_chunk *sctp_make_strreset_req( 3592struct sctp_chunk *sctp_make_strreset_req(
3593 const struct sctp_association *asoc, 3593 const struct sctp_association *asoc,
3594 __u16 stream_num, __u16 *stream_list, 3594 __u16 stream_num, __be16 *stream_list,
3595 bool out, bool in) 3595 bool out, bool in)
3596{ 3596{
3597 struct sctp_strreset_outreq outreq; 3597 struct sctp_strreset_outreq outreq;
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
3788{ 3788{
3789 struct sctp_reconf_chunk *hdr; 3789 struct sctp_reconf_chunk *hdr;
3790 union sctp_params param; 3790 union sctp_params param;
3791 __u16 last = 0, cnt = 0; 3791 __be16 last = 0;
3792 __u16 cnt = 0;
3792 3793
3793 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; 3794 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
3794 sctp_walk_params(param, hdr, params) { 3795 sctp_walk_params(param, hdr, params) {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e6a2974e020e..e2d9a4b49c9c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
1607 break; 1607 break;
1608 1608
1609 case SCTP_CMD_INIT_FAILED: 1609 case SCTP_CMD_INIT_FAILED:
1610 sctp_cmd_init_failed(commands, asoc, cmd->obj.err); 1610 sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
1611 break; 1611 break;
1612 1612
1613 case SCTP_CMD_ASSOC_FAILED: 1613 case SCTP_CMD_ASSOC_FAILED:
1614 sctp_cmd_assoc_failed(commands, asoc, event_type, 1614 sctp_cmd_assoc_failed(commands, asoc, event_type,
1615 subtype, chunk, cmd->obj.err); 1615 subtype, chunk, cmd->obj.u32);
1616 break; 1616 break;
1617 1617
1618 case SCTP_CMD_INIT_COUNTER_INC: 1618 case SCTP_CMD_INIT_COUNTER_INC:
@@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
1680 case SCTP_CMD_PROCESS_CTSN: 1680 case SCTP_CMD_PROCESS_CTSN:
1681 /* Dummy up a SACK for processing. */ 1681 /* Dummy up a SACK for processing. */
1682 sackh.cum_tsn_ack = cmd->obj.be32; 1682 sackh.cum_tsn_ack = cmd->obj.be32;
1683 sackh.a_rwnd = asoc->peer.rwnd + 1683 sackh.a_rwnd = htonl(asoc->peer.rwnd +
1684 asoc->outqueue.outstanding_bytes; 1684 asoc->outqueue.outstanding_bytes);
1685 sackh.num_gap_ack_blocks = 0; 1685 sackh.num_gap_ack_blocks = 0;
1686 sackh.num_dup_tsns = 0; 1686 sackh.num_dup_tsns = 0;
1687 chunk->subh.sack_hdr = &sackh; 1687 chunk->subh.sack_hdr = &sackh;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d4730ada7f32..6f45d1713452 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
170 sk_mem_charge(sk, chunk->skb->truesize); 170 sk_mem_charge(sk, chunk->skb->truesize);
171} 171}
172 172
173static void sctp_clear_owner_w(struct sctp_chunk *chunk)
174{
175 skb_orphan(chunk->skb);
176}
177
178static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
179 void (*cb)(struct sctp_chunk *))
180
181{
182 struct sctp_outq *q = &asoc->outqueue;
183 struct sctp_transport *t;
184 struct sctp_chunk *chunk;
185
186 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
187 list_for_each_entry(chunk, &t->transmitted, transmitted_list)
188 cb(chunk);
189
190 list_for_each_entry(chunk, &q->retransmit, list)
191 cb(chunk);
192
193 list_for_each_entry(chunk, &q->sacked, list)
194 cb(chunk);
195
196 list_for_each_entry(chunk, &q->abandoned, list)
197 cb(chunk);
198
199 list_for_each_entry(chunk, &q->out_chunk_list, list)
200 cb(chunk);
201}
202
173/* Verify that this is a valid address. */ 203/* Verify that this is a valid address. */
174static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, 204static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
175 int len) 205 int len)
@@ -4906,6 +4936,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4906 struct socket *sock; 4936 struct socket *sock;
4907 int err = 0; 4937 int err = 0;
4908 4938
4939 /* Do not peel off from one netns to another one. */
4940 if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
4941 return -EINVAL;
4942
4909 if (!asoc) 4943 if (!asoc)
4910 return -EINVAL; 4944 return -EINVAL;
4911 4945
@@ -8208,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
8208 * paths won't try to lock it and then oldsk. 8242 * paths won't try to lock it and then oldsk.
8209 */ 8243 */
8210 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); 8244 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
8245 sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
8211 sctp_assoc_migrate(assoc, newsk); 8246 sctp_assoc_migrate(assoc, newsk);
8247 sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
8212 8248
8213 /* If the association on the newsk is already closed before accept() 8249 /* If the association on the newsk is already closed before accept()
8214 * is called, set RCV_SHUTDOWN flag. 8250 * is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 63ea15503714..fa8371ff05c4 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
118 __u16 i, str_nums, *str_list; 118 __u16 i, str_nums, *str_list;
119 struct sctp_chunk *chunk; 119 struct sctp_chunk *chunk;
120 int retval = -EINVAL; 120 int retval = -EINVAL;
121 __be16 *nstr_list;
121 bool out, in; 122 bool out, in;
122 123
123 if (!asoc->peer.reconf_capable || 124 if (!asoc->peer.reconf_capable ||
@@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
148 if (str_list[i] >= stream->incnt) 149 if (str_list[i] >= stream->incnt)
149 goto out; 150 goto out;
150 151
152 nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
153 if (!nstr_list) {
154 retval = -ENOMEM;
155 goto out;
156 }
157
151 for (i = 0; i < str_nums; i++) 158 for (i = 0; i < str_nums; i++)
152 str_list[i] = htons(str_list[i]); 159 nstr_list[i] = htons(str_list[i]);
153 160
154 chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); 161 chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
155 162
156 for (i = 0; i < str_nums; i++) 163 kfree(nstr_list);
157 str_list[i] = ntohs(str_list[i]);
158 164
159 if (!chunk) { 165 if (!chunk) {
160 retval = -ENOMEM; 166 retval = -ENOMEM;
@@ -305,7 +311,7 @@ out:
305} 311}
306 312
307static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param( 313static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
308 struct sctp_association *asoc, __u32 resp_seq, 314 struct sctp_association *asoc, __be32 resp_seq,
309 __be16 type) 315 __be16 type)
310{ 316{
311 struct sctp_chunk *chunk = asoc->strreset_chunk; 317 struct sctp_chunk *chunk = asoc->strreset_chunk;
@@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
345{ 351{
346 struct sctp_strreset_outreq *outreq = param.v; 352 struct sctp_strreset_outreq *outreq = param.v;
347 struct sctp_stream *stream = &asoc->stream; 353 struct sctp_stream *stream = &asoc->stream;
348 __u16 i, nums, flags = 0, *str_p = NULL;
349 __u32 result = SCTP_STRRESET_DENIED; 354 __u32 result = SCTP_STRRESET_DENIED;
355 __u16 i, nums, flags = 0;
356 __be16 *str_p = NULL;
350 __u32 request_seq; 357 __u32 request_seq;
351 358
352 request_seq = ntohl(outreq->request_seq); 359 request_seq = ntohl(outreq->request_seq);
@@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
439 struct sctp_stream *stream = &asoc->stream; 446 struct sctp_stream *stream = &asoc->stream;
440 __u32 result = SCTP_STRRESET_DENIED; 447 __u32 result = SCTP_STRRESET_DENIED;
441 struct sctp_chunk *chunk = NULL; 448 struct sctp_chunk *chunk = NULL;
442 __u16 i, nums, *str_p;
443 __u32 request_seq; 449 __u32 request_seq;
450 __u16 i, nums;
451 __be16 *str_p;
444 452
445 request_seq = ntohl(inreq->request_seq); 453 request_seq = ntohl(inreq->request_seq);
446 if (TSN_lt(asoc->strreset_inseq, request_seq) || 454 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
769 777
770 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { 778 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
771 struct sctp_strreset_outreq *outreq; 779 struct sctp_strreset_outreq *outreq;
772 __u16 *str_p; 780 __be16 *str_p;
773 781
774 outreq = (struct sctp_strreset_outreq *)req; 782 outreq = (struct sctp_strreset_outreq *)req;
775 str_p = outreq->list_of_streams; 783 str_p = outreq->list_of_streams;
@@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
794 nums, str_p, GFP_ATOMIC); 802 nums, str_p, GFP_ATOMIC);
795 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { 803 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
796 struct sctp_strreset_inreq *inreq; 804 struct sctp_strreset_inreq *inreq;
797 __u16 *str_p; 805 __be16 *str_p;
798 806
799 /* if the result is performed, it's impossible for inreq */ 807 /* if the result is performed, it's impossible for inreq */
800 if (result == SCTP_STRRESET_PERFORMED) 808 if (result == SCTP_STRRESET_PERFORMED)
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 67abc0194f30..5447228bf1a0 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
847 847
848struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( 848struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
849 const struct sctp_association *asoc, __u16 flags, __u16 stream_num, 849 const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
850 __u16 *stream_list, gfp_t gfp) 850 __be16 *stream_list, gfp_t gfp)
851{ 851{
852 struct sctp_stream_reset_event *sreset; 852 struct sctp_stream_reset_event *sreset;
853 struct sctp_ulpevent *event; 853 struct sctp_ulpevent *event;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d4ea46a5f233..c5fda15ba319 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
49{ 49{
50 /* Unrecoverable error in receive */ 50 /* Unrecoverable error in receive */
51 51
52 del_timer(&strp->msg_timer); 52 cancel_delayed_work(&strp->msg_timer_work);
53 53
54 if (strp->stopped) 54 if (strp->stopped)
55 return; 55 return;
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
68static void strp_start_timer(struct strparser *strp, long timeo) 68static void strp_start_timer(struct strparser *strp, long timeo)
69{ 69{
70 if (timeo) 70 if (timeo)
71 mod_timer(&strp->msg_timer, timeo); 71 mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
72} 72}
73 73
74/* Lower lock held */ 74/* Lower lock held */
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
319 eaten += (cand_len - extra); 319 eaten += (cand_len - extra);
320 320
321 /* Hurray, we have a new message! */ 321 /* Hurray, we have a new message! */
322 del_timer(&strp->msg_timer); 322 cancel_delayed_work(&strp->msg_timer_work);
323 strp->skb_head = NULL; 323 strp->skb_head = NULL;
324 STRP_STATS_INCR(strp->stats.msgs); 324 STRP_STATS_INCR(strp->stats.msgs);
325 325
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
450 do_strp_work(container_of(w, struct strparser, work)); 450 do_strp_work(container_of(w, struct strparser, work));
451} 451}
452 452
453static void strp_msg_timeout(unsigned long arg) 453static void strp_msg_timeout(struct work_struct *w)
454{ 454{
455 struct strparser *strp = (struct strparser *)arg; 455 struct strparser *strp = container_of(w, struct strparser,
456 msg_timer_work.work);
456 457
457 /* Message assembly timed out */ 458 /* Message assembly timed out */
458 STRP_STATS_INCR(strp->stats.msg_timeouts); 459 STRP_STATS_INCR(strp->stats.msg_timeouts);
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
505 strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; 506 strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
506 strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; 507 strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
507 508
508 setup_timer(&strp->msg_timer, strp_msg_timeout, 509 INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
509 (unsigned long)strp);
510
511 INIT_WORK(&strp->work, strp_work); 510 INIT_WORK(&strp->work, strp_work);
512 511
513 return 0; 512 return 0;
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
532{ 531{
533 WARN_ON(!strp->stopped); 532 WARN_ON(!strp->stopped);
534 533
535 del_timer_sync(&strp->msg_timer); 534 cancel_delayed_work_sync(&strp->msg_timer_work);
536 cancel_work_sync(&strp->work); 535 cancel_work_sync(&strp->work);
537 536
538 if (strp->skb_head) { 537 if (strp->skb_head) {
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e741ec2b4d8e..898485e3ece4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task)
1333 rpc_count_iostats(task, task->tk_client->cl_metrics); 1333 rpc_count_iostats(task, task->tk_client->cl_metrics);
1334 spin_lock(&xprt->recv_lock); 1334 spin_lock(&xprt->recv_lock);
1335 if (!list_empty(&req->rq_list)) { 1335 if (!list_empty(&req->rq_list)) {
1336 list_del(&req->rq_list); 1336 list_del_init(&req->rq_list);
1337 xprt_wait_on_pinned_rqst(req); 1337 xprt_wait_on_pinned_rqst(req);
1338 } 1338 }
1339 spin_unlock(&xprt->recv_lock); 1339 spin_unlock(&xprt->recv_lock);
@@ -1445,6 +1445,23 @@ out:
1445 return xprt; 1445 return xprt;
1446} 1446}
1447 1447
1448static void xprt_destroy_cb(struct work_struct *work)
1449{
1450 struct rpc_xprt *xprt =
1451 container_of(work, struct rpc_xprt, task_cleanup);
1452
1453 rpc_xprt_debugfs_unregister(xprt);
1454 rpc_destroy_wait_queue(&xprt->binding);
1455 rpc_destroy_wait_queue(&xprt->pending);
1456 rpc_destroy_wait_queue(&xprt->sending);
1457 rpc_destroy_wait_queue(&xprt->backlog);
1458 kfree(xprt->servername);
1459 /*
1460 * Tear down transport state and free the rpc_xprt
1461 */
1462 xprt->ops->destroy(xprt);
1463}
1464
1448/** 1465/**
1449 * xprt_destroy - destroy an RPC transport, killing off all requests. 1466 * xprt_destroy - destroy an RPC transport, killing off all requests.
1450 * @xprt: transport to destroy 1467 * @xprt: transport to destroy
@@ -1454,22 +1471,19 @@ static void xprt_destroy(struct rpc_xprt *xprt)
1454{ 1471{
1455 dprintk("RPC: destroying transport %p\n", xprt); 1472 dprintk("RPC: destroying transport %p\n", xprt);
1456 1473
1457 /* Exclude transport connect/disconnect handlers */ 1474 /*
1475 * Exclude transport connect/disconnect handlers and autoclose
1476 */
1458 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); 1477 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1459 1478
1460 del_timer_sync(&xprt->timer); 1479 del_timer_sync(&xprt->timer);
1461 1480
1462 rpc_xprt_debugfs_unregister(xprt);
1463 rpc_destroy_wait_queue(&xprt->binding);
1464 rpc_destroy_wait_queue(&xprt->pending);
1465 rpc_destroy_wait_queue(&xprt->sending);
1466 rpc_destroy_wait_queue(&xprt->backlog);
1467 cancel_work_sync(&xprt->task_cleanup);
1468 kfree(xprt->servername);
1469 /* 1481 /*
1470 * Tear down transport state and free the rpc_xprt 1482 * Destroy sockets etc from the system workqueue so they can
1483 * safely flush receive work running on rpciod.
1471 */ 1484 */
1472 xprt->ops->destroy(xprt); 1485 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1486 schedule_work(&xprt->task_cleanup);
1473} 1487}
1474 1488
1475static void xprt_destroy_kref(struct kref *kref) 1489static void xprt_destroy_kref(struct kref *kref)
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 5a936a6a31a3..df062e086bdb 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -401,7 +401,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
401 if (unlikely(n != mw->mw_nents)) 401 if (unlikely(n != mw->mw_nents))
402 goto out_mapmr_err; 402 goto out_mapmr_err;
403 403
404 dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n", 404 dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
405 __func__, frmr, mw->mw_nents, mr->length); 405 __func__, frmr, mw->mw_nents, mr->length);
406 406
407 key = (u8)(mr->rkey & 0x000000FF); 407 key = (u8)(mr->rkey & 0x000000FF);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9b5de31aa429..c1841f234a71 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
2203 struct sock_xprt *transport = 2203 struct sock_xprt *transport =
2204 container_of(work, struct sock_xprt, connect_worker.work); 2204 container_of(work, struct sock_xprt, connect_worker.work);
2205 struct rpc_xprt *xprt = &transport->xprt; 2205 struct rpc_xprt *xprt = &transport->xprt;
2206 struct socket *sock = transport->sock; 2206 struct socket *sock;
2207 int status = -EIO; 2207 int status = -EIO;
2208 2208
2209 sock = xs_create_sock(xprt, transport, 2209 sock = xs_create_sock(xprt, transport,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7d99029df342..a140dd4a84af 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
233 struct sk_buff_head xmitq; 233 struct sk_buff_head xmitq;
234 int rc = 0; 234 int rc = 0;
235 235
236 __skb_queue_head_init(&xmitq); 236 skb_queue_head_init(&xmitq);
237 tipc_bcast_lock(net); 237 tipc_bcast_lock(net);
238 if (tipc_link_bc_peers(l)) 238 if (tipc_link_bc_peers(l))
239 rc = tipc_link_xmit(l, pkts, &xmitq); 239 rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
263 u32 dst, selector; 263 u32 dst, selector;
264 264
265 selector = msg_link_selector(buf_msg(skb_peek(pkts))); 265 selector = msg_link_selector(buf_msg(skb_peek(pkts)));
266 __skb_queue_head_init(&_pkts); 266 skb_queue_head_init(&_pkts);
267 267
268 list_for_each_entry_safe(n, tmp, &dests->list, list) { 268 list_for_each_entry_safe(n, tmp, &dests->list, list) {
269 dst = n->value; 269 dst = n->value;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 6ef379f004ac..17146c16ee2d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
551 return false; 551 return false;
552 if (msg_errcode(msg)) 552 if (msg_errcode(msg))
553 return false; 553 return false;
554 *err = -TIPC_ERR_NO_NAME; 554 *err = TIPC_ERR_NO_NAME;
555 if (skb_linearize(skb)) 555 if (skb_linearize(skb))
556 return false; 556 return false;
557 msg = buf_msg(skb); 557 msg = buf_msg(skb);
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
568 msg_set_destnode(msg, dnode); 568 msg_set_destnode(msg, dnode);
569 msg_set_destport(msg, dport); 569 msg_set_destport(msg, dport);
570 *err = TIPC_OK; 570 *err = TIPC_OK;
571
572 if (!skb_cloned(skb))
573 return true;
574
575 /* Unclone buffer in case it was bundled */
576 if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
577 return false;
578
571 return true; 579 return true;
572} 580}
573 581
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
257 err = -ENOENT; 257 err = -ENOENT;
258 if (sk == NULL) 258 if (sk == NULL)
259 goto out_nosk; 259 goto out_nosk;
260 if (!net_eq(sock_net(sk), net))
261 goto out;
260 262
261 err = sock_diag_check_cookie(sk, req->udiag_cookie); 263 err = sock_diag_check_cookie(sk, req->udiag_cookie);
262 if (err) 264 if (err)
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 14ed5a344cdf..e21991fe883a 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
310 struct sock *sk = get_per_channel_state(chan); 310 struct sock *sk = get_per_channel_state(chan);
311 struct vsock_sock *vsk = vsock_sk(sk); 311 struct vsock_sock *vsk = vsock_sk(sk);
312 312
313 lock_sock(sk);
314
313 sk->sk_state = SS_UNCONNECTED; 315 sk->sk_state = SS_UNCONNECTED;
314 sock_set_flag(sk, SOCK_DONE); 316 sock_set_flag(sk, SOCK_DONE);
315 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; 317 vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
316 318
317 sk->sk_state_change(sk); 319 sk->sk_state_change(sk);
320
321 release_sock(sk);
318} 322}
319 323
320static void hvs_open_connection(struct vmbus_channel *chan) 324static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
344 if (!sk) 348 if (!sk)
345 return; 349 return;
346 350
351 lock_sock(sk);
352
347 if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) || 353 if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
348 (!conn_from_host && sk->sk_state != SS_CONNECTING)) 354 (!conn_from_host && sk->sk_state != SS_CONNECTING))
349 goto out; 355 goto out;
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
395 401
396 vsock_insert_connected(vnew); 402 vsock_insert_connected(vnew);
397 403
398 lock_sock(sk);
399 vsock_enqueue_accept(sk, new); 404 vsock_enqueue_accept(sk, new);
400 release_sock(sk);
401 } else { 405 } else {
402 sk->sk_state = SS_CONNECTED; 406 sk->sk_state = SS_CONNECTED;
403 sk->sk_socket->state = SS_CONNECTED; 407 sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
410out: 414out:
411 /* Release refcnt obtained when we called vsock_find_bound_socket() */ 415 /* Release refcnt obtained when we called vsock_find_bound_socket() */
412 sock_put(sk); 416 sock_put(sk);
417
418 release_sock(sk);
413} 419}
414 420
415static u32 hvs_get_local_cid(void) 421static u32 hvs_get_local_cid(void)
@@ -476,13 +482,21 @@ out:
476 482
477static void hvs_release(struct vsock_sock *vsk) 483static void hvs_release(struct vsock_sock *vsk)
478{ 484{
485 struct sock *sk = sk_vsock(vsk);
479 struct hvsock *hvs = vsk->trans; 486 struct hvsock *hvs = vsk->trans;
480 struct vmbus_channel *chan = hvs->chan; 487 struct vmbus_channel *chan;
481 488
489 lock_sock(sk);
490
491 sk->sk_state = SS_DISCONNECTING;
492 vsock_remove_sock(vsk);
493
494 release_sock(sk);
495
496 chan = hvs->chan;
482 if (chan) 497 if (chan)
483 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); 498 hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
484 499
485 vsock_remove_sock(vsk);
486} 500}
487 501
488static void hvs_destruct(struct vsock_sock *vsk) 502static void hvs_destruct(struct vsock_sock *vsk)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 690874293cfc..d396cb61a280 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = {
549 [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, 549 [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED },
550}; 550};
551 551
552/* policy for packet pattern attributes */
553static const struct nla_policy
554nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
555 [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, },
556 [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, },
557 [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
558};
559
552static int nl80211_prepare_wdev_dump(struct sk_buff *skb, 560static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
553 struct netlink_callback *cb, 561 struct netlink_callback *cb,
554 struct cfg80211_registered_device **rdev, 562 struct cfg80211_registered_device **rdev,
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
10532 u8 *mask_pat; 10540 u8 *mask_pat;
10533 10541
10534 nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, 10542 nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
10535 NULL, info->extack); 10543 nl80211_packet_pattern_policy,
10544 info->extack);
10536 err = -EINVAL; 10545 err = -EINVAL;
10537 if (!pat_tb[NL80211_PKTPAT_MASK] || 10546 if (!pat_tb[NL80211_PKTPAT_MASK] ||
10538 !pat_tb[NL80211_PKTPAT_PATTERN]) 10547 !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
10781 rem) { 10790 rem) {
10782 u8 *mask_pat; 10791 u8 *mask_pat;
10783 10792
10784 nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL); 10793 nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
10794 nl80211_packet_pattern_policy, NULL);
10785 if (!pat_tb[NL80211_PKTPAT_MASK] || 10795 if (!pat_tb[NL80211_PKTPAT_MASK] ||
10786 !pat_tb[NL80211_PKTPAT_PATTERN]) 10796 !pat_tb[NL80211_PKTPAT_PATTERN])
10787 return -EINVAL; 10797 return -EINVAL;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0a49b88070d0..b6533ecbf5b1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
522 return -EOPNOTSUPP; 522 return -EOPNOTSUPP;
523 523
524 if (wdev->current_bss) { 524 if (wdev->current_bss) {
525 if (!prev_bssid)
526 return -EALREADY;
527 if (prev_bssid &&
528 !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
529 return -ENOTCONN;
530 cfg80211_unhold_bss(wdev->current_bss); 525 cfg80211_unhold_bss(wdev->current_bss);
531 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); 526 cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
532 wdev->current_bss = NULL; 527 wdev->current_bss = NULL;
@@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1063 1058
1064 ASSERT_WDEV_LOCK(wdev); 1059 ASSERT_WDEV_LOCK(wdev);
1065 1060
1066 if (WARN_ON(wdev->connect_keys)) { 1061 /*
1067 kzfree(wdev->connect_keys); 1062 * If we have an ssid_len, we're trying to connect or are
1068 wdev->connect_keys = NULL; 1063 * already connected, so reject a new SSID unless it's the
1064 * same (which is the case for re-association.)
1065 */
1066 if (wdev->ssid_len &&
1067 (wdev->ssid_len != connect->ssid_len ||
1068 memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
1069 return -EALREADY;
1070
1071 /*
1072 * If connected, reject (re-)association unless prev_bssid
1073 * matches the current BSSID.
1074 */
1075 if (wdev->current_bss) {
1076 if (!prev_bssid)
1077 return -EALREADY;
1078 if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
1079 return -ENOTCONN;
1069 } 1080 }
1070 1081
1082 /*
1083 * Reject if we're in the process of connecting with WEP,
1084 * this case isn't very interesting and trying to handle
1085 * it would make the code much more complex.
1086 */
1087 if (wdev->connect_keys)
1088 return -EINPROGRESS;
1089
1071 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, 1090 cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
1072 rdev->wiphy.ht_capa_mod_mask); 1091 rdev->wiphy.ht_capa_mod_mask);
1073 1092
@@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
1118 1137
1119 if (err) { 1138 if (err) {
1120 wdev->connect_keys = NULL; 1139 wdev->connect_keys = NULL;
1121 wdev->ssid_len = 0; 1140 /*
1141 * This could be reassoc getting refused, don't clear
1142 * ssid_len in that case.
1143 */
1144 if (!wdev->current_bss)
1145 wdev->ssid_len = 0;
1122 return err; 1146 return err;
1123 } 1147 }
1124 1148
@@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
1145 else if (wdev->ssid_len) 1169 else if (wdev->ssid_len)
1146 err = rdev_disconnect(rdev, dev, reason); 1170 err = rdev_disconnect(rdev, dev, reason);
1147 1171
1172 /*
1173 * Clear ssid_len unless we actually were fully connected,
1174 * in which case cfg80211_disconnected() will take care of
1175 * this later.
1176 */
1177 if (!wdev->current_bss)
1178 wdev->ssid_len = 0;
1179
1148 return err; 1180 return err;
1149} 1181}
1150 1182
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index acf00104ef31..30e5746085b8 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
91 } 91 }
92 92
93 if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { 93 if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
94 xso->dev = NULL;
94 dev_put(dev); 95 dev_put(dev);
95 return 0; 96 return 0;
96 } 97 }
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 2515cd2bc5db..8ac9d32fb79d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -429,7 +429,8 @@ resume:
429 nf_reset(skb); 429 nf_reset(skb);
430 430
431 if (decaps) { 431 if (decaps) {
432 skb->sp->olen = 0; 432 if (skb->sp)
433 skb->sp->olen = 0;
433 skb_dst_drop(skb); 434 skb_dst_drop(skb);
434 gro_cells_receive(&gro_cells, skb); 435 gro_cells_receive(&gro_cells, skb);
435 return 0; 436 return 0;
@@ -440,7 +441,8 @@ resume:
440 441
441 err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); 442 err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
442 if (xfrm_gro) { 443 if (xfrm_gro) {
443 skb->sp->olen = 0; 444 if (skb->sp)
445 skb->sp->olen = 0;
444 skb_dst_drop(skb); 446 skb_dst_drop(skb);
445 gro_cells_receive(&gro_cells, skb); 447 gro_cells_receive(&gro_cells, skb);
446 return err; 448 return err;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 31a2e6d34dba..73ad8c8ef344 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -105,6 +105,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
105 if (xfrm_offload(skb)) { 105 if (xfrm_offload(skb)) {
106 x->type_offload->encap(x, skb); 106 x->type_offload->encap(x, skb);
107 } else { 107 } else {
108 /* Inner headers are invalid now. */
109 skb->encapsulation = 0;
110
108 err = x->type->output(x, skb); 111 err = x->type->output(x, skb);
109 if (err == -EINPROGRESS) 112 if (err == -EINPROGRESS)
110 goto out; 113 goto out;
@@ -208,7 +211,6 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
208 int err; 211 int err;
209 212
210 secpath_reset(skb); 213 secpath_reset(skb);
211 skb->encapsulation = 0;
212 214
213 if (xfrm_dev_offload_ok(skb, x)) { 215 if (xfrm_dev_offload_ok(skb, x)) {
214 struct sec_path *sp; 216 struct sec_path *sp;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f06253969972..8cafb3c0a4ac 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1573 goto put_states; 1573 goto put_states;
1574 } 1574 }
1575 1575
1576 if (!dst_prev)
1577 dst0 = dst1;
1578 else
1579 /* Ref count is taken during xfrm_alloc_dst()
1580 * No need to do dst_clone() on dst1
1581 */
1582 dst_prev->child = dst1;
1583
1576 if (xfrm[i]->sel.family == AF_UNSPEC) { 1584 if (xfrm[i]->sel.family == AF_UNSPEC) {
1577 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1585 inner_mode = xfrm_ip2inner_mode(xfrm[i],
1578 xfrm_af2proto(family)); 1586 xfrm_af2proto(family));
@@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1584 } else 1592 } else
1585 inner_mode = xfrm[i]->inner_mode; 1593 inner_mode = xfrm[i]->inner_mode;
1586 1594
1587 if (!dst_prev)
1588 dst0 = dst1;
1589 else
1590 /* Ref count is taken during xfrm_alloc_dst()
1591 * No need to do dst_clone() on dst1
1592 */
1593 dst_prev->child = dst1;
1594
1595 xdst->route = dst; 1595 xdst->route = dst;
1596 dst_copy_metrics(dst1, dst); 1596 dst_copy_metrics(dst1, dst);
1597 1597
@@ -2076,7 +2076,6 @@ make_dummy_bundle:
2076 xdst->num_xfrms = num_xfrms; 2076 xdst->num_xfrms = num_xfrms;
2077 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2077 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2078 2078
2079 dst_hold(&xdst->u.dst);
2080 return xdst; 2079 return xdst;
2081 2080
2082inc_error: 2081inc_error:
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0dab1cd79ce4..1f5cee2269af 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -732,12 +732,12 @@ restart:
732 } 732 }
733 } 733 }
734 } 734 }
735out:
736 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
735 if (cnt) { 737 if (cnt) {
736 err = 0; 738 err = 0;
737 xfrm_policy_cache_flush(); 739 xfrm_policy_cache_flush();
738 } 740 }
739out:
740 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
741 return err; 741 return err;
742} 742}
743EXPORT_SYMBOL(xfrm_state_flush); 743EXPORT_SYMBOL(xfrm_state_flush);
@@ -2069,6 +2069,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
2069 if (err >= 0) { 2069 if (err >= 0) {
2070 xfrm_sk_policy_insert(sk, err, pol); 2070 xfrm_sk_policy_insert(sk, err, pol);
2071 xfrm_pol_put(pol); 2071 xfrm_pol_put(pol);
2072 __sk_dst_reset(sk);
2072 err = 0; 2073 err = 0;
2073 } 2074 }
2074 2075
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 2bfbd9121e3b..e44a0fed48dd 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
657 657
658 if (err < 0) { 658 if (err < 0) {
659 x->km.state = XFRM_STATE_DEAD; 659 x->km.state = XFRM_STATE_DEAD;
660 xfrm_dev_state_delete(x);
660 __xfrm_state_put(x); 661 __xfrm_state_put(x);
661 goto out; 662 goto out;
662 } 663 }
@@ -1692,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
1692 1693
1693static int xfrm_dump_policy_done(struct netlink_callback *cb) 1694static int xfrm_dump_policy_done(struct netlink_callback *cb)
1694{ 1695{
1695 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1696 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1696 struct net *net = sock_net(cb->skb->sk); 1697 struct net *net = sock_net(cb->skb->sk);
1697 1698
1698 xfrm_policy_walk_done(walk, net); 1699 xfrm_policy_walk_done(walk, net);
1699 return 0; 1700 return 0;
1700} 1701}
1701 1702
1703static int xfrm_dump_policy_start(struct netlink_callback *cb)
1704{
1705 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1706
1707 BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
1708
1709 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1710 return 0;
1711}
1712
1702static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) 1713static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1703{ 1714{
1704 struct net *net = sock_net(skb->sk); 1715 struct net *net = sock_net(skb->sk);
1705 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; 1716 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1706 struct xfrm_dump_info info; 1717 struct xfrm_dump_info info;
1707 1718
1708 BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
1709 sizeof(cb->args) - sizeof(cb->args[0]));
1710
1711 info.in_skb = cb->skb; 1719 info.in_skb = cb->skb;
1712 info.out_skb = skb; 1720 info.out_skb = skb;
1713 info.nlmsg_seq = cb->nlh->nlmsg_seq; 1721 info.nlmsg_seq = cb->nlh->nlmsg_seq;
1714 info.nlmsg_flags = NLM_F_MULTI; 1722 info.nlmsg_flags = NLM_F_MULTI;
1715 1723
1716 if (!cb->args[0]) {
1717 cb->args[0] = 1;
1718 xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
1719 }
1720
1721 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); 1724 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1722 1725
1723 return skb->len; 1726 return skb->len;
@@ -2473,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
2473 2476
2474static const struct xfrm_link { 2477static const struct xfrm_link {
2475 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); 2478 int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
2479 int (*start)(struct netlink_callback *);
2476 int (*dump)(struct sk_buff *, struct netlink_callback *); 2480 int (*dump)(struct sk_buff *, struct netlink_callback *);
2477 int (*done)(struct netlink_callback *); 2481 int (*done)(struct netlink_callback *);
2478 const struct nla_policy *nla_pol; 2482 const struct nla_policy *nla_pol;
@@ -2486,6 +2490,7 @@ static const struct xfrm_link {
2486 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, 2490 [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
2487 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, 2491 [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
2488 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, 2492 [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
2493 .start = xfrm_dump_policy_start,
2489 .dump = xfrm_dump_policy, 2494 .dump = xfrm_dump_policy,
2490 .done = xfrm_dump_policy_done }, 2495 .done = xfrm_dump_policy_done },
2491 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, 2496 [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2538,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
2538 2543
2539 { 2544 {
2540 struct netlink_dump_control c = { 2545 struct netlink_dump_control c = {
2546 .start = link->start,
2541 .dump = link->dump, 2547 .dump = link->dump,
2542 .done = link->done, 2548 .done = link->done,
2543 }; 2549 };
diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c
index f9b38ef82dc2..52b0053274f4 100644
--- a/samples/sockmap/sockmap_kern.c
+++ b/samples/sockmap/sockmap_kern.c
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
62 ret = 1; 62 ret = 1;
63 63
64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); 64 bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
65 return bpf_sk_redirect_map(&sock_map, ret, 0); 65 return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
66} 66}
67 67
68SEC("sockops") 68SEC("sockops")
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index bc7fcf010a5b..5522692100ba 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
78} 78}
79 79
80static DEFINE_MUTEX(thread_mutex); 80static DEFINE_MUTEX(thread_mutex);
81static int simple_thread_cnt;
81 82
82int foo_bar_reg(void) 83int foo_bar_reg(void)
83{ 84{
85 mutex_lock(&thread_mutex);
86 if (simple_thread_cnt++)
87 goto out;
88
84 pr_info("Starting thread for foo_bar_fn\n"); 89 pr_info("Starting thread for foo_bar_fn\n");
85 /* 90 /*
86 * We shouldn't be able to start a trace when the module is 91 * We shouldn't be able to start a trace when the module is
87 * unloading (there's other locks to prevent that). But 92 * unloading (there's other locks to prevent that). But
88 * for consistency sake, we still take the thread_mutex. 93 * for consistency sake, we still take the thread_mutex.
89 */ 94 */
90 mutex_lock(&thread_mutex);
91 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); 95 simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
96 out:
92 mutex_unlock(&thread_mutex); 97 mutex_unlock(&thread_mutex);
93 return 0; 98 return 0;
94} 99}
95 100
96void foo_bar_unreg(void) 101void foo_bar_unreg(void)
97{ 102{
98 pr_info("Killing thread for foo_bar_fn\n");
99 /* protect against module unloading */
100 mutex_lock(&thread_mutex); 103 mutex_lock(&thread_mutex);
104 if (--simple_thread_cnt)
105 goto out;
106
107 pr_info("Killing thread for foo_bar_fn\n");
101 if (simple_tsk_fn) 108 if (simple_tsk_fn)
102 kthread_stop(simple_tsk_fn); 109 kthread_stop(simple_tsk_fn);
103 simple_tsk_fn = NULL; 110 simple_tsk_fn = NULL;
111 out:
104 mutex_unlock(&thread_mutex); 112 mutex_unlock(&thread_mutex);
105} 113}
106 114
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 2e3a10e79ca9..061d0c3a420a 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -265,6 +265,8 @@ objtool_args += --no-fp
265endif 265endif
266ifdef CONFIG_GCOV_KERNEL 266ifdef CONFIG_GCOV_KERNEL
267objtool_args += --no-unreachable 267objtool_args += --no-unreachable
268else
269objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
268endif 270endif
269 271
270# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory 272# 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 16923ba4b5b1..756d14f0d763 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -97,7 +97,6 @@ vmlinux.o: FORCE
97 $(call cmd,kernel-mod) 97 $(call cmd,kernel-mod)
98 98
99# Declare generated files as targets for modpost 99# Declare generated files as targets for modpost
100$(symverfile): __modpost ;
101$(modules:.ko=.mod.c): __modpost ; 100$(modules:.ko=.mod.c): __modpost ;
102 101
103 102
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index dd2c262aebbf..8b80bac055e4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6390,7 +6390,7 @@ sub process {
6390 exit(0); 6390 exit(0);
6391 } 6391 }
6392 6392
6393 if (!$is_patch && $file !~ /cover-letter\.patch$/) { 6393 if (!$is_patch && $filename !~ /cover-letter\.patch$/) {
6394 ERROR("NOT_UNIFIED_DIFF", 6394 ERROR("NOT_UNIFIED_DIFF",
6395 "Does not appear to be a unified-diff format patch\n"); 6395 "Does not appear to be a unified-diff format patch\n");
6396 } 6396 }
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 29df825d375c..2f6ce802397d 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -103,11 +103,12 @@ __faddr2line() {
103 103
104 # Go through each of the object's symbols which match the func name. 104 # Go through each of the object's symbols which match the func name.
105 # In rare cases there might be duplicates. 105 # In rare cases there might be duplicates.
106 file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
106 while read symbol; do 107 while read symbol; do
107 local fields=($symbol) 108 local fields=($symbol)
108 local sym_base=0x${fields[0]} 109 local sym_base=0x${fields[0]}
109 local sym_type=${fields[1]} 110 local sym_type=${fields[1]}
110 local sym_end=0x${fields[3]} 111 local sym_end=${fields[3]}
111 112
112 # calculate the size 113 # calculate the size
113 local sym_size=$(($sym_end - $sym_base)) 114 local sym_size=$(($sym_end - $sym_base))
@@ -157,7 +158,7 @@ __faddr2line() {
157 addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" 158 addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
158 DONE=1 159 DONE=1
159 160
160 done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }') 161 done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
161} 162}
162 163
163[[ $# -lt 2 ]] && usage 164[[ $# -lt 2 ]] && usage
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 5d554419170b..9ee9bf7fd1a2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
158 else if (str[0] == '$') 158 else if (str[0] == '$')
159 return -1; 159 return -1;
160 /* exclude debugging symbols */ 160 /* exclude debugging symbols */
161 else if (stype == 'N') 161 else if (stype == 'N' || stype == 'n')
162 return -1; 162 return -1;
163 163
164 /* include the type field in the symbol name, so that it gets 164 /* include the type field in the symbol name, so that it gets
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index 400ef35169c5..aa0cc49ad1ad 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -53,6 +53,7 @@ acumulator||accumulator
53adapater||adapter 53adapater||adapter
54addional||additional 54addional||additional
55additionaly||additionally 55additionaly||additionally
56additonal||additional
56addres||address 57addres||address
57adddress||address 58adddress||address
58addreses||addresses 59addreses||addresses
@@ -67,6 +68,8 @@ adviced||advised
67afecting||affecting 68afecting||affecting
68againt||against 69againt||against
69agaist||against 70agaist||against
71aggreataon||aggregation
72aggreation||aggregation
70albumns||albums 73albumns||albums
71alegorical||allegorical 74alegorical||allegorical
72algined||aligned 75algined||aligned
@@ -80,6 +83,8 @@ aligment||alignment
80alignement||alignment 83alignement||alignment
81allign||align 84allign||align
82alligned||aligned 85alligned||aligned
86alllocate||allocate
87alloated||allocated
83allocatote||allocate 88allocatote||allocate
84allocatrd||allocated 89allocatrd||allocated
85allocte||allocate 90allocte||allocate
@@ -171,6 +176,7 @@ availale||available
171availavility||availability 176availavility||availability
172availble||available 177availble||available
173availiable||available 178availiable||available
179availible||available
174avalable||available 180avalable||available
175avaliable||available 181avaliable||available
176aysnc||async 182aysnc||async
@@ -203,6 +209,7 @@ broadcat||broadcast
203cacluated||calculated 209cacluated||calculated
204caculation||calculation 210caculation||calculation
205calender||calendar 211calender||calendar
212calescing||coalescing
206calle||called 213calle||called
207callibration||calibration 214callibration||calibration
208calucate||calculate 215calucate||calculate
@@ -210,6 +217,7 @@ calulate||calculate
210cancelation||cancellation 217cancelation||cancellation
211cancle||cancel 218cancle||cancel
212capabilites||capabilities 219capabilites||capabilities
220capabilty||capability
213capabitilies||capabilities 221capabitilies||capabilities
214capatibilities||capabilities 222capatibilities||capabilities
215capapbilities||capabilities 223capapbilities||capabilities
@@ -302,6 +310,7 @@ containts||contains
302contaisn||contains 310contaisn||contains
303contant||contact 311contant||contact
304contence||contents 312contence||contents
313continious||continuous
305continous||continuous 314continous||continuous
306continously||continuously 315continously||continuously
307continueing||continuing 316continueing||continuing
@@ -393,6 +402,7 @@ differrence||difference
393diffrent||different 402diffrent||different
394diffrentiate||differentiate 403diffrentiate||differentiate
395difinition||definition 404difinition||definition
405dimesions||dimensions
396diplay||display 406diplay||display
397direectly||directly 407direectly||directly
398disassocation||disassociation 408disassocation||disassociation
@@ -449,6 +459,7 @@ equiped||equipped
449equivelant||equivalent 459equivelant||equivalent
450equivilant||equivalent 460equivilant||equivalent
451eror||error 461eror||error
462errorr||error
452estbalishment||establishment 463estbalishment||establishment
453etsablishment||establishment 464etsablishment||establishment
454etsbalishment||establishment 465etsbalishment||establishment
@@ -481,6 +492,7 @@ failied||failed
481faillure||failure 492faillure||failure
482failue||failure 493failue||failure
483failuer||failure 494failuer||failure
495failng||failing
484faireness||fairness 496faireness||fairness
485falied||failed 497falied||failed
486faliure||failure 498faliure||failure
@@ -493,6 +505,7 @@ fetaure||feature
493fetaures||features 505fetaures||features
494fileystem||filesystem 506fileystem||filesystem
495fimware||firmware 507fimware||firmware
508firware||firmware
496finanize||finalize 509finanize||finalize
497findn||find 510findn||find
498finilizes||finalizes 511finilizes||finalizes
@@ -502,6 +515,7 @@ folloing||following
502followign||following 515followign||following
503followings||following 516followings||following
504follwing||following 517follwing||following
518fonud||found
505forseeable||foreseeable 519forseeable||foreseeable
506forse||force 520forse||force
507fortan||fortran 521fortan||fortran
@@ -532,6 +546,7 @@ grabing||grabbing
532grahical||graphical 546grahical||graphical
533grahpical||graphical 547grahpical||graphical
534grapic||graphic 548grapic||graphic
549grranted||granted
535guage||gauge 550guage||gauge
536guarenteed||guaranteed 551guarenteed||guaranteed
537guarentee||guarantee 552guarentee||guarantee
@@ -543,6 +558,7 @@ happend||happened
543harware||hardware 558harware||hardware
544heirarchically||hierarchically 559heirarchically||hierarchically
545helpfull||helpful 560helpfull||helpful
561hybernate||hibernate
546hierachy||hierarchy 562hierachy||hierarchy
547hierarchie||hierarchy 563hierarchie||hierarchy
548howver||however 564howver||however
@@ -565,16 +581,19 @@ implemenation||implementation
565implementaiton||implementation 581implementaiton||implementation
566implementated||implemented 582implementated||implemented
567implemention||implementation 583implemention||implementation
584implementd||implemented
568implemetation||implementation 585implemetation||implementation
569implemntation||implementation 586implemntation||implementation
570implentation||implementation 587implentation||implementation
571implmentation||implementation 588implmentation||implementation
572implmenting||implementing 589implmenting||implementing
590incative||inactive
573incomming||incoming 591incomming||incoming
574incompatabilities||incompatibilities 592incompatabilities||incompatibilities
575incompatable||incompatible 593incompatable||incompatible
576inconsistant||inconsistent 594inconsistant||inconsistent
577increas||increase 595increas||increase
596incremeted||incremented
578incrment||increment 597incrment||increment
579indendation||indentation 598indendation||indentation
580indended||intended 599indended||intended
@@ -619,6 +638,7 @@ interger||integer
619intermittant||intermittent 638intermittant||intermittent
620internel||internal 639internel||internal
621interoprability||interoperability 640interoprability||interoperability
641interuupt||interrupt
622interrface||interface 642interrface||interface
623interrrupt||interrupt 643interrrupt||interrupt
624interrup||interrupt 644interrup||interrupt
@@ -638,8 +658,10 @@ intrrupt||interrupt
638intterrupt||interrupt 658intterrupt||interrupt
639intuative||intuitive 659intuative||intuitive
640invaid||invalid 660invaid||invalid
661invald||invalid
641invalde||invalid 662invalde||invalid
642invalide||invalid 663invalide||invalid
664invalidiate||invalidate
643invalud||invalid 665invalud||invalid
644invididual||individual 666invididual||individual
645invokation||invocation 667invokation||invocation
@@ -713,6 +735,7 @@ misformed||malformed
713mispelled||misspelled 735mispelled||misspelled
714mispelt||misspelt 736mispelt||misspelt
715mising||missing 737mising||missing
738mismactch||mismatch
716missmanaged||mismanaged 739missmanaged||mismanaged
717missmatch||mismatch 740missmatch||mismatch
718miximum||maximum 741miximum||maximum
@@ -731,6 +754,7 @@ multidimensionnal||multidimensional
731multple||multiple 754multple||multiple
732mumber||number 755mumber||number
733muticast||multicast 756muticast||multicast
757mutilcast||multicast
734mutiple||multiple 758mutiple||multiple
735mutli||multi 759mutli||multi
736nams||names 760nams||names
@@ -834,6 +858,7 @@ posible||possible
834positon||position 858positon||position
835possibilites||possibilities 859possibilites||possibilities
836powerfull||powerful 860powerfull||powerful
861preample||preamble
837preapre||prepare 862preapre||prepare
838preceeded||preceded 863preceeded||preceded
839preceeding||preceding 864preceeding||preceding
@@ -1059,6 +1084,7 @@ sturcture||structure
1059subdirectoires||subdirectories 1084subdirectoires||subdirectories
1060suble||subtle 1085suble||subtle
1061substract||subtract 1086substract||subtract
1087submition||submission
1062succesfully||successfully 1088succesfully||successfully
1063succesful||successful 1089succesful||successful
1064successed||succeeded 1090successed||succeeded
@@ -1078,6 +1104,7 @@ suppoted||supported
1078suppported||supported 1104suppported||supported
1079suppport||support 1105suppport||support
1080supress||suppress 1106supress||suppress
1107surpressed||suppressed
1081surpresses||suppresses 1108surpresses||suppresses
1082susbsystem||subsystem 1109susbsystem||subsystem
1083suspeneded||suspended 1110suspeneded||suspended
@@ -1091,6 +1118,7 @@ swithced||switched
1091swithcing||switching 1118swithcing||switching
1092swithed||switched 1119swithed||switched
1093swithing||switching 1120swithing||switching
1121swtich||switch
1094symetric||symmetric 1122symetric||symmetric
1095synax||syntax 1123synax||syntax
1096synchonized||synchronized 1124synchonized||synchronized
@@ -1111,7 +1139,9 @@ therfore||therefore
1111thier||their 1139thier||their
1112threds||threads 1140threds||threads
1113threshhold||threshold 1141threshhold||threshold
1142thresold||threshold
1114throught||through 1143throught||through
1144troughput||throughput
1115thses||these 1145thses||these
1116tiggered||triggered 1146tiggered||triggered
1117tipically||typically 1147tipically||typically
@@ -1120,6 +1150,7 @@ tmis||this
1120torerable||tolerable 1150torerable||tolerable
1121tramsmitted||transmitted 1151tramsmitted||transmitted
1122tramsmit||transmit 1152tramsmit||transmit
1153tranasction||transaction
1123tranfer||transfer 1154tranfer||transfer
1124transciever||transceiver 1155transciever||transceiver
1125transferd||transferred 1156transferd||transferred
@@ -1133,6 +1164,7 @@ trasmission||transmission
1133treshold||threshold 1164treshold||threshold
1134trigerring||triggering 1165trigerring||triggering
1135trun||turn 1166trun||turn
1167tunning||tuning
1136ture||true 1168ture||true
1137tyep||type 1169tyep||type
1138udpate||update 1170udpate||update
@@ -1199,6 +1231,7 @@ visiters||visitors
1199vitual||virtual 1231vitual||virtual
1200wakeus||wakeups 1232wakeus||wakeups
1201wating||waiting 1233wating||waiting
1234wiat||wait
1202wether||whether 1235wether||whether
1203whataver||whatever 1236whataver||whatever
1204whcih||which 1237whcih||which
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index d5b291e94264..9cdec70d72b8 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,5 @@
1# 1#
2# Generated include files 2# Generated include files
3# 3#
4net_names.h
5capability_names.h 4capability_names.h
6rlim_names.h 5rlim_names.h
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index dafdd387d42b..81a34426d024 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -4,44 +4,11 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
4 4
5apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ 5apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
6 path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ 6 path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
7 resource.o secid.o file.o policy_ns.o label.o mount.o net.o 7 resource.o secid.o file.o policy_ns.o label.o mount.o
8apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o 8apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
9 9
10clean-files := capability_names.h rlim_names.h net_names.h 10clean-files := capability_names.h rlim_names.h
11 11
12# Build a lower case string table of address family names
13# Transform lines from
14# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
15# #define AF_INET 2 /* Internet IP Protocol */
16# to
17# [1] = "local",
18# [2] = "inet",
19#
20# and build the securityfs entries for the mapping.
21# Transforms lines from
22# #define AF_INET 2 /* Internet IP Protocol */
23# to
24# #define AA_SFS_AF_MASK "local inet"
25quiet_cmd_make-af = GEN $@
26cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
27 sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
28 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
29 echo "};" >> $@ ;\
30 printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
31 sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
32 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
33 $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
34
35# Build a lower case string table of sock type names
36# Transform lines from
37# SOCK_STREAM = 1,
38# to
39# [1] = "stream",
40quiet_cmd_make-sock = GEN $@
41cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
42 sed $^ >>$@ -r -n \
43 -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
44 echo "};" >> $@
45 12
46# Build a lower case string table of capability names 13# Build a lower case string table of capability names
47# Transforms lines from 14# Transforms lines from
@@ -94,7 +61,6 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
94 tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ 61 tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
95 62
96$(obj)/capability.o : $(obj)/capability_names.h 63$(obj)/capability.o : $(obj)/capability_names.h
97$(obj)/net.o : $(obj)/net_names.h
98$(obj)/resource.o : $(obj)/rlim_names.h 64$(obj)/resource.o : $(obj)/rlim_names.h
99$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ 65$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
100 $(src)/Makefile 66 $(src)/Makefile
@@ -102,8 +68,3 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
102$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ 68$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
103 $(src)/Makefile 69 $(src)/Makefile
104 $(call cmd,make-rlim) 70 $(call cmd,make-rlim)
105$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
106 $(srctree)/include/linux/net.h \
107 $(src)/Makefile
108 $(call cmd,make-af)
109 $(call cmd,make-sock)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 518d5928661b..caaf51dda648 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2202,7 +2202,6 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
2202 AA_SFS_DIR("policy", aa_sfs_entry_policy), 2202 AA_SFS_DIR("policy", aa_sfs_entry_policy),
2203 AA_SFS_DIR("domain", aa_sfs_entry_domain), 2203 AA_SFS_DIR("domain", aa_sfs_entry_domain),
2204 AA_SFS_DIR("file", aa_sfs_entry_file), 2204 AA_SFS_DIR("file", aa_sfs_entry_file),
2205 AA_SFS_DIR("network", aa_sfs_entry_network),
2206 AA_SFS_DIR("mount", aa_sfs_entry_mount), 2205 AA_SFS_DIR("mount", aa_sfs_entry_mount),
2207 AA_SFS_DIR("namespaces", aa_sfs_entry_ns), 2206 AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
2208 AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), 2207 AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index db80221891c6..3382518b87fa 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -21,7 +21,6 @@
21#include "include/context.h" 21#include "include/context.h"
22#include "include/file.h" 22#include "include/file.h"
23#include "include/match.h" 23#include "include/match.h"
24#include "include/net.h"
25#include "include/path.h" 24#include "include/path.h"
26#include "include/policy.h" 25#include "include/policy.h"
27#include "include/label.h" 26#include "include/label.h"
@@ -567,32 +566,6 @@ static int __file_path_perm(const char *op, struct aa_label *label,
567 return error; 566 return error;
568} 567}
569 568
570static int __file_sock_perm(const char *op, struct aa_label *label,
571 struct aa_label *flabel, struct file *file,
572 u32 request, u32 denied)
573{
574 struct socket *sock = (struct socket *) file->private_data;
575 int error;
576
577 AA_BUG(!sock);
578
579 /* revalidation due to label out of date. No revocation at this time */
580 if (!denied && aa_label_is_subset(flabel, label))
581 return 0;
582
583 /* TODO: improve to skip profiles cached in flabel */
584 error = aa_sock_file_perm(label, op, request, sock);
585 if (denied) {
586 /* TODO: improve to skip profiles checked above */
587 /* check every profile in file label to is cached */
588 last_error(error, aa_sock_file_perm(flabel, op, request, sock));
589 }
590 if (!error)
591 update_file_ctx(file_ctx(file), label, request);
592
593 return error;
594}
595
596/** 569/**
597 * aa_file_perm - do permission revalidation check & audit for @file 570 * aa_file_perm - do permission revalidation check & audit for @file
598 * @op: operation being checked 571 * @op: operation being checked
@@ -637,9 +610,6 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
637 error = __file_path_perm(op, label, flabel, file, request, 610 error = __file_path_perm(op, label, flabel, file, request,
638 denied); 611 denied);
639 612
640 else if (S_ISSOCK(file_inode(file)->i_mode))
641 error = __file_sock_perm(op, label, flabel, file, request,
642 denied);
643done: 613done:
644 rcu_read_unlock(); 614 rcu_read_unlock();
645 615
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index ff4316e1068d..620e81169659 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -121,29 +121,21 @@ struct apparmor_audit_data {
121 /* these entries require a custom callback fn */ 121 /* these entries require a custom callback fn */
122 struct { 122 struct {
123 struct aa_label *peer; 123 struct aa_label *peer;
124 union { 124 struct {
125 struct { 125 const char *target;
126 kuid_t ouid; 126 kuid_t ouid;
127 const char *target; 127 } fs;
128 } fs;
129 struct {
130 int type, protocol;
131 struct sock *peer_sk;
132 void *addr;
133 int addrlen;
134 } net;
135 int signal;
136 struct {
137 int rlim;
138 unsigned long max;
139 } rlim;
140 };
141 }; 128 };
142 struct { 129 struct {
143 struct aa_profile *profile; 130 struct aa_profile *profile;
144 const char *ns; 131 const char *ns;
145 long pos; 132 long pos;
146 } iface; 133 } iface;
134 int signal;
135 struct {
136 int rlim;
137 unsigned long max;
138 } rlim;
147 struct { 139 struct {
148 const char *src_name; 140 const char *src_name;
149 const char *type; 141 const char *type;
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
deleted file mode 100644
index 140c8efcf364..000000000000
--- a/security/apparmor/include/net.h
+++ /dev/null
@@ -1,114 +0,0 @@
1/*
2 * AppArmor security module
3 *
4 * This file contains AppArmor network mediation definitions.
5 *
6 * Copyright (C) 1998-2008 Novell/SUSE
7 * Copyright 2009-2017 Canonical Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation, version 2 of the
12 * License.
13 */
14
15#ifndef __AA_NET_H
16#define __AA_NET_H
17
18#include <net/sock.h>
19#include <linux/path.h>
20
21#include "apparmorfs.h"
22#include "label.h"
23#include "perms.h"
24#include "policy.h"
25
26#define AA_MAY_SEND AA_MAY_WRITE
27#define AA_MAY_RECEIVE AA_MAY_READ
28
29#define AA_MAY_SHUTDOWN AA_MAY_DELETE
30
31#define AA_MAY_CONNECT AA_MAY_OPEN
32#define AA_MAY_ACCEPT 0x00100000
33
34#define AA_MAY_BIND 0x00200000
35#define AA_MAY_LISTEN 0x00400000
36
37#define AA_MAY_SETOPT 0x01000000
38#define AA_MAY_GETOPT 0x02000000
39
40#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
41 AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
42 AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
43 AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
44
45#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
46 AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
47 AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
48 AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
49 AA_MAY_MPROT)
50
51#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
52 AA_MAY_ACCEPT)
53struct aa_sk_ctx {
54 struct aa_label *label;
55 struct aa_label *peer;
56 struct path path;
57};
58
59#define SK_CTX(X) ((X)->sk_security)
60#define SOCK_ctx(X) SOCK_INODE(X)->i_security
61#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
62 struct lsm_network_audit NAME ## _net = { .sk = (SK), \
63 .family = (F)}; \
64 DEFINE_AUDIT_DATA(NAME, \
65 ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
66 LSM_AUDIT_DATA_NONE, \
67 OP); \
68 NAME.u.net = &(NAME ## _net); \
69 aad(&NAME)->net.type = (T); \
70 aad(&NAME)->net.protocol = (P)
71
72#define DEFINE_AUDIT_SK(NAME, OP, SK) \
73 DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
74 (SK)->sk_protocol)
75
76/* struct aa_net - network confinement data
77 * @allow: basic network families permissions
78 * @audit: which network permissions to force audit
79 * @quiet: which network permissions to quiet rejects
80 */
81struct aa_net {
82 u16 allow[AF_MAX];
83 u16 audit[AF_MAX];
84 u16 quiet[AF_MAX];
85};
86
87
88extern struct aa_sfs_entry aa_sfs_entry_network[];
89
90void audit_net_cb(struct audit_buffer *ab, void *va);
91int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
92 u32 request, u16 family, int type);
93int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
94 int type, int protocol);
95static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
96 struct common_audit_data *sa,
97 u32 request,
98 struct sock *sk)
99{
100 return aa_profile_af_perm(profile, sa, request, sk->sk_family,
101 sk->sk_type);
102}
103int aa_sk_perm(const char *op, u32 request, struct sock *sk);
104
105int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
106 struct socket *sock);
107
108
109static inline void aa_free_net_rules(struct aa_net *new)
110{
111 /* NOP */
112}
113
114#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index af04d5a7d73d..2b27bb79aec4 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -135,10 +135,9 @@ extern struct aa_perms allperms;
135 135
136 136
137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); 137void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
138void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, 138void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
139 u32 mask);
140void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, 139void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
141 u32 chrsmask, const char * const *names, u32 namesmask); 140 u32 chrsmask, const char **names, u32 namesmask);
142void aa_apply_modes_to_perms(struct aa_profile *profile, 141void aa_apply_modes_to_perms(struct aa_profile *profile,
143 struct aa_perms *perms); 142 struct aa_perms *perms);
144void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, 143void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 4364088a0b9e..17fe41a9cac3 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -30,7 +30,6 @@
30#include "file.h" 30#include "file.h"
31#include "lib.h" 31#include "lib.h"
32#include "label.h" 32#include "label.h"
33#include "net.h"
34#include "perms.h" 33#include "perms.h"
35#include "resource.h" 34#include "resource.h"
36 35
@@ -112,7 +111,6 @@ struct aa_data {
112 * @policy: general match rules governing policy 111 * @policy: general match rules governing policy
113 * @file: The set of rules governing basic file access and domain transitions 112 * @file: The set of rules governing basic file access and domain transitions
114 * @caps: capabilities for the profile 113 * @caps: capabilities for the profile
115 * @net: network controls for the profile
116 * @rlimits: rlimits for the profile 114 * @rlimits: rlimits for the profile
117 * 115 *
118 * @dents: dentries for the profiles file entries in apparmorfs 116 * @dents: dentries for the profiles file entries in apparmorfs
@@ -150,7 +148,6 @@ struct aa_profile {
150 struct aa_policydb policy; 148 struct aa_policydb policy;
151 struct aa_file_rules file; 149 struct aa_file_rules file;
152 struct aa_caps caps; 150 struct aa_caps caps;
153 struct aa_net net;
154 struct aa_rlimit rlimits; 151 struct aa_rlimit rlimits;
155 152
156 struct aa_loaddata *rawdata; 153 struct aa_loaddata *rawdata;
@@ -223,16 +220,6 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
223 return 0; 220 return 0;
224} 221}
225 222
226static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
227 u16 AF) {
228 unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
229 u16 be_af = cpu_to_be16(AF);
230
231 if (!state)
232 return 0;
233 return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
234}
235
236/** 223/**
237 * aa_get_profile - increment refcount on profile @p 224 * aa_get_profile - increment refcount on profile @p
238 * @p: profile (MAYBE NULL) 225 * @p: profile (MAYBE NULL)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 8818621b5d95..08ca26bcca77 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -211,8 +211,7 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
211 *str = '\0'; 211 *str = '\0';
212} 212}
213 213
214void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, 214void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
215 u32 mask)
216{ 215{
217 const char *fmt = "%s"; 216 const char *fmt = "%s";
218 unsigned int i, perm = 1; 217 unsigned int i, perm = 1;
@@ -230,7 +229,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
230} 229}
231 230
232void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, 231void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
233 u32 chrsmask, const char * const *names, u32 namesmask) 232 u32 chrsmask, const char **names, u32 namesmask)
234{ 233{
235 char str[33]; 234 char str[33];
236 235
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 72b915dfcaf7..1346ee5be04f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -33,7 +33,6 @@
33#include "include/context.h" 33#include "include/context.h"
34#include "include/file.h" 34#include "include/file.h"
35#include "include/ipc.h" 35#include "include/ipc.h"
36#include "include/net.h"
37#include "include/path.h" 36#include "include/path.h"
38#include "include/label.h" 37#include "include/label.h"
39#include "include/policy.h" 38#include "include/policy.h"
@@ -737,368 +736,6 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
737 return error; 736 return error;
738} 737}
739 738
740/**
741 * apparmor_sk_alloc_security - allocate and attach the sk_security field
742 */
743static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
744{
745 struct aa_sk_ctx *ctx;
746
747 ctx = kzalloc(sizeof(*ctx), flags);
748 if (!ctx)
749 return -ENOMEM;
750
751 SK_CTX(sk) = ctx;
752
753 return 0;
754}
755
756/**
757 * apparmor_sk_free_security - free the sk_security field
758 */
759static void apparmor_sk_free_security(struct sock *sk)
760{
761 struct aa_sk_ctx *ctx = SK_CTX(sk);
762
763 SK_CTX(sk) = NULL;
764 aa_put_label(ctx->label);
765 aa_put_label(ctx->peer);
766 path_put(&ctx->path);
767 kfree(ctx);
768}
769
770/**
771 * apparmor_clone_security - clone the sk_security field
772 */
773static void apparmor_sk_clone_security(const struct sock *sk,
774 struct sock *newsk)
775{
776 struct aa_sk_ctx *ctx = SK_CTX(sk);
777 struct aa_sk_ctx *new = SK_CTX(newsk);
778
779 new->label = aa_get_label(ctx->label);
780 new->peer = aa_get_label(ctx->peer);
781 new->path = ctx->path;
782 path_get(&new->path);
783}
784
785static int aa_sock_create_perm(struct aa_label *label, int family, int type,
786 int protocol)
787{
788 AA_BUG(!label);
789 AA_BUG(in_interrupt());
790
791 return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type,
792 protocol);
793}
794
795
796/**
797 * apparmor_socket_create - check perms before creating a new socket
798 */
799static int apparmor_socket_create(int family, int type, int protocol, int kern)
800{
801 struct aa_label *label;
802 int error = 0;
803
804 label = begin_current_label_crit_section();
805 if (!(kern || unconfined(label)))
806 error = aa_sock_create_perm(label, family, type, protocol);
807 end_current_label_crit_section(label);
808
809 return error;
810}
811
812/**
813 * apparmor_socket_post_create - setup the per-socket security struct
814 *
815 * Note:
816 * - kernel sockets currently labeled unconfined but we may want to
817 * move to a special kernel label
818 * - socket may not have sk here if created with sock_create_lite or
819 * sock_alloc. These should be accept cases which will be handled in
820 * sock_graft.
821 */
822static int apparmor_socket_post_create(struct socket *sock, int family,
823 int type, int protocol, int kern)
824{
825 struct aa_label *label;
826
827 if (kern) {
828 struct aa_ns *ns = aa_get_current_ns();
829
830 label = aa_get_label(ns_unconfined(ns));
831 aa_put_ns(ns);
832 } else
833 label = aa_get_current_label();
834
835 if (sock->sk) {
836 struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
837
838 aa_put_label(ctx->label);
839 ctx->label = aa_get_label(label);
840 }
841 aa_put_label(label);
842
843 return 0;
844}
845
846/**
847 * apparmor_socket_bind - check perms before bind addr to socket
848 */
849static int apparmor_socket_bind(struct socket *sock,
850 struct sockaddr *address, int addrlen)
851{
852 AA_BUG(!sock);
853 AA_BUG(!sock->sk);
854 AA_BUG(!address);
855 AA_BUG(in_interrupt());
856
857 return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
858}
859
860/**
861 * apparmor_socket_connect - check perms before connecting @sock to @address
862 */
863static int apparmor_socket_connect(struct socket *sock,
864 struct sockaddr *address, int addrlen)
865{
866 AA_BUG(!sock);
867 AA_BUG(!sock->sk);
868 AA_BUG(!address);
869 AA_BUG(in_interrupt());
870
871 return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
872}
873
874/**
875 * apparmor_socket_list - check perms before allowing listen
876 */
877static int apparmor_socket_listen(struct socket *sock, int backlog)
878{
879 AA_BUG(!sock);
880 AA_BUG(!sock->sk);
881 AA_BUG(in_interrupt());
882
883 return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
884}
885
886/**
887 * apparmor_socket_accept - check perms before accepting a new connection.
888 *
889 * Note: while @newsock is created and has some information, the accept
890 * has not been done.
891 */
892static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
893{
894 AA_BUG(!sock);
895 AA_BUG(!sock->sk);
896 AA_BUG(!newsock);
897 AA_BUG(in_interrupt());
898
899 return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
900}
901
902static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
903 struct msghdr *msg, int size)
904{
905 AA_BUG(!sock);
906 AA_BUG(!sock->sk);
907 AA_BUG(!msg);
908 AA_BUG(in_interrupt());
909
910 return aa_sk_perm(op, request, sock->sk);
911}
912
913/**
914 * apparmor_socket_sendmsg - check perms before sending msg to another socket
915 */
916static int apparmor_socket_sendmsg(struct socket *sock,
917 struct msghdr *msg, int size)
918{
919 return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
920}
921
922/**
923 * apparmor_socket_recvmsg - check perms before receiving a message
924 */
925static int apparmor_socket_recvmsg(struct socket *sock,
926 struct msghdr *msg, int size, int flags)
927{
928 return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
929}
930
931/* revaliation, get/set attr, shutdown */
932static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
933{
934 AA_BUG(!sock);
935 AA_BUG(!sock->sk);
936 AA_BUG(in_interrupt());
937
938 return aa_sk_perm(op, request, sock->sk);
939}
940
941/**
942 * apparmor_socket_getsockname - check perms before getting the local address
943 */
944static int apparmor_socket_getsockname(struct socket *sock)
945{
946 return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
947}
948
949/**
950 * apparmor_socket_getpeername - check perms before getting remote address
951 */
952static int apparmor_socket_getpeername(struct socket *sock)
953{
954 return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
955}
956
957/* revaliation, get/set attr, opt */
958static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
959 int level, int optname)
960{
961 AA_BUG(!sock);
962 AA_BUG(!sock->sk);
963 AA_BUG(in_interrupt());
964
965 return aa_sk_perm(op, request, sock->sk);
966}
967
968/**
969 * apparmor_getsockopt - check perms before getting socket options
970 */
971static int apparmor_socket_getsockopt(struct socket *sock, int level,
972 int optname)
973{
974 return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
975 level, optname);
976}
977
978/**
979 * apparmor_setsockopt - check perms before setting socket options
980 */
981static int apparmor_socket_setsockopt(struct socket *sock, int level,
982 int optname)
983{
984 return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
985 level, optname);
986}
987
988/**
989 * apparmor_socket_shutdown - check perms before shutting down @sock conn
990 */
991static int apparmor_socket_shutdown(struct socket *sock, int how)
992{
993 return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
994}
995
996/**
997 * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
998 *
999 * Note: can not sleep may be called with locks held
1000 *
1001 * dont want protocol specific in __skb_recv_datagram()
1002 * to deny an incoming connection socket_sock_rcv_skb()
1003 */
1004static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
1005{
1006 return 0;
1007}
1008
1009
1010static struct aa_label *sk_peer_label(struct sock *sk)
1011{
1012 struct aa_sk_ctx *ctx = SK_CTX(sk);
1013
1014 if (ctx->peer)
1015 return ctx->peer;
1016
1017 return ERR_PTR(-ENOPROTOOPT);
1018}
1019
1020/**
1021 * apparmor_socket_getpeersec_stream - get security context of peer
1022 *
1023 * Note: for tcp only valid if using ipsec or cipso on lan
1024 */
1025static int apparmor_socket_getpeersec_stream(struct socket *sock,
1026 char __user *optval,
1027 int __user *optlen,
1028 unsigned int len)
1029{
1030 char *name;
1031 int slen, error = 0;
1032 struct aa_label *label;
1033 struct aa_label *peer;
1034
1035 label = begin_current_label_crit_section();
1036 peer = sk_peer_label(sock->sk);
1037 if (IS_ERR(peer)) {
1038 error = PTR_ERR(peer);
1039 goto done;
1040 }
1041 slen = aa_label_asxprint(&name, labels_ns(label), peer,
1042 FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
1043 FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
1044 /* don't include terminating \0 in slen, it breaks some apps */
1045 if (slen < 0) {
1046 error = -ENOMEM;
1047 } else {
1048 if (slen > len) {
1049 error = -ERANGE;
1050 } else if (copy_to_user(optval, name, slen)) {
1051 error = -EFAULT;
1052 goto out;
1053 }
1054 if (put_user(slen, optlen))
1055 error = -EFAULT;
1056out:
1057 kfree(name);
1058
1059 }
1060
1061done:
1062 end_current_label_crit_section(label);
1063
1064 return error;
1065}
1066
1067/**
1068 * apparmor_socket_getpeersec_dgram - get security label of packet
1069 * @sock: the peer socket
1070 * @skb: packet data
1071 * @secid: pointer to where to put the secid of the packet
1072 *
1073 * Sets the netlabel socket state on sk from parent
1074 */
1075static int apparmor_socket_getpeersec_dgram(struct socket *sock,
1076 struct sk_buff *skb, u32 *secid)
1077
1078{
1079 /* TODO: requires secid support */
1080 return -ENOPROTOOPT;
1081}
1082
1083/**
1084 * apparmor_sock_graft - Initialize newly created socket
1085 * @sk: child sock
1086 * @parent: parent socket
1087 *
1088 * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
1089 * just set sk security information off of current creating process label
1090 * Labeling of sk for accept case - probably should be sock based
1091 * instead of task, because of the case where an implicitly labeled
1092 * socket is shared by different tasks.
1093 */
1094static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
1095{
1096 struct aa_sk_ctx *ctx = SK_CTX(sk);
1097
1098 if (!ctx->label)
1099 ctx->label = aa_get_current_label();
1100}
1101
1102static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { 739static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
1103 LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), 740 LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
1104 LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), 741 LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1133,30 +770,6 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
1133 LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), 770 LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
1134 LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), 771 LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
1135 772
1136 LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
1137 LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
1138 LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
1139
1140 LSM_HOOK_INIT(socket_create, apparmor_socket_create),
1141 LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
1142 LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
1143 LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
1144 LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
1145 LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
1146 LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
1147 LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
1148 LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
1149 LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
1150 LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
1151 LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
1152 LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
1153 LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
1154 LSM_HOOK_INIT(socket_getpeersec_stream,
1155 apparmor_socket_getpeersec_stream),
1156 LSM_HOOK_INIT(socket_getpeersec_dgram,
1157 apparmor_socket_getpeersec_dgram),
1158 LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
1159
1160 LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), 773 LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
1161 LSM_HOOK_INIT(cred_free, apparmor_cred_free), 774 LSM_HOOK_INIT(cred_free, apparmor_cred_free),
1162 LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), 775 LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
deleted file mode 100644
index 33d54435f8d6..000000000000
--- a/security/apparmor/net.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/*
2 * AppArmor security module
3 *
4 * This file contains AppArmor network mediation
5 *
6 * Copyright (C) 1998-2008 Novell/SUSE
7 * Copyright 2009-2017 Canonical Ltd.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation, version 2 of the
12 * License.
13 */
14
15#include "include/apparmor.h"
16#include "include/audit.h"
17#include "include/context.h"
18#include "include/label.h"
19#include "include/net.h"
20#include "include/policy.h"
21
22#include "net_names.h"
23
24
25struct aa_sfs_entry aa_sfs_entry_network[] = {
26 AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
27 { }
28};
29
30static const char * const net_mask_names[] = {
31 "unknown",
32 "send",
33 "receive",
34 "unknown",
35
36 "create",
37 "shutdown",
38 "connect",
39 "unknown",
40
41 "setattr",
42 "getattr",
43 "setcred",
44 "getcred",
45
46 "chmod",
47 "chown",
48 "chgrp",
49 "lock",
50
51 "mmap",
52 "mprot",
53 "unknown",
54 "unknown",
55
56 "accept",
57 "bind",
58 "listen",
59 "unknown",
60
61 "setopt",
62 "getopt",
63 "unknown",
64 "unknown",
65
66 "unknown",
67 "unknown",
68 "unknown",
69 "unknown",
70};
71
72
73/* audit callback for net specific fields */
74void audit_net_cb(struct audit_buffer *ab, void *va)
75{
76 struct common_audit_data *sa = va;
77
78 audit_log_format(ab, " family=");
79 if (address_family_names[sa->u.net->family])
80 audit_log_string(ab, address_family_names[sa->u.net->family]);
81 else
82 audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family);
83 audit_log_format(ab, " sock_type=");
84 if (sock_type_names[aad(sa)->net.type])
85 audit_log_string(ab, sock_type_names[aad(sa)->net.type]);
86 else
87 audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type);
88 audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
89
90 if (aad(sa)->request & NET_PERMS_MASK) {
91 audit_log_format(ab, " requested_mask=");
92 aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
93 net_mask_names, NET_PERMS_MASK);
94
95 if (aad(sa)->denied & NET_PERMS_MASK) {
96 audit_log_format(ab, " denied_mask=");
97 aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
98 net_mask_names, NET_PERMS_MASK);
99 }
100 }
101 if (aad(sa)->peer) {
102 audit_log_format(ab, " peer=");
103 aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
104 FLAGS_NONE, GFP_ATOMIC);
105 }
106}
107
108
109/* Generic af perm */
110int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
111 u32 request, u16 family, int type)
112{
113 struct aa_perms perms = { };
114
115 AA_BUG(family >= AF_MAX);
116 AA_BUG(type < 0 || type >= SOCK_MAX);
117
118 if (profile_unconfined(profile))
119 return 0;
120
121 perms.allow = (profile->net.allow[family] & (1 << type)) ?
122 ALL_PERMS_MASK : 0;
123 perms.audit = (profile->net.audit[family] & (1 << type)) ?
124 ALL_PERMS_MASK : 0;
125 perms.quiet = (profile->net.quiet[family] & (1 << type)) ?
126 ALL_PERMS_MASK : 0;
127 aa_apply_modes_to_perms(profile, &perms);
128
129 return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
130}
131
132int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
133 int type, int protocol)
134{
135 struct aa_profile *profile;
136 DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
137
138 return fn_for_each_confined(label, profile,
139 aa_profile_af_perm(profile, &sa, request, family,
140 type));
141}
142
143static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
144 struct sock *sk)
145{
146 struct aa_profile *profile;
147 DEFINE_AUDIT_SK(sa, op, sk);
148
149 AA_BUG(!label);
150 AA_BUG(!sk);
151
152 if (unconfined(label))
153 return 0;
154
155 return fn_for_each_confined(label, profile,
156 aa_profile_af_sk_perm(profile, &sa, request, sk));
157}
158
159int aa_sk_perm(const char *op, u32 request, struct sock *sk)
160{
161 struct aa_label *label;
162 int error;
163
164 AA_BUG(!sk);
165 AA_BUG(in_interrupt());
166
167 /* TODO: switch to begin_current_label ???? */
168 label = begin_current_label_crit_section();
169 error = aa_label_sk_perm(label, op, request, sk);
170 end_current_label_crit_section(label);
171
172 return error;
173}
174
175
176int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
177 struct socket *sock)
178{
179 AA_BUG(!label);
180 AA_BUG(!sock);
181 AA_BUG(!sock->sk);
182
183 return aa_label_sk_perm(label, op, request, sock->sk);
184}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 5a2aec358322..4ede87c30f8b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -275,19 +275,6 @@ fail:
275 return 0; 275 return 0;
276} 276}
277 277
278static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
279{
280 if (unpack_nameX(e, AA_U16, name)) {
281 if (!inbounds(e, sizeof(u16)))
282 return 0;
283 if (data)
284 *data = le16_to_cpu(get_unaligned((__le16 *) e->pos));
285 e->pos += sizeof(u16);
286 return 1;
287 }
288 return 0;
289}
290
291static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) 278static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
292{ 279{
293 if (unpack_nameX(e, AA_U32, name)) { 280 if (unpack_nameX(e, AA_U32, name)) {
@@ -597,7 +584,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
597 struct aa_profile *profile = NULL; 584 struct aa_profile *profile = NULL;
598 const char *tmpname, *tmpns = NULL, *name = NULL; 585 const char *tmpname, *tmpns = NULL, *name = NULL;
599 const char *info = "failed to unpack profile"; 586 const char *info = "failed to unpack profile";
600 size_t size = 0, ns_len; 587 size_t ns_len;
601 struct rhashtable_params params = { 0 }; 588 struct rhashtable_params params = { 0 };
602 char *key = NULL; 589 char *key = NULL;
603 struct aa_data *data; 590 struct aa_data *data;
@@ -730,38 +717,6 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
730 goto fail; 717 goto fail;
731 } 718 }
732 719
733 size = unpack_array(e, "net_allowed_af");
734 if (size) {
735
736 for (i = 0; i < size; i++) {
737 /* discard extraneous rules that this kernel will
738 * never request
739 */
740 if (i >= AF_MAX) {
741 u16 tmp;
742
743 if (!unpack_u16(e, &tmp, NULL) ||
744 !unpack_u16(e, &tmp, NULL) ||
745 !unpack_u16(e, &tmp, NULL))
746 goto fail;
747 continue;
748 }
749 if (!unpack_u16(e, &profile->net.allow[i], NULL))
750 goto fail;
751 if (!unpack_u16(e, &profile->net.audit[i], NULL))
752 goto fail;
753 if (!unpack_u16(e, &profile->net.quiet[i], NULL))
754 goto fail;
755 }
756 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
757 goto fail;
758 }
759 if (VERSION_LT(e->version, v7)) {
760 /* pre v7 policy always allowed these */
761 profile->net.allow[AF_UNIX] = 0xffff;
762 profile->net.allow[AF_NETLINK] = 0xffff;
763 }
764
765 if (unpack_nameX(e, AA_STRUCT, "policydb")) { 720 if (unpack_nameX(e, AA_STRUCT, "policydb")) {
766 /* generic policy dfa - optional and may be NULL */ 721 /* generic policy dfa - optional and may be NULL */
767 info = "failed to unpack policydb"; 722 info = "failed to unpack policydb";
diff --git a/security/commoncap.c b/security/commoncap.c
index c25e0d27537f..fc46f5b85251 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
585 struct vfs_ns_cap_data data, *nscaps = &data; 585 struct vfs_ns_cap_data data, *nscaps = &data;
586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data; 586 struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
587 kuid_t rootkuid; 587 kuid_t rootkuid;
588 struct user_namespace *fs_ns = inode->i_sb->s_user_ns; 588 struct user_namespace *fs_ns;
589 589
590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); 590 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
591 591
592 if (!inode) 592 if (!inode)
593 return -ENODATA; 593 return -ENODATA;
594 594
595 fs_ns = inode->i_sb->s_user_ns;
595 size = __vfs_getxattr((struct dentry *)dentry, inode, 596 size = __vfs_getxattr((struct dentry *)dentry, inode,
596 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ); 597 XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
597 if (size == -ENODATA || size == -EOPNOTSUPP) 598 if (size == -ENODATA || size == -EOPNOTSUPP)
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index a7a23b5541f8..6462e6654ccf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -45,10 +45,9 @@ config BIG_KEYS
45 bool "Large payload keys" 45 bool "Large payload keys"
46 depends on KEYS 46 depends on KEYS
47 depends on TMPFS 47 depends on TMPFS
48 depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y) 48 select CRYPTO
49 select CRYPTO_AES 49 select CRYPTO_AES
50 select CRYPTO_ECB 50 select CRYPTO_GCM
51 select CRYPTO_RNG
52 help 51 help
53 This option provides support for holding large keys within the kernel 52 This option provides support for holding large keys within the kernel
54 (for example Kerberos ticket caches). The data may be stored out to 53 (for example Kerberos ticket caches). The data may be stored out to
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 6acb00f6f22c..929e14978c42 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -1,5 +1,6 @@
1/* Large capacity key type 1/* Large capacity key type
2 * 2 *
3 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
3 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. 4 * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 5 * Written by David Howells (dhowells@redhat.com)
5 * 6 *
@@ -16,10 +17,10 @@
16#include <linux/shmem_fs.h> 17#include <linux/shmem_fs.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/scatterlist.h> 19#include <linux/scatterlist.h>
20#include <linux/random.h>
19#include <keys/user-type.h> 21#include <keys/user-type.h>
20#include <keys/big_key-type.h> 22#include <keys/big_key-type.h>
21#include <crypto/rng.h> 23#include <crypto/aead.h>
22#include <crypto/skcipher.h>
23 24
24/* 25/*
25 * Layout of key payload words. 26 * Layout of key payload words.
@@ -49,7 +50,12 @@ enum big_key_op {
49/* 50/*
50 * Key size for big_key data encryption 51 * Key size for big_key data encryption
51 */ 52 */
52#define ENC_KEY_SIZE 16 53#define ENC_KEY_SIZE 32
54
55/*
56 * Authentication tag length
57 */
58#define ENC_AUTHTAG_SIZE 16
53 59
54/* 60/*
55 * big_key defined keys take an arbitrary string as the description and an 61 * big_key defined keys take an arbitrary string as the description and an
@@ -64,57 +70,62 @@ struct key_type key_type_big_key = {
64 .destroy = big_key_destroy, 70 .destroy = big_key_destroy,
65 .describe = big_key_describe, 71 .describe = big_key_describe,
66 .read = big_key_read, 72 .read = big_key_read,
73 /* no ->update(); don't add it without changing big_key_crypt() nonce */
67}; 74};
68 75
69/* 76/*
70 * Crypto names for big_key data encryption 77 * Crypto names for big_key data authenticated encryption
71 */ 78 */
72static const char big_key_rng_name[] = "stdrng"; 79static const char big_key_alg_name[] = "gcm(aes)";
73static const char big_key_alg_name[] = "ecb(aes)";
74 80
75/* 81/*
76 * Crypto algorithms for big_key data encryption 82 * Crypto algorithms for big_key data authenticated encryption
77 */ 83 */
78static struct crypto_rng *big_key_rng; 84static struct crypto_aead *big_key_aead;
79static struct crypto_skcipher *big_key_skcipher;
80 85
81/* 86/*
82 * Generate random key to encrypt big_key data 87 * Since changing the key affects the entire object, we need a mutex.
83 */ 88 */
84static inline int big_key_gen_enckey(u8 *key) 89static DEFINE_MUTEX(big_key_aead_lock);
85{
86 return crypto_rng_get_bytes(big_key_rng, key, ENC_KEY_SIZE);
87}
88 90
89/* 91/*
90 * Encrypt/decrypt big_key data 92 * Encrypt/decrypt big_key data
91 */ 93 */
92static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key) 94static int big_key_crypt(enum big_key_op op, u8 *data, size_t datalen, u8 *key)
93{ 95{
94 int ret = -EINVAL; 96 int ret;
95 struct scatterlist sgio; 97 struct scatterlist sgio;
96 SKCIPHER_REQUEST_ON_STACK(req, big_key_skcipher); 98 struct aead_request *aead_req;
97 99 /* We always use a zero nonce. The reason we can get away with this is
98 if (crypto_skcipher_setkey(big_key_skcipher, key, ENC_KEY_SIZE)) { 100 * because we're using a different randomly generated key for every
101 * different encryption. Notably, too, key_type_big_key doesn't define
102 * an .update function, so there's no chance we'll wind up reusing the
103 * key to encrypt updated data. Simply put: one key, one encryption.
104 */
105 u8 zero_nonce[crypto_aead_ivsize(big_key_aead)];
106
107 aead_req = aead_request_alloc(big_key_aead, GFP_KERNEL);
108 if (!aead_req)
109 return -ENOMEM;
110
111 memset(zero_nonce, 0, sizeof(zero_nonce));
112 sg_init_one(&sgio, data, datalen + (op == BIG_KEY_ENC ? ENC_AUTHTAG_SIZE : 0));
113 aead_request_set_crypt(aead_req, &sgio, &sgio, datalen, zero_nonce);
114 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
115 aead_request_set_ad(aead_req, 0);
116
117 mutex_lock(&big_key_aead_lock);
118 if (crypto_aead_setkey(big_key_aead, key, ENC_KEY_SIZE)) {
99 ret = -EAGAIN; 119 ret = -EAGAIN;
100 goto error; 120 goto error;
101 } 121 }
102
103 skcipher_request_set_tfm(req, big_key_skcipher);
104 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
105 NULL, NULL);
106
107 sg_init_one(&sgio, data, datalen);
108 skcipher_request_set_crypt(req, &sgio, &sgio, datalen, NULL);
109
110 if (op == BIG_KEY_ENC) 122 if (op == BIG_KEY_ENC)
111 ret = crypto_skcipher_encrypt(req); 123 ret = crypto_aead_encrypt(aead_req);
112 else 124 else
113 ret = crypto_skcipher_decrypt(req); 125 ret = crypto_aead_decrypt(aead_req);
114
115 skcipher_request_zero(req);
116
117error: 126error:
127 mutex_unlock(&big_key_aead_lock);
128 aead_request_free(aead_req);
118 return ret; 129 return ret;
119} 130}
120 131
@@ -146,16 +157,13 @@ int big_key_preparse(struct key_preparsed_payload *prep)
146 * 157 *
147 * File content is stored encrypted with randomly generated key. 158 * File content is stored encrypted with randomly generated key.
148 */ 159 */
149 size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); 160 size_t enclen = datalen + ENC_AUTHTAG_SIZE;
150 loff_t pos = 0; 161 loff_t pos = 0;
151 162
152 /* prepare aligned data to encrypt */
153 data = kmalloc(enclen, GFP_KERNEL); 163 data = kmalloc(enclen, GFP_KERNEL);
154 if (!data) 164 if (!data)
155 return -ENOMEM; 165 return -ENOMEM;
156
157 memcpy(data, prep->data, datalen); 166 memcpy(data, prep->data, datalen);
158 memset(data + datalen, 0x00, enclen - datalen);
159 167
160 /* generate random key */ 168 /* generate random key */
161 enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL); 169 enckey = kmalloc(ENC_KEY_SIZE, GFP_KERNEL);
@@ -163,13 +171,12 @@ int big_key_preparse(struct key_preparsed_payload *prep)
163 ret = -ENOMEM; 171 ret = -ENOMEM;
164 goto error; 172 goto error;
165 } 173 }
166 174 ret = get_random_bytes_wait(enckey, ENC_KEY_SIZE);
167 ret = big_key_gen_enckey(enckey); 175 if (unlikely(ret))
168 if (ret)
169 goto err_enckey; 176 goto err_enckey;
170 177
171 /* encrypt aligned data */ 178 /* encrypt aligned data */
172 ret = big_key_crypt(BIG_KEY_ENC, data, enclen, enckey); 179 ret = big_key_crypt(BIG_KEY_ENC, data, datalen, enckey);
173 if (ret) 180 if (ret)
174 goto err_enckey; 181 goto err_enckey;
175 182
@@ -195,7 +202,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
195 *path = file->f_path; 202 *path = file->f_path;
196 path_get(path); 203 path_get(path);
197 fput(file); 204 fput(file);
198 kfree(data); 205 kzfree(data);
199 } else { 206 } else {
200 /* Just store the data in a buffer */ 207 /* Just store the data in a buffer */
201 void *data = kmalloc(datalen, GFP_KERNEL); 208 void *data = kmalloc(datalen, GFP_KERNEL);
@@ -211,9 +218,9 @@ int big_key_preparse(struct key_preparsed_payload *prep)
211err_fput: 218err_fput:
212 fput(file); 219 fput(file);
213err_enckey: 220err_enckey:
214 kfree(enckey); 221 kzfree(enckey);
215error: 222error:
216 kfree(data); 223 kzfree(data);
217 return ret; 224 return ret;
218} 225}
219 226
@@ -227,7 +234,7 @@ void big_key_free_preparse(struct key_preparsed_payload *prep)
227 234
228 path_put(path); 235 path_put(path);
229 } 236 }
230 kfree(prep->payload.data[big_key_data]); 237 kzfree(prep->payload.data[big_key_data]);
231} 238}
232 239
233/* 240/*
@@ -240,7 +247,7 @@ void big_key_revoke(struct key *key)
240 247
241 /* clear the quota */ 248 /* clear the quota */
242 key_payload_reserve(key, 0); 249 key_payload_reserve(key, 0);
243 if (key_is_instantiated(key) && 250 if (key_is_positive(key) &&
244 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) 251 (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
245 vfs_truncate(path, 0); 252 vfs_truncate(path, 0);
246} 253}
@@ -259,7 +266,7 @@ void big_key_destroy(struct key *key)
259 path->mnt = NULL; 266 path->mnt = NULL;
260 path->dentry = NULL; 267 path->dentry = NULL;
261 } 268 }
262 kfree(key->payload.data[big_key_data]); 269 kzfree(key->payload.data[big_key_data]);
263 key->payload.data[big_key_data] = NULL; 270 key->payload.data[big_key_data] = NULL;
264} 271}
265 272
@@ -272,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
272 279
273 seq_puts(m, key->description); 280 seq_puts(m, key->description);
274 281
275 if (key_is_instantiated(key)) 282 if (key_is_positive(key))
276 seq_printf(m, ": %zu [%s]", 283 seq_printf(m, ": %zu [%s]",
277 datalen, 284 datalen,
278 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); 285 datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
@@ -295,7 +302,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
295 struct file *file; 302 struct file *file;
296 u8 *data; 303 u8 *data;
297 u8 *enckey = (u8 *)key->payload.data[big_key_data]; 304 u8 *enckey = (u8 *)key->payload.data[big_key_data];
298 size_t enclen = ALIGN(datalen, crypto_skcipher_blocksize(big_key_skcipher)); 305 size_t enclen = datalen + ENC_AUTHTAG_SIZE;
299 loff_t pos = 0; 306 loff_t pos = 0;
300 307
301 data = kmalloc(enclen, GFP_KERNEL); 308 data = kmalloc(enclen, GFP_KERNEL);
@@ -328,7 +335,7 @@ long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
328err_fput: 335err_fput:
329 fput(file); 336 fput(file);
330error: 337error:
331 kfree(data); 338 kzfree(data);
332 } else { 339 } else {
333 ret = datalen; 340 ret = datalen;
334 if (copy_to_user(buffer, key->payload.data[big_key_data], 341 if (copy_to_user(buffer, key->payload.data[big_key_data],
@@ -344,47 +351,31 @@ error:
344 */ 351 */
345static int __init big_key_init(void) 352static int __init big_key_init(void)
346{ 353{
347 struct crypto_skcipher *cipher;
348 struct crypto_rng *rng;
349 int ret; 354 int ret;
350 355
351 rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
352 if (IS_ERR(rng)) {
353 pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
354 return PTR_ERR(rng);
355 }
356
357 big_key_rng = rng;
358
359 /* seed RNG */
360 ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
361 if (ret) {
362 pr_err("Can't reset rng: %d\n", ret);
363 goto error_rng;
364 }
365
366 /* init block cipher */ 356 /* init block cipher */
367 cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC); 357 big_key_aead = crypto_alloc_aead(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
368 if (IS_ERR(cipher)) { 358 if (IS_ERR(big_key_aead)) {
369 ret = PTR_ERR(cipher); 359 ret = PTR_ERR(big_key_aead);
370 pr_err("Can't alloc crypto: %d\n", ret); 360 pr_err("Can't alloc crypto: %d\n", ret);
371 goto error_rng; 361 return ret;
362 }
363 ret = crypto_aead_setauthsize(big_key_aead, ENC_AUTHTAG_SIZE);
364 if (ret < 0) {
365 pr_err("Can't set crypto auth tag len: %d\n", ret);
366 goto free_aead;
372 } 367 }
373
374 big_key_skcipher = cipher;
375 368
376 ret = register_key_type(&key_type_big_key); 369 ret = register_key_type(&key_type_big_key);
377 if (ret < 0) { 370 if (ret < 0) {
378 pr_err("Can't register type: %d\n", ret); 371 pr_err("Can't register type: %d\n", ret);
379 goto error_cipher; 372 goto free_aead;
380 } 373 }
381 374
382 return 0; 375 return 0;
383 376
384error_cipher: 377free_aead:
385 crypto_free_skcipher(big_key_skcipher); 378 crypto_free_aead(big_key_aead);
386error_rng:
387 crypto_free_rng(big_key_rng);
388 return ret; 379 return ret;
389} 380}
390 381
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 69855ba0d3b3..d92cbf9687c3 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
309 309
310 down_read(&ukey->sem); 310 down_read(&ukey->sem);
311 upayload = user_key_payload_locked(ukey); 311 upayload = user_key_payload_locked(ukey);
312 if (!upayload) {
313 /* key was revoked before we acquired its semaphore */
314 up_read(&ukey->sem);
315 key_put(ukey);
316 ukey = ERR_PTR(-EKEYREVOKED);
317 goto error;
318 }
312 *master_key = upayload->data; 319 *master_key = upayload->data;
313 *master_keylen = upayload->datalen; 320 *master_keylen = upayload->datalen;
314error: 321error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
847 size_t datalen = prep->datalen; 854 size_t datalen = prep->datalen;
848 int ret = 0; 855 int ret = 0;
849 856
850 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 857 if (key_is_negative(key))
851 return -ENOKEY; 858 return -ENOKEY;
852 if (datalen <= 0 || datalen > 32767 || !prep->data) 859 if (datalen <= 0 || datalen > 32767 || !prep->data)
853 return -EINVAL; 860 return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 87cb260e4890..f01d48cb3de1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
129 while (!list_empty(keys)) { 129 while (!list_empty(keys)) {
130 struct key *key = 130 struct key *key =
131 list_entry(keys->next, struct key, graveyard_link); 131 list_entry(keys->next, struct key, graveyard_link);
132 short state = key->state;
133
132 list_del(&key->graveyard_link); 134 list_del(&key->graveyard_link);
133 135
134 kdebug("- %u", key->serial); 136 kdebug("- %u", key->serial);
135 key_check(key); 137 key_check(key);
136 138
137 /* Throw away the key data if the key is instantiated */ 139 /* Throw away the key data if the key is instantiated */
138 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && 140 if (state == KEY_IS_POSITIVE && key->type->destroy)
139 !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
140 key->type->destroy)
141 key->type->destroy(key); 141 key->type->destroy(key);
142 142
143 security_key_free(key); 143 security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
151 } 151 }
152 152
153 atomic_dec(&key->user->nkeys); 153 atomic_dec(&key->user->nkeys);
154 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 154 if (state != KEY_IS_UNINSTANTIATED)
155 atomic_dec(&key->user->nikeys); 155 atomic_dec(&key->user->nikeys);
156 156
157 key_user_put(key->user); 157 key_user_put(key->user);
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 1c02c6547038..503adbae7b0d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -141,7 +141,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
141extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx); 141extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
142extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx); 142extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
143 143
144extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); 144extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
145 145
146extern int install_user_keyrings(void); 146extern int install_user_keyrings(void);
147extern int install_thread_keyring_to_cred(struct cred *); 147extern int install_thread_keyring_to_cred(struct cred *);
diff --git a/security/keys/key.c b/security/keys/key.c
index 83da68d98b40..83bf4b4afd49 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -54,10 +54,10 @@ void __key_check(const struct key *key)
54struct key_user *key_user_lookup(kuid_t uid) 54struct key_user *key_user_lookup(kuid_t uid)
55{ 55{
56 struct key_user *candidate = NULL, *user; 56 struct key_user *candidate = NULL, *user;
57 struct rb_node *parent = NULL; 57 struct rb_node *parent, **p;
58 struct rb_node **p;
59 58
60try_again: 59try_again:
60 parent = NULL;
61 p = &key_user_tree.rb_node; 61 p = &key_user_tree.rb_node;
62 spin_lock(&key_user_lock); 62 spin_lock(&key_user_lock);
63 63
@@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
302 key->flags |= 1 << KEY_FLAG_IN_QUOTA; 302 key->flags |= 1 << KEY_FLAG_IN_QUOTA;
303 if (flags & KEY_ALLOC_BUILT_IN) 303 if (flags & KEY_ALLOC_BUILT_IN)
304 key->flags |= 1 << KEY_FLAG_BUILTIN; 304 key->flags |= 1 << KEY_FLAG_BUILTIN;
305 if (flags & KEY_ALLOC_UID_KEYRING)
306 key->flags |= 1 << KEY_FLAG_UID_KEYRING;
305 307
306#ifdef KEY_DEBUGGING 308#ifdef KEY_DEBUGGING
307 key->magic = KEY_DEBUG_MAGIC; 309 key->magic = KEY_DEBUG_MAGIC;
@@ -400,6 +402,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
400EXPORT_SYMBOL(key_payload_reserve); 402EXPORT_SYMBOL(key_payload_reserve);
401 403
402/* 404/*
405 * Change the key state to being instantiated.
406 */
407static void mark_key_instantiated(struct key *key, int reject_error)
408{
409 /* Commit the payload before setting the state; barrier versus
410 * key_read_state().
411 */
412 smp_store_release(&key->state,
413 (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
414}
415
416/*
403 * Instantiate a key and link it into the target keyring atomically. Must be 417 * Instantiate a key and link it into the target keyring atomically. Must be
404 * called with the target keyring's semaphore writelocked. The target key's 418 * called with the target keyring's semaphore writelocked. The target key's
405 * semaphore need not be locked as instantiation is serialised by 419 * semaphore need not be locked as instantiation is serialised by
@@ -422,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
422 mutex_lock(&key_construction_mutex); 436 mutex_lock(&key_construction_mutex);
423 437
424 /* can't instantiate twice */ 438 /* can't instantiate twice */
425 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 439 if (key->state == KEY_IS_UNINSTANTIATED) {
426 /* instantiate the key */ 440 /* instantiate the key */
427 ret = key->type->instantiate(key, prep); 441 ret = key->type->instantiate(key, prep);
428 442
429 if (ret == 0) { 443 if (ret == 0) {
430 /* mark the key as being instantiated */ 444 /* mark the key as being instantiated */
431 atomic_inc(&key->user->nikeys); 445 atomic_inc(&key->user->nikeys);
432 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 446 mark_key_instantiated(key, 0);
433 447
434 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 448 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
435 awaken = 1; 449 awaken = 1;
@@ -575,13 +589,10 @@ int key_reject_and_link(struct key *key,
575 mutex_lock(&key_construction_mutex); 589 mutex_lock(&key_construction_mutex);
576 590
577 /* can't instantiate twice */ 591 /* can't instantiate twice */
578 if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 592 if (key->state == KEY_IS_UNINSTANTIATED) {
579 /* mark the key as being negatively instantiated */ 593 /* mark the key as being negatively instantiated */
580 atomic_inc(&key->user->nikeys); 594 atomic_inc(&key->user->nikeys);
581 key->reject_error = -error; 595 mark_key_instantiated(key, -error);
582 smp_wmb();
583 set_bit(KEY_FLAG_NEGATIVE, &key->flags);
584 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
585 now = current_kernel_time(); 596 now = current_kernel_time();
586 key->expiry = now.tv_sec + timeout; 597 key->expiry = now.tv_sec + timeout;
587 key_schedule_gc(key->expiry + key_gc_delay); 598 key_schedule_gc(key->expiry + key_gc_delay);
@@ -750,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
750 761
751 ret = key->type->update(key, prep); 762 ret = key->type->update(key, prep);
752 if (ret == 0) 763 if (ret == 0)
753 /* updating a negative key instantiates it */ 764 /* Updating a negative key positively instantiates it */
754 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 765 mark_key_instantiated(key, 0);
755 766
756 up_write(&key->sem); 767 up_write(&key->sem);
757 768
@@ -934,6 +945,16 @@ error:
934 */ 945 */
935 __key_link_end(keyring, &index_key, edit); 946 __key_link_end(keyring, &index_key, edit);
936 947
948 key = key_ref_to_ptr(key_ref);
949 if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
950 ret = wait_for_key_construction(key, true);
951 if (ret < 0) {
952 key_ref_put(key_ref);
953 key_ref = ERR_PTR(ret);
954 goto error_free_prep;
955 }
956 }
957
937 key_ref = __key_update(key_ref, &prep); 958 key_ref = __key_update(key_ref, &prep);
938 goto error_free_prep; 959 goto error_free_prep;
939} 960}
@@ -984,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
984 1005
985 ret = key->type->update(key, &prep); 1006 ret = key->type->update(key, &prep);
986 if (ret == 0) 1007 if (ret == 0)
987 /* updating a negative key instantiates it */ 1008 /* Updating a negative key positively instantiates it */
988 clear_bit(KEY_FLAG_NEGATIVE, &key->flags); 1009 mark_key_instantiated(key, 0);
989 1010
990 up_write(&key->sem); 1011 up_write(&key->sem);
991 1012
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ab0b337c84b4..76d22f726ae4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,12 +766,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
766 766
767 key = key_ref_to_ptr(key_ref); 767 key = key_ref_to_ptr(key_ref);
768 768
769 ret = key_read_state(key);
770 if (ret < 0)
771 goto error2; /* Negatively instantiated */
772
769 /* see if we can read it directly */ 773 /* see if we can read it directly */
770 ret = key_permission(key_ref, KEY_NEED_READ); 774 ret = key_permission(key_ref, KEY_NEED_READ);
771 if (ret == 0) 775 if (ret == 0)
772 goto can_read_key; 776 goto can_read_key;
773 if (ret != -EACCES) 777 if (ret != -EACCES)
774 goto error; 778 goto error2;
775 779
776 /* we can't; see if it's searchable from this process's keyrings 780 /* we can't; see if it's searchable from this process's keyrings
777 * - we automatically take account of the fact that it may be 781 * - we automatically take account of the fact that it may be
@@ -896,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
896 atomic_dec(&key->user->nkeys); 900 atomic_dec(&key->user->nkeys);
897 atomic_inc(&newowner->nkeys); 901 atomic_inc(&newowner->nkeys);
898 902
899 if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { 903 if (key->state != KEY_IS_UNINSTANTIATED) {
900 atomic_dec(&key->user->nikeys); 904 atomic_dec(&key->user->nikeys);
901 atomic_inc(&newowner->nikeys); 905 atomic_inc(&newowner->nikeys);
902 } 906 }
@@ -1406,11 +1410,9 @@ long keyctl_assume_authority(key_serial_t id)
1406 } 1410 }
1407 1411
1408 ret = keyctl_change_reqkey_auth(authkey); 1412 ret = keyctl_change_reqkey_auth(authkey);
1409 if (ret < 0) 1413 if (ret == 0)
1410 goto error; 1414 ret = authkey->serial;
1411 key_put(authkey); 1415 key_put(authkey);
1412
1413 ret = authkey->serial;
1414error: 1416error:
1415 return ret; 1417 return ret;
1416} 1418}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index de81793f9920..a7e51f793867 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
414 else 414 else
415 seq_puts(m, "[anon]"); 415 seq_puts(m, "[anon]");
416 416
417 if (key_is_instantiated(keyring)) { 417 if (key_is_positive(keyring)) {
418 if (keyring->keys.nr_leaves_on_tree != 0) 418 if (keyring->keys.nr_leaves_on_tree != 0)
419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); 419 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
420 else 420 else
@@ -423,7 +423,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
423} 423}
424 424
425struct keyring_read_iterator_context { 425struct keyring_read_iterator_context {
426 size_t qty; 426 size_t buflen;
427 size_t count; 427 size_t count;
428 key_serial_t __user *buffer; 428 key_serial_t __user *buffer;
429}; 429};
@@ -435,9 +435,9 @@ static int keyring_read_iterator(const void *object, void *data)
435 int ret; 435 int ret;
436 436
437 kenter("{%s,%d},,{%zu/%zu}", 437 kenter("{%s,%d},,{%zu/%zu}",
438 key->type->name, key->serial, ctx->count, ctx->qty); 438 key->type->name, key->serial, ctx->count, ctx->buflen);
439 439
440 if (ctx->count >= ctx->qty) 440 if (ctx->count >= ctx->buflen)
441 return 1; 441 return 1;
442 442
443 ret = put_user(key->serial, ctx->buffer); 443 ret = put_user(key->serial, ctx->buffer);
@@ -472,16 +472,12 @@ static long keyring_read(const struct key *keyring,
472 return 0; 472 return 0;
473 473
474 /* Calculate how much data we could return */ 474 /* Calculate how much data we could return */
475 ctx.qty = nr_keys * sizeof(key_serial_t);
476
477 if (!buffer || !buflen) 475 if (!buffer || !buflen)
478 return ctx.qty; 476 return nr_keys * sizeof(key_serial_t);
479
480 if (buflen > ctx.qty)
481 ctx.qty = buflen;
482 477
483 /* Copy the IDs of the subscribed keys into the buffer */ 478 /* Copy the IDs of the subscribed keys into the buffer */
484 ctx.buffer = (key_serial_t __user *)buffer; 479 ctx.buffer = (key_serial_t __user *)buffer;
480 ctx.buflen = buflen;
485 ctx.count = 0; 481 ctx.count = 0;
486 ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); 482 ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
487 if (ret < 0) { 483 if (ret < 0) {
@@ -557,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
557{ 553{
558 struct keyring_search_context *ctx = iterator_data; 554 struct keyring_search_context *ctx = iterator_data;
559 const struct key *key = keyring_ptr_to_key(object); 555 const struct key *key = keyring_ptr_to_key(object);
560 unsigned long kflags = key->flags; 556 unsigned long kflags = READ_ONCE(key->flags);
557 short state = READ_ONCE(key->state);
561 558
562 kenter("{%d}", key->serial); 559 kenter("{%d}", key->serial);
563 560
@@ -569,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
569 566
570 /* skip invalidated, revoked and expired keys */ 567 /* skip invalidated, revoked and expired keys */
571 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 568 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
569 time_t expiry = READ_ONCE(key->expiry);
570
572 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 571 if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
573 (1 << KEY_FLAG_REVOKED))) { 572 (1 << KEY_FLAG_REVOKED))) {
574 ctx->result = ERR_PTR(-EKEYREVOKED); 573 ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -576,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
576 goto skipped; 575 goto skipped;
577 } 576 }
578 577
579 if (key->expiry && ctx->now.tv_sec >= key->expiry) { 578 if (expiry && ctx->now.tv_sec >= expiry) {
580 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) 579 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
581 ctx->result = ERR_PTR(-EKEYEXPIRED); 580 ctx->result = ERR_PTR(-EKEYEXPIRED);
582 kleave(" = %d [expire]", ctx->skipped_ret); 581 kleave(" = %d [expire]", ctx->skipped_ret);
@@ -601,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
601 600
602 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 601 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
603 /* we set a different error code if we pass a negative key */ 602 /* we set a different error code if we pass a negative key */
604 if (kflags & (1 << KEY_FLAG_NEGATIVE)) { 603 if (state < 0) {
605 smp_rmb(); 604 ctx->result = ERR_PTR(state);
606 ctx->result = ERR_PTR(key->reject_error);
607 kleave(" = %d [neg]", ctx->skipped_ret); 605 kleave(" = %d [neg]", ctx->skipped_ret);
608 goto skipped; 606 goto skipped;
609 } 607 }
@@ -1101,15 +1099,15 @@ found:
1101/* 1099/*
1102 * Find a keyring with the specified name. 1100 * Find a keyring with the specified name.
1103 * 1101 *
1104 * All named keyrings in the current user namespace are searched, provided they 1102 * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
1105 * grant Search permission directly to the caller (unless this check is 1103 * user in the current user namespace are considered. If @uid_keyring is %true,
1106 * skipped). Keyrings whose usage points have reached zero or who have been 1104 * the keyring additionally must have been allocated as a user or user session
1107 * revoked are skipped. 1105 * keyring; otherwise, it must grant Search permission directly to the caller.
1108 * 1106 *
1109 * Returns a pointer to the keyring with the keyring's refcount having being 1107 * Returns a pointer to the keyring with the keyring's refcount having being
1110 * incremented on success. -ENOKEY is returned if a key could not be found. 1108 * incremented on success. -ENOKEY is returned if a key could not be found.
1111 */ 1109 */
1112struct key *find_keyring_by_name(const char *name, bool skip_perm_check) 1110struct key *find_keyring_by_name(const char *name, bool uid_keyring)
1113{ 1111{
1114 struct key *keyring; 1112 struct key *keyring;
1115 int bucket; 1113 int bucket;
@@ -1137,10 +1135,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
1137 if (strcmp(keyring->description, name) != 0) 1135 if (strcmp(keyring->description, name) != 0)
1138 continue; 1136 continue;
1139 1137
1140 if (!skip_perm_check && 1138 if (uid_keyring) {
1141 key_permission(make_key_ref(keyring, 0), 1139 if (!test_bit(KEY_FLAG_UID_KEYRING,
1142 KEY_NEED_SEARCH) < 0) 1140 &keyring->flags))
1143 continue; 1141 continue;
1142 } else {
1143 if (key_permission(make_key_ref(keyring, 0),
1144 KEY_NEED_SEARCH) < 0)
1145 continue;
1146 }
1144 1147
1145 /* we've got a match but we might end up racing with 1148 /* we've got a match but we might end up racing with
1146 * key_cleanup() if the keyring is currently 'dead' 1149 * key_cleanup() if the keyring is currently 'dead'
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 732cc0beffdf..a72b4dd70c8a 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
88 */ 88 */
89int key_validate(const struct key *key) 89int key_validate(const struct key *key)
90{ 90{
91 unsigned long flags = key->flags; 91 unsigned long flags = READ_ONCE(key->flags);
92 time_t expiry = READ_ONCE(key->expiry);
92 93
93 if (flags & (1 << KEY_FLAG_INVALIDATED)) 94 if (flags & (1 << KEY_FLAG_INVALIDATED))
94 return -ENOKEY; 95 return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
99 return -EKEYREVOKED; 100 return -EKEYREVOKED;
100 101
101 /* check it hasn't expired */ 102 /* check it hasn't expired */
102 if (key->expiry) { 103 if (expiry) {
103 struct timespec now = current_kernel_time(); 104 struct timespec now = current_kernel_time();
104 if (now.tv_sec >= key->expiry) 105 if (now.tv_sec >= expiry)
105 return -EKEYEXPIRED; 106 return -EKEYEXPIRED;
106 } 107 }
107 108
diff --git a/security/keys/proc.c b/security/keys/proc.c
index bf08d02b6646..6d1fcbba1e09 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -179,15 +179,18 @@ static int proc_keys_show(struct seq_file *m, void *v)
179 struct rb_node *_p = v; 179 struct rb_node *_p = v;
180 struct key *key = rb_entry(_p, struct key, serial_node); 180 struct key *key = rb_entry(_p, struct key, serial_node);
181 struct timespec now; 181 struct timespec now;
182 time_t expiry;
182 unsigned long timo; 183 unsigned long timo;
184 unsigned long flags;
183 key_ref_t key_ref, skey_ref; 185 key_ref_t key_ref, skey_ref;
184 char xbuf[16]; 186 char xbuf[16];
187 short state;
185 int rc; 188 int rc;
186 189
187 struct keyring_search_context ctx = { 190 struct keyring_search_context ctx = {
188 .index_key.type = key->type, 191 .index_key.type = key->type,
189 .index_key.description = key->description, 192 .index_key.description = key->description,
190 .cred = current_cred(), 193 .cred = m->file->f_cred,
191 .match_data.cmp = lookup_user_key_possessed, 194 .match_data.cmp = lookup_user_key_possessed,
192 .match_data.raw_data = key, 195 .match_data.raw_data = key,
193 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 196 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
@@ -207,11 +210,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
207 } 210 }
208 } 211 }
209 212
210 /* check whether the current task is allowed to view the key (assuming 213 /* check whether the current task is allowed to view the key */
211 * non-possession)
212 * - the caller holds a spinlock, and thus the RCU read lock, making our
213 * access to __current_cred() safe
214 */
215 rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); 214 rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
216 if (rc < 0) 215 if (rc < 0)
217 return 0; 216 return 0;
@@ -221,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
221 rcu_read_lock(); 220 rcu_read_lock();
222 221
223 /* come up with a suitable timeout value */ 222 /* come up with a suitable timeout value */
224 if (key->expiry == 0) { 223 expiry = READ_ONCE(key->expiry);
224 if (expiry == 0) {
225 memcpy(xbuf, "perm", 5); 225 memcpy(xbuf, "perm", 5);
226 } else if (now.tv_sec >= key->expiry) { 226 } else if (now.tv_sec >= expiry) {
227 memcpy(xbuf, "expd", 5); 227 memcpy(xbuf, "expd", 5);
228 } else { 228 } else {
229 timo = key->expiry - now.tv_sec; 229 timo = expiry - now.tv_sec;
230 230
231 if (timo < 60) 231 if (timo < 60)
232 sprintf(xbuf, "%lus", timo); 232 sprintf(xbuf, "%lus", timo);
@@ -240,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
240 sprintf(xbuf, "%luw", timo / (60*60*24*7)); 240 sprintf(xbuf, "%luw", timo / (60*60*24*7));
241 } 241 }
242 242
243#define showflag(KEY, LETTER, FLAG) \ 243 state = key_read_state(key);
244 (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') 244
245#define showflag(FLAGS, LETTER, FLAG) \
246 ((FLAGS & (1 << FLAG)) ? LETTER : '-')
245 247
248 flags = READ_ONCE(key->flags);
246 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", 249 seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
247 key->serial, 250 key->serial,
248 showflag(key, 'I', KEY_FLAG_INSTANTIATED), 251 state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
249 showflag(key, 'R', KEY_FLAG_REVOKED), 252 showflag(flags, 'R', KEY_FLAG_REVOKED),
250 showflag(key, 'D', KEY_FLAG_DEAD), 253 showflag(flags, 'D', KEY_FLAG_DEAD),
251 showflag(key, 'Q', KEY_FLAG_IN_QUOTA), 254 showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
252 showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), 255 showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
253 showflag(key, 'N', KEY_FLAG_NEGATIVE), 256 state < 0 ? 'N' : '-',
254 showflag(key, 'i', KEY_FLAG_INVALIDATED), 257 showflag(flags, 'i', KEY_FLAG_INVALIDATED),
255 refcount_read(&key->usage), 258 refcount_read(&key->usage),
256 xbuf, 259 xbuf,
257 key->perm, 260 key->perm,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 86bced9fdbdf..740affd65ee9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -77,7 +77,8 @@ int install_user_keyrings(void)
77 if (IS_ERR(uid_keyring)) { 77 if (IS_ERR(uid_keyring)) {
78 uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, 78 uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
79 cred, user_keyring_perm, 79 cred, user_keyring_perm,
80 KEY_ALLOC_IN_QUOTA, 80 KEY_ALLOC_UID_KEYRING |
81 KEY_ALLOC_IN_QUOTA,
81 NULL, NULL); 82 NULL, NULL);
82 if (IS_ERR(uid_keyring)) { 83 if (IS_ERR(uid_keyring)) {
83 ret = PTR_ERR(uid_keyring); 84 ret = PTR_ERR(uid_keyring);
@@ -94,7 +95,8 @@ int install_user_keyrings(void)
94 session_keyring = 95 session_keyring =
95 keyring_alloc(buf, user->uid, INVALID_GID, 96 keyring_alloc(buf, user->uid, INVALID_GID,
96 cred, user_keyring_perm, 97 cred, user_keyring_perm,
97 KEY_ALLOC_IN_QUOTA, 98 KEY_ALLOC_UID_KEYRING |
99 KEY_ALLOC_IN_QUOTA,
98 NULL, NULL); 100 NULL, NULL);
99 if (IS_ERR(session_keyring)) { 101 if (IS_ERR(session_keyring)) {
100 ret = PTR_ERR(session_keyring); 102 ret = PTR_ERR(session_keyring);
@@ -728,7 +730,7 @@ try_again:
728 730
729 ret = -EIO; 731 ret = -EIO;
730 if (!(lflags & KEY_LOOKUP_PARTIAL) && 732 if (!(lflags & KEY_LOOKUP_PARTIAL) &&
731 !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 733 key_read_state(key) == KEY_IS_UNINSTANTIATED)
732 goto invalid_key; 734 goto invalid_key;
733 735
734 /* check the permissions */ 736 /* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 63e63a42db3c..e8036cd0ad54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); 595 intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
596 if (ret) 596 if (ret)
597 return -ERESTARTSYS; 597 return -ERESTARTSYS;
598 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { 598 ret = key_read_state(key);
599 smp_rmb(); 599 if (ret < 0)
600 return key->reject_error; 600 return ret;
601 }
602 return key_validate(key); 601 return key_validate(key);
603} 602}
604EXPORT_SYMBOL(wait_for_key_construction); 603EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index afe9d22ab361..424e1d90412e 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
73 73
74 seq_puts(m, "key:"); 74 seq_puts(m, "key:");
75 seq_puts(m, key->description); 75 seq_puts(m, key->description);
76 if (key_is_instantiated(key)) 76 if (key_is_positive(key))
77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); 77 seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
78} 78}
79 79
@@ -120,6 +120,18 @@ static void request_key_auth_revoke(struct key *key)
120 } 120 }
121} 121}
122 122
123static void free_request_key_auth(struct request_key_auth *rka)
124{
125 if (!rka)
126 return;
127 key_put(rka->target_key);
128 key_put(rka->dest_keyring);
129 if (rka->cred)
130 put_cred(rka->cred);
131 kfree(rka->callout_info);
132 kfree(rka);
133}
134
123/* 135/*
124 * Destroy an instantiation authorisation token key. 136 * Destroy an instantiation authorisation token key.
125 */ 137 */
@@ -129,15 +141,7 @@ static void request_key_auth_destroy(struct key *key)
129 141
130 kenter("{%d}", key->serial); 142 kenter("{%d}", key->serial);
131 143
132 if (rka->cred) { 144 free_request_key_auth(rka);
133 put_cred(rka->cred);
134 rka->cred = NULL;
135 }
136
137 key_put(rka->target_key);
138 key_put(rka->dest_keyring);
139 kfree(rka->callout_info);
140 kfree(rka);
141} 145}
142 146
143/* 147/*
@@ -151,22 +155,18 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
151 const struct cred *cred = current->cred; 155 const struct cred *cred = current->cred;
152 struct key *authkey = NULL; 156 struct key *authkey = NULL;
153 char desc[20]; 157 char desc[20];
154 int ret; 158 int ret = -ENOMEM;
155 159
156 kenter("%d,", target->serial); 160 kenter("%d,", target->serial);
157 161
158 /* allocate a auth record */ 162 /* allocate a auth record */
159 rka = kmalloc(sizeof(*rka), GFP_KERNEL); 163 rka = kzalloc(sizeof(*rka), GFP_KERNEL);
160 if (!rka) { 164 if (!rka)
161 kleave(" = -ENOMEM"); 165 goto error;
162 return ERR_PTR(-ENOMEM); 166 rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL);
163 } 167 if (!rka->callout_info)
164 rka->callout_info = kmalloc(callout_len, GFP_KERNEL); 168 goto error_free_rka;
165 if (!rka->callout_info) { 169 rka->callout_len = callout_len;
166 kleave(" = -ENOMEM");
167 kfree(rka);
168 return ERR_PTR(-ENOMEM);
169 }
170 170
171 /* see if the calling process is already servicing the key request of 171 /* see if the calling process is already servicing the key request of
172 * another process */ 172 * another process */
@@ -176,8 +176,12 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
176 176
177 /* if the auth key has been revoked, then the key we're 177 /* if the auth key has been revoked, then the key we're
178 * servicing is already instantiated */ 178 * servicing is already instantiated */
179 if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) 179 if (test_bit(KEY_FLAG_REVOKED,
180 goto auth_key_revoked; 180 &cred->request_key_auth->flags)) {
181 up_read(&cred->request_key_auth->sem);
182 ret = -EKEYREVOKED;
183 goto error_free_rka;
184 }
181 185
182 irka = cred->request_key_auth->payload.data[0]; 186 irka = cred->request_key_auth->payload.data[0];
183 rka->cred = get_cred(irka->cred); 187 rka->cred = get_cred(irka->cred);
@@ -193,8 +197,6 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
193 197
194 rka->target_key = key_get(target); 198 rka->target_key = key_get(target);
195 rka->dest_keyring = key_get(dest_keyring); 199 rka->dest_keyring = key_get(dest_keyring);
196 memcpy(rka->callout_info, callout_info, callout_len);
197 rka->callout_len = callout_len;
198 200
199 /* allocate the auth key */ 201 /* allocate the auth key */
200 sprintf(desc, "%x", target->serial); 202 sprintf(desc, "%x", target->serial);
@@ -205,32 +207,22 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info,
205 KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL); 207 KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
206 if (IS_ERR(authkey)) { 208 if (IS_ERR(authkey)) {
207 ret = PTR_ERR(authkey); 209 ret = PTR_ERR(authkey);
208 goto error_alloc; 210 goto error_free_rka;
209 } 211 }
210 212
211 /* construct the auth key */ 213 /* construct the auth key */
212 ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); 214 ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
213 if (ret < 0) 215 if (ret < 0)
214 goto error_inst; 216 goto error_put_authkey;
215 217
216 kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage)); 218 kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage));
217 return authkey; 219 return authkey;
218 220
219auth_key_revoked: 221error_put_authkey:
220 up_read(&cred->request_key_auth->sem);
221 kfree(rka->callout_info);
222 kfree(rka);
223 kleave("= -EKEYREVOKED");
224 return ERR_PTR(-EKEYREVOKED);
225
226error_inst:
227 key_revoke(authkey);
228 key_put(authkey); 222 key_put(authkey);
229error_alloc: 223error_free_rka:
230 key_put(rka->target_key); 224 free_request_key_auth(rka);
231 key_put(rka->dest_keyring); 225error:
232 kfree(rka->callout_info);
233 kfree(rka);
234 kleave("= %d", ret); 226 kleave("= %d", ret);
235 return ERR_PTR(ret); 227 return ERR_PTR(ret);
236} 228}
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ddfaebf60fc8..bd85315cbfeb 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1066 char *datablob; 1066 char *datablob;
1067 int ret = 0; 1067 int ret = 0;
1068 1068
1069 if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 1069 if (key_is_negative(key))
1070 return -ENOKEY; 1070 return -ENOKEY;
1071 p = key->payload.data[0]; 1071 p = key->payload.data[0];
1072 if (!p->migratable) 1072 if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 3d8c68eba516..9f558bedba23 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
114 114
115 /* attach the new data, displacing the old */ 115 /* attach the new data, displacing the old */
116 key->expiry = prep->expiry; 116 key->expiry = prep->expiry;
117 if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) 117 if (key_is_positive(key))
118 zap = dereference_key_locked(key); 118 zap = dereference_key_locked(key);
119 rcu_assign_keypointer(key, prep->payload.data[0]); 119 rcu_assign_keypointer(key, prep->payload.data[0]);
120 prep->payload.data[0] = NULL; 120 prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
162void user_describe(const struct key *key, struct seq_file *m) 162void user_describe(const struct key *key, struct seq_file *m)
163{ 163{
164 seq_puts(m, key->description); 164 seq_puts(m, key->description);
165 if (key_is_instantiated(key)) 165 if (key_is_positive(key))
166 seq_printf(m, ": %u", key->datalen); 166 seq_printf(m, ": %u", key->datalen);
167} 167}
168 168
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 319add31b4a4..286171a16ed2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1473,7 +1473,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
1473 * @inode: the object 1473 * @inode: the object
1474 * @name: attribute name 1474 * @name: attribute name
1475 * @buffer: where to put the result 1475 * @buffer: where to put the result
1476 * @alloc: unused 1476 * @alloc: duplicate memory
1477 * 1477 *
1478 * Returns the size of the attribute or an error code 1478 * Returns the size of the attribute or an error code
1479 */ 1479 */
@@ -1486,43 +1486,38 @@ static int smack_inode_getsecurity(struct inode *inode,
1486 struct super_block *sbp; 1486 struct super_block *sbp;
1487 struct inode *ip = (struct inode *)inode; 1487 struct inode *ip = (struct inode *)inode;
1488 struct smack_known *isp; 1488 struct smack_known *isp;
1489 int ilen;
1490 int rc = 0;
1491 1489
1492 if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { 1490 if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
1493 isp = smk_of_inode(inode); 1491 isp = smk_of_inode(inode);
1494 ilen = strlen(isp->smk_known); 1492 else {
1495 *buffer = isp->smk_known; 1493 /*
1496 return ilen; 1494 * The rest of the Smack xattrs are only on sockets.
1497 } 1495 */
1496 sbp = ip->i_sb;
1497 if (sbp->s_magic != SOCKFS_MAGIC)
1498 return -EOPNOTSUPP;
1498 1499
1499 /* 1500 sock = SOCKET_I(ip);
1500 * The rest of the Smack xattrs are only on sockets. 1501 if (sock == NULL || sock->sk == NULL)
1501 */ 1502 return -EOPNOTSUPP;
1502 sbp = ip->i_sb;
1503 if (sbp->s_magic != SOCKFS_MAGIC)
1504 return -EOPNOTSUPP;
1505 1503
1506 sock = SOCKET_I(ip); 1504 ssp = sock->sk->sk_security;
1507 if (sock == NULL || sock->sk == NULL)
1508 return -EOPNOTSUPP;
1509
1510 ssp = sock->sk->sk_security;
1511 1505
1512 if (strcmp(name, XATTR_SMACK_IPIN) == 0) 1506 if (strcmp(name, XATTR_SMACK_IPIN) == 0)
1513 isp = ssp->smk_in; 1507 isp = ssp->smk_in;
1514 else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) 1508 else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
1515 isp = ssp->smk_out; 1509 isp = ssp->smk_out;
1516 else 1510 else
1517 return -EOPNOTSUPP; 1511 return -EOPNOTSUPP;
1512 }
1518 1513
1519 ilen = strlen(isp->smk_known); 1514 if (alloc) {
1520 if (rc == 0) { 1515 *buffer = kstrdup(isp->smk_known, GFP_KERNEL);
1521 *buffer = isp->smk_known; 1516 if (*buffer == NULL)
1522 rc = ilen; 1517 return -ENOMEM;
1523 } 1518 }
1524 1519
1525 return rc; 1520 return strlen(isp->smk_known);
1526} 1521}
1527 1522
1528 1523
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index fec1dfdb14ad..4490a699030b 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = {
948static int snd_compress_dev_register(struct snd_device *device) 948static int snd_compress_dev_register(struct snd_device *device)
949{ 949{
950 int ret = -EINVAL; 950 int ret = -EINVAL;
951 char str[16];
952 struct snd_compr *compr; 951 struct snd_compr *compr;
953 952
954 if (snd_BUG_ON(!device || !device->device_data)) 953 if (snd_BUG_ON(!device || !device->device_data))
955 return -EBADFD; 954 return -EBADFD;
956 compr = device->device_data; 955 compr = device->device_data;
957 956
958 pr_debug("reg %s for device %s, direction %d\n", str, compr->name, 957 pr_debug("reg device %s, direction %d\n", compr->name,
959 compr->direction); 958 compr->direction);
960 /* register compressed device */ 959 /* register compressed device */
961 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, 960 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 3a1cc7b97e46..b719d0bd833e 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 {
547 u32 pad2; /* alignment */ 547 u32 pad2; /* alignment */
548 struct timespec tstamp; 548 struct timespec tstamp;
549 s32 suspended_state; 549 s32 suspended_state;
550 s32 pad3;
550 struct timespec audio_tstamp; 551 struct timespec audio_tstamp;
551} __packed; 552} __packed;
552 553
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index ea2d0ae85bd3..6c9cba2166d9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1259 struct snd_seq_port_info *info = arg; 1259 struct snd_seq_port_info *info = arg;
1260 struct snd_seq_client_port *port; 1260 struct snd_seq_client_port *port;
1261 struct snd_seq_port_callback *callback; 1261 struct snd_seq_port_callback *callback;
1262 int port_idx;
1262 1263
1263 /* it is not allowed to create the port for an another client */ 1264 /* it is not allowed to create the port for an another client */
1264 if (info->addr.client != client->number) 1265 if (info->addr.client != client->number)
@@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1269 return -ENOMEM; 1270 return -ENOMEM;
1270 1271
1271 if (client->type == USER_CLIENT && info->kernel) { 1272 if (client->type == USER_CLIENT && info->kernel) {
1272 snd_seq_delete_port(client, port->addr.port); 1273 port_idx = port->addr.port;
1274 snd_seq_port_unlock(port);
1275 snd_seq_delete_port(client, port_idx);
1273 return -EINVAL; 1276 return -EINVAL;
1274 } 1277 }
1275 if (client->type == KERNEL_CLIENT) { 1278 if (client->type == KERNEL_CLIENT) {
@@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1290 1293
1291 snd_seq_set_port_info(port, info); 1294 snd_seq_set_port_info(port, info);
1292 snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); 1295 snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
1296 snd_seq_port_unlock(port);
1293 1297
1294 return 0; 1298 return 0;
1295} 1299}
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 0ff7926a5a69..cda64b489e42 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
23#include <sound/core.h> 23#include <sound/core.h>
24#include "seq_lock.h" 24#include "seq_lock.h"
25 25
26#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
27
28/* wait until all locks are released */ 26/* wait until all locks are released */
29void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line) 27void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
30{ 28{
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
41 } 39 }
42} 40}
43EXPORT_SYMBOL(snd_use_lock_sync_helper); 41EXPORT_SYMBOL(snd_use_lock_sync_helper);
44
45#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc2c9ef..ac38031c370e 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/sched.h> 4#include <linux/sched.h>
5 5
6#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
7
8typedef atomic_t snd_use_lock_t; 6typedef atomic_t snd_use_lock_t;
9 7
10/* initialize lock */ 8/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
20void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line); 18void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
21#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__) 19#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
22 20
23#else /* SMP || CONFIG_SND_DEBUG */
24
25typedef spinlock_t snd_use_lock_t; /* dummy */
26#define snd_use_lock_init(lockp) /**/
27#define snd_use_lock_use(lockp) /**/
28#define snd_use_lock_free(lockp) /**/
29#define snd_use_lock_sync(lockp) /**/
30
31#endif /* SMP || CONFIG_SND_DEBUG */
32
33#endif /* __SND_SEQ_LOCK_H */ 21#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 0a7020c82bfc..d21ece9f8d73 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
122} 122}
123 123
124 124
125/* create a port, port number is returned (-1 on failure) */ 125/* create a port, port number is returned (-1 on failure);
126 * the caller needs to unref the port via snd_seq_port_unlock() appropriately
127 */
126struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, 128struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
127 int port) 129 int port)
128{ 130{
@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
151 snd_use_lock_init(&new_port->use_lock); 153 snd_use_lock_init(&new_port->use_lock);
152 port_subs_info_init(&new_port->c_src); 154 port_subs_info_init(&new_port->c_src);
153 port_subs_info_init(&new_port->c_dest); 155 port_subs_info_init(&new_port->c_dest);
156 snd_use_lock_use(&new_port->use_lock);
154 157
155 num = port >= 0 ? port : 0; 158 num = port >= 0 ? port : 0;
156 mutex_lock(&client->ports_mutex); 159 mutex_lock(&client->ports_mutex);
@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
165 list_add_tail(&new_port->list, &p->list); 168 list_add_tail(&new_port->list, &p->list);
166 client->num_ports++; 169 client->num_ports++;
167 new_port->addr.port = num; /* store the port number in the port */ 170 new_port->addr.port = num; /* store the port number in the port */
171 sprintf(new_port->name, "port-%d", num);
168 write_unlock_irqrestore(&client->ports_lock, flags); 172 write_unlock_irqrestore(&client->ports_lock, flags);
169 mutex_unlock(&client->ports_mutex); 173 mutex_unlock(&client->ports_mutex);
170 sprintf(new_port->name, "port-%d", num);
171 174
172 return new_port; 175 return new_port;
173} 176}
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 8d93a4021c78..f48a4cd24ffc 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
77 * decode input event and put to read buffer of each opened file 77 * decode input event and put to read buffer of each opened file
78 */ 78 */
79static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, 79static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
80 struct snd_seq_event *ev) 80 struct snd_seq_event *ev,
81 bool atomic)
81{ 82{
82 struct snd_virmidi *vmidi; 83 struct snd_virmidi *vmidi;
83 unsigned char msg[4]; 84 unsigned char msg[4];
84 int len; 85 int len;
85 86
86 read_lock(&rdev->filelist_lock); 87 if (atomic)
88 read_lock(&rdev->filelist_lock);
89 else
90 down_read(&rdev->filelist_sem);
87 list_for_each_entry(vmidi, &rdev->filelist, list) { 91 list_for_each_entry(vmidi, &rdev->filelist, list) {
88 if (!vmidi->trigger) 92 if (!vmidi->trigger)
89 continue; 93 continue;
@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
97 snd_rawmidi_receive(vmidi->substream, msg, len); 101 snd_rawmidi_receive(vmidi->substream, msg, len);
98 } 102 }
99 } 103 }
100 read_unlock(&rdev->filelist_lock); 104 if (atomic)
105 read_unlock(&rdev->filelist_lock);
106 else
107 up_read(&rdev->filelist_sem);
101 108
102 return 0; 109 return 0;
103} 110}
@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
115 struct snd_virmidi_dev *rdev; 122 struct snd_virmidi_dev *rdev;
116 123
117 rdev = rmidi->private_data; 124 rdev = rmidi->private_data;
118 return snd_virmidi_dev_receive_event(rdev, ev); 125 return snd_virmidi_dev_receive_event(rdev, ev, true);
119} 126}
120#endif /* 0 */ 127#endif /* 0 */
121 128
@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
130 rdev = private_data; 137 rdev = private_data;
131 if (!(rdev->flags & SNDRV_VIRMIDI_USE)) 138 if (!(rdev->flags & SNDRV_VIRMIDI_USE))
132 return 0; /* ignored */ 139 return 0; /* ignored */
133 return snd_virmidi_dev_receive_event(rdev, ev); 140 return snd_virmidi_dev_receive_event(rdev, ev, atomic);
134} 141}
135 142
136/* 143/*
@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
209 struct snd_virmidi_dev *rdev = substream->rmidi->private_data; 216 struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
210 struct snd_rawmidi_runtime *runtime = substream->runtime; 217 struct snd_rawmidi_runtime *runtime = substream->runtime;
211 struct snd_virmidi *vmidi; 218 struct snd_virmidi *vmidi;
212 unsigned long flags;
213 219
214 vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); 220 vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
215 if (vmidi == NULL) 221 if (vmidi == NULL)
@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
223 vmidi->client = rdev->client; 229 vmidi->client = rdev->client;
224 vmidi->port = rdev->port; 230 vmidi->port = rdev->port;
225 runtime->private_data = vmidi; 231 runtime->private_data = vmidi;
226 write_lock_irqsave(&rdev->filelist_lock, flags); 232 down_write(&rdev->filelist_sem);
233 write_lock_irq(&rdev->filelist_lock);
227 list_add_tail(&vmidi->list, &rdev->filelist); 234 list_add_tail(&vmidi->list, &rdev->filelist);
228 write_unlock_irqrestore(&rdev->filelist_lock, flags); 235 write_unlock_irq(&rdev->filelist_lock);
236 up_write(&rdev->filelist_sem);
229 vmidi->rdev = rdev; 237 vmidi->rdev = rdev;
230 return 0; 238 return 0;
231} 239}
@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
264 struct snd_virmidi_dev *rdev = substream->rmidi->private_data; 272 struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
265 struct snd_virmidi *vmidi = substream->runtime->private_data; 273 struct snd_virmidi *vmidi = substream->runtime->private_data;
266 274
275 down_write(&rdev->filelist_sem);
267 write_lock_irq(&rdev->filelist_lock); 276 write_lock_irq(&rdev->filelist_lock);
268 list_del(&vmidi->list); 277 list_del(&vmidi->list);
269 write_unlock_irq(&rdev->filelist_lock); 278 write_unlock_irq(&rdev->filelist_lock);
279 up_write(&rdev->filelist_sem);
270 snd_midi_event_free(vmidi->parser); 280 snd_midi_event_free(vmidi->parser);
271 substream->runtime->private_data = NULL; 281 substream->runtime->private_data = NULL;
272 kfree(vmidi); 282 kfree(vmidi);
@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
520 rdev->rmidi = rmidi; 530 rdev->rmidi = rmidi;
521 rdev->device = device; 531 rdev->device = device;
522 rdev->client = -1; 532 rdev->client = -1;
533 init_rwsem(&rdev->filelist_sem);
523 rwlock_init(&rdev->filelist_lock); 534 rwlock_init(&rdev->filelist_lock);
524 INIT_LIST_HEAD(&rdev->filelist); 535 INIT_LIST_HEAD(&rdev->filelist);
525 rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; 536 rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f73a01..e43af18d4383 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
484 master->hook(master->hook_private_data, master->val); 484 master->hook(master->hook_private_data, master->val);
485} 485}
486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); 486EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
487
488/**
489 * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
490 * @kctl: vmaster kctl element
491 * @func: function to apply
492 * @arg: optional function argument
493 *
494 * Apply the function @func to each slave kctl of the given vmaster kctl.
495 * Returns 0 if successful, or a negative error code.
496 */
497int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
498 int (*func)(struct snd_kcontrol *, void *),
499 void *arg)
500{
501 struct link_master *master;
502 struct link_slave *slave;
503 int err;
504
505 master = snd_kcontrol_chip(kctl);
506 err = master_init(master);
507 if (err < 0)
508 return err;
509 list_for_each_entry(slave, &master->slaves, list) {
510 err = func(&slave->slave, arg);
511 if (err < 0)
512 return err;
513 }
514
515 return 0;
516}
517EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 978dc1801b3a..f6d2985b2520 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n", 284 dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF); 285 (cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
286 286
287 if (cur_cap == -1) {
288 dev_dbg(bus->dev, "Invalid capability reg read\n");
289 break;
290 }
291
287 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) { 292 switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
288 case AZX_ML_CAP_ID: 293 case AZX_ML_CAP_ID:
289 dev_dbg(bus->dev, "Found ML capability\n"); 294 dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 7e3aa50b21f9..5badd08e1d69 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
103 void __user *puhr; 103 void __user *puhr;
104 union hpi_message_buffer_v1 *hm; 104 union hpi_message_buffer_v1 *hm;
105 union hpi_response_buffer_v1 *hr; 105 union hpi_response_buffer_v1 *hr;
106 u16 msg_size;
106 u16 res_max_size; 107 u16 res_max_size;
107 u32 uncopied_bytes; 108 u32 uncopied_bytes;
108 int err = 0; 109 int err = 0;
@@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
127 } 128 }
128 129
129 /* Now read the message size and data from user space. */ 130 /* Now read the message size and data from user space. */
130 if (get_user(hm->h.size, (u16 __user *)puhm)) { 131 if (get_user(msg_size, (u16 __user *)puhm)) {
131 err = -EFAULT; 132 err = -EFAULT;
132 goto out; 133 goto out;
133 } 134 }
134 if (hm->h.size > sizeof(*hm)) 135 if (msg_size > sizeof(*hm))
135 hm->h.size = sizeof(*hm); 136 msg_size = sizeof(*hm);
136 137
137 /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */ 138 /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */
138 139
139 uncopied_bytes = copy_from_user(hm, puhm, hm->h.size); 140 uncopied_bytes = copy_from_user(hm, puhm, msg_size);
140 if (uncopied_bytes) { 141 if (uncopied_bytes) {
141 HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes); 142 HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes);
142 err = -EFAULT; 143 err = -EFAULT;
143 goto out; 144 goto out;
144 } 145 }
145 146
147 /* Override h.size in case it is changed between two userspace fetches */
148 hm->h.size = msg_size;
149
146 if (get_user(res_max_size, (u16 __user *)puhr)) { 150 if (get_user(res_max_size, (u16 __user *)puhr)) {
147 err = -EFAULT; 151 err = -EFAULT;
148 goto out; 152 goto out;
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 7326695bca33..d68f99e076a8 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol,
1272 1272
1273 chip = snd_kcontrol_chip(kcontrol); 1273 chip = snd_kcontrol_chip(kcontrol);
1274 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1274 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
1275 uinfo->count = 1;
1275 uinfo->value.integer.min = ECHOGAIN_MINOUT; 1276 uinfo->value.integer.min = ECHOGAIN_MINOUT;
1276 uinfo->value.integer.max = ECHOGAIN_MAXOUT; 1277 uinfo->value.integer.max = ECHOGAIN_MAXOUT;
1277 uinfo->dimen.d[0] = num_busses_out(chip); 1278 uinfo->dimen.d[0] = num_busses_out(chip);
1278 uinfo->dimen.d[1] = num_busses_in(chip); 1279 uinfo->dimen.d[1] = num_busses_in(chip);
1279 uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
1280 return 0; 1280 return 0;
1281} 1281}
1282 1282
@@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol,
1344 1344
1345 chip = snd_kcontrol_chip(kcontrol); 1345 chip = snd_kcontrol_chip(kcontrol);
1346 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1346 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
1347 uinfo->count = 1;
1347 uinfo->value.integer.min = ECHOGAIN_MINOUT; 1348 uinfo->value.integer.min = ECHOGAIN_MINOUT;
1348 uinfo->value.integer.max = ECHOGAIN_MAXOUT; 1349 uinfo->value.integer.max = ECHOGAIN_MAXOUT;
1349 uinfo->dimen.d[0] = num_busses_out(chip); 1350 uinfo->dimen.d[0] = num_busses_out(chip);
1350 uinfo->dimen.d[1] = num_pipes_out(chip); 1351 uinfo->dimen.d[1] = num_pipes_out(chip);
1351 uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1];
1352 return 0; 1352 return 0;
1353} 1353}
1354 1354
@@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
1728 struct snd_ctl_elem_info *uinfo) 1728 struct snd_ctl_elem_info *uinfo)
1729{ 1729{
1730 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; 1730 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
1731 uinfo->count = 96;
1731 uinfo->value.integer.min = ECHOGAIN_MINOUT; 1732 uinfo->value.integer.min = ECHOGAIN_MINOUT;
1732 uinfo->value.integer.max = 0; 1733 uinfo->value.integer.max = 0;
1733#ifdef ECHOCARD_HAS_VMIXER 1734#ifdef ECHOCARD_HAS_VMIXER
@@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol,
1737#endif 1738#endif
1738 uinfo->dimen.d[1] = 16; /* 16 channels */ 1739 uinfo->dimen.d[1] = 16; /* 16 channels */
1739 uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */ 1740 uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */
1740 uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2];
1741 return 0; 1741 return 0;
1742} 1742}
1743 1743
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3db26c451837..a0989d231fd0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
1803 return 1; 1803 return 1;
1804} 1804}
1805 1805
1806/* guess the value corresponding to 0dB */
1807static int get_kctl_0dB_offset(struct hda_codec *codec,
1808 struct snd_kcontrol *kctl, int *step_to_check)
1809{
1810 int _tlv[4];
1811 const int *tlv = NULL;
1812 int val = -1;
1813
1814 if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
1815 kctl->tlv.c == snd_hda_mixer_amp_tlv) {
1816 get_ctl_amp_tlv(kctl, _tlv);
1817 tlv = _tlv;
1818 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1819 tlv = kctl->tlv.p;
1820 if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
1821 int step = tlv[3];
1822 step &= ~TLV_DB_SCALE_MUTE;
1823 if (!step)
1824 return -1;
1825 if (*step_to_check && *step_to_check != step) {
1826 codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
1827- *step_to_check, step);
1828 return -1;
1829 }
1830 *step_to_check = step;
1831 val = -tlv[2] / step;
1832 }
1833 return val;
1834}
1835
1836/* call kctl->put with the given value(s) */ 1806/* call kctl->put with the given value(s) */
1837static int put_kctl_with_value(struct snd_kcontrol *kctl, int val) 1807static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1838{ 1808{
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
1847 return 0; 1817 return 0;
1848} 1818}
1849 1819
1850/* initialize the slave volume with 0dB */ 1820struct slave_init_arg {
1851static int init_slave_0dB(struct hda_codec *codec, 1821 struct hda_codec *codec;
1852 void *data, struct snd_kcontrol *slave) 1822 int step;
1823};
1824
1825/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
1826static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
1853{ 1827{
1854 int offset = get_kctl_0dB_offset(codec, slave, data); 1828 struct slave_init_arg *arg = _arg;
1855 if (offset > 0) 1829 int _tlv[4];
1856 put_kctl_with_value(slave, offset); 1830 const int *tlv = NULL;
1831 int step;
1832 int val;
1833
1834 if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
1835 if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
1836 codec_err(arg->codec,
1837 "Unexpected TLV callback for slave %s:%d\n",
1838 kctl->id.name, kctl->id.index);
1839 return 0; /* ignore */
1840 }
1841 get_ctl_amp_tlv(kctl, _tlv);
1842 tlv = _tlv;
1843 } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
1844 tlv = kctl->tlv.p;
1845
1846 if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
1847 return 0;
1848
1849 step = tlv[3];
1850 step &= ~TLV_DB_SCALE_MUTE;
1851 if (!step)
1852 return 0;
1853 if (arg->step && arg->step != step) {
1854 codec_err(arg->codec,
1855 "Mismatching dB step for vmaster slave (%d!=%d)\n",
1856 arg->step, step);
1857 return 0;
1858 }
1859
1860 arg->step = step;
1861 val = -tlv[2] / step;
1862 if (val > 0) {
1863 put_kctl_with_value(kctl, val);
1864 return val;
1865 }
1866
1857 return 0; 1867 return 0;
1858} 1868}
1859 1869
1860/* unmute the slave */ 1870/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
1861static int init_slave_unmute(struct hda_codec *codec, 1871static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
1862 void *data, struct snd_kcontrol *slave)
1863{ 1872{
1864 return put_kctl_with_value(slave, 1); 1873 return put_kctl_with_value(slave, 1);
1865} 1874}
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
1919 /* init with master mute & zero volume */ 1928 /* init with master mute & zero volume */
1920 put_kctl_with_value(kctl, 0); 1929 put_kctl_with_value(kctl, 0);
1921 if (init_slave_vol) { 1930 if (init_slave_vol) {
1922 int step = 0; 1931 struct slave_init_arg arg = {
1923 map_slaves(codec, slaves, suffix, 1932 .codec = codec,
1924 tlv ? init_slave_0dB : init_slave_unmute, &step); 1933 .step = 0,
1934 };
1935 snd_ctl_apply_vmaster_slaves(kctl,
1936 tlv ? init_slave_0dB : init_slave_unmute,
1937 &arg);
1925 } 1938 }
1926 1939
1927 if (ctl_ret) 1940 if (ctl_ret)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2b64fabd5faa..c19c81d230bd 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
906 hda_nid_t pin_nid, u32 stream_tag, int format) 906 hda_nid_t pin_nid, u32 stream_tag, int format)
907{ 907{
908 struct hdmi_spec *spec = codec->spec; 908 struct hdmi_spec *spec = codec->spec;
909 unsigned int param;
909 int err; 910 int err;
910 911
911 err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); 912 err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format));
@@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
915 return err; 916 return err;
916 } 917 }
917 918
919 if (is_haswell_plus(codec)) {
920
921 /*
922 * on recent platforms IEC Coding Type is required for HBR
923 * support, read current Digital Converter settings and set
924 * ICT bitfield if needed.
925 */
926 param = snd_hda_codec_read(codec, cvt_nid, 0,
927 AC_VERB_GET_DIGI_CONVERT_1, 0);
928
929 param = (param >> 16) & ~(AC_DIG3_ICT);
930
931 /* on recent platforms ICT mode is required for HBR support */
932 if (is_hbr_format(format))
933 param |= 0x1;
934
935 snd_hda_codec_write(codec, cvt_nid, 0,
936 AC_VERB_SET_DIGI_CONVERT_3, param);
937 }
938
918 snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); 939 snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format);
919 return 0; 940 return 0;
920} 941}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0ce71111b4e3..546d515f3c1f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
327 case 0x10ec0215: 327 case 0x10ec0215:
328 case 0x10ec0225: 328 case 0x10ec0225:
329 case 0x10ec0233: 329 case 0x10ec0233:
330 case 0x10ec0236:
330 case 0x10ec0255: 331 case 0x10ec0255:
331 case 0x10ec0256: 332 case 0x10ec0256:
332 case 0x10ec0282: 333 case 0x10ec0282:
@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
911 { 0x10ec0275, 0x1028, 0, "ALC3260" }, 912 { 0x10ec0275, 0x1028, 0, "ALC3260" },
912 { 0x10ec0899, 0x1028, 0, "ALC3861" }, 913 { 0x10ec0899, 0x1028, 0, "ALC3861" },
913 { 0x10ec0298, 0x1028, 0, "ALC3266" }, 914 { 0x10ec0298, 0x1028, 0, "ALC3266" },
915 { 0x10ec0236, 0x1028, 0, "ALC3204" },
914 { 0x10ec0256, 0x1028, 0, "ALC3246" }, 916 { 0x10ec0256, 0x1028, 0, "ALC3246" },
915 { 0x10ec0225, 0x1028, 0, "ALC3253" }, 917 { 0x10ec0225, 0x1028, 0, "ALC3253" },
916 { 0x10ec0295, 0x1028, 0, "ALC3254" }, 918 { 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3930 alc_process_coef_fw(codec, coef0255_1); 3932 alc_process_coef_fw(codec, coef0255_1);
3931 alc_process_coef_fw(codec, coef0255); 3933 alc_process_coef_fw(codec, coef0255);
3932 break; 3934 break;
3935 case 0x10ec0236:
3933 case 0x10ec0256: 3936 case 0x10ec0256:
3934 alc_process_coef_fw(codec, coef0256); 3937 alc_process_coef_fw(codec, coef0256);
3935 alc_process_coef_fw(codec, coef0255); 3938 alc_process_coef_fw(codec, coef0255);
@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
4028 }; 4031 };
4029 4032
4030 switch (codec->core.vendor_id) { 4033 switch (codec->core.vendor_id) {
4034 case 0x10ec0236:
4031 case 0x10ec0255: 4035 case 0x10ec0255:
4032 case 0x10ec0256: 4036 case 0x10ec0256:
4033 alc_write_coef_idx(codec, 0x45, 0xc489); 4037 alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
4160 alc_process_coef_fw(codec, alc225_pre_hsmode); 4164 alc_process_coef_fw(codec, alc225_pre_hsmode);
4161 alc_process_coef_fw(codec, coef0225); 4165 alc_process_coef_fw(codec, coef0225);
4162 break; 4166 break;
4167 case 0x10ec0236:
4163 case 0x10ec0255: 4168 case 0x10ec0255:
4164 case 0x10ec0256: 4169 case 0x10ec0256:
4165 alc_process_coef_fw(codec, coef0255); 4170 alc_process_coef_fw(codec, coef0255);
@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
4256 case 0x10ec0255: 4261 case 0x10ec0255:
4257 alc_process_coef_fw(codec, coef0255); 4262 alc_process_coef_fw(codec, coef0255);
4258 break; 4263 break;
4264 case 0x10ec0236:
4259 case 0x10ec0256: 4265 case 0x10ec0256:
4260 alc_process_coef_fw(codec, coef0256); 4266 alc_process_coef_fw(codec, coef0256);
4261 break; 4267 break;
@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
4366 case 0x10ec0255: 4372 case 0x10ec0255:
4367 alc_process_coef_fw(codec, coef0255); 4373 alc_process_coef_fw(codec, coef0255);
4368 break; 4374 break;
4375 case 0x10ec0236:
4369 case 0x10ec0256: 4376 case 0x10ec0256:
4370 alc_process_coef_fw(codec, coef0256); 4377 alc_process_coef_fw(codec, coef0256);
4371 break; 4378 break;
@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
4451 }; 4458 };
4452 4459
4453 switch (codec->core.vendor_id) { 4460 switch (codec->core.vendor_id) {
4461 case 0x10ec0236:
4454 case 0x10ec0255: 4462 case 0x10ec0255:
4455 case 0x10ec0256: 4463 case 0x10ec0256:
4456 alc_process_coef_fw(codec, coef0255); 4464 alc_process_coef_fw(codec, coef0255);
@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
4705 case 0x10ec0255: 4713 case 0x10ec0255:
4706 alc_process_coef_fw(codec, alc255fw); 4714 alc_process_coef_fw(codec, alc255fw);
4707 break; 4715 break;
4716 case 0x10ec0236:
4708 case 0x10ec0256: 4717 case 0x10ec0256:
4709 alc_process_coef_fw(codec, alc256fw); 4718 alc_process_coef_fw(codec, alc256fw);
4710 break; 4719 break;
@@ -6419,6 +6428,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6419 ALC225_STANDARD_PINS, 6428 ALC225_STANDARD_PINS,
6420 {0x12, 0xb7a60130}, 6429 {0x12, 0xb7a60130},
6421 {0x1b, 0x90170110}), 6430 {0x1b, 0x90170110}),
6431 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6432 {0x12, 0x90a60140},
6433 {0x14, 0x90170110},
6434 {0x21, 0x02211020}),
6435 SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
6436 {0x12, 0x90a60140},
6437 {0x14, 0x90170150},
6438 {0x21, 0x02211020}),
6422 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, 6439 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
6423 {0x14, 0x90170110}, 6440 {0x14, 0x90170110},
6424 {0x21, 0x02211020}), 6441 {0x21, 0x02211020}),
@@ -6806,6 +6823,7 @@ static int patch_alc269(struct hda_codec *codec)
6806 case 0x10ec0255: 6823 case 0x10ec0255:
6807 spec->codec_variant = ALC269_TYPE_ALC255; 6824 spec->codec_variant = ALC269_TYPE_ALC255;
6808 break; 6825 break;
6826 case 0x10ec0236:
6809 case 0x10ec0256: 6827 case 0x10ec0256:
6810 spec->codec_variant = ALC269_TYPE_ALC256; 6828 spec->codec_variant = ALC269_TYPE_ALC256;
6811 spec->shutup = alc256_shutup; 6829 spec->shutup = alc256_shutup;
@@ -7857,6 +7875,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
7857 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), 7875 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
7858 HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269), 7876 HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
7859 HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), 7877 HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
7878 HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
7860 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269), 7879 HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
7861 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269), 7880 HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
7862 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260), 7881 HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 0fb6b1b79261..d8409d9ae55b 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
469 469
470 err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); 470 err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
471 if (err) 471 if (err)
472 return err; 472 goto err_kill_urb;
473 473
474 if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) 474 if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
475 return -ENODEV; 475 err = -ENODEV;
476 goto err_kill_urb;
477 }
476 478
477 usb_string(usb_dev, usb_dev->descriptor.iManufacturer, 479 usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
478 cdev->vendor_name, CAIAQ_USB_STR_LEN); 480 cdev->vendor_name, CAIAQ_USB_STR_LEN);
@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
507 509
508 setup_card(cdev); 510 setup_card(cdev);
509 return 0; 511 return 0;
512
513 err_kill_urb:
514 usb_kill_urb(&cdev->ep1_in_urb);
515 return err;
510} 516}
511 517
512static int snd_probe(struct usb_interface *intf, 518static int snd_probe(struct usb_interface *intf,
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 3dc36d913550..23d1d23aefec 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
221 struct usb_interface_descriptor *altsd; 221 struct usb_interface_descriptor *altsd;
222 void *control_header; 222 void *control_header;
223 int i, protocol; 223 int i, protocol;
224 int rest_bytes;
224 225
225 /* find audiocontrol interface */ 226 /* find audiocontrol interface */
226 host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; 227 host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
@@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
235 return -EINVAL; 236 return -EINVAL;
236 } 237 }
237 238
239 rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
240 control_header;
241
242 /* just to be sure -- this shouldn't hit at all */
243 if (rest_bytes <= 0) {
244 dev_err(&dev->dev, "invalid control header\n");
245 return -EINVAL;
246 }
247
238 switch (protocol) { 248 switch (protocol) {
239 default: 249 default:
240 dev_warn(&dev->dev, 250 dev_warn(&dev->dev,
@@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
245 case UAC_VERSION_1: { 255 case UAC_VERSION_1: {
246 struct uac1_ac_header_descriptor *h1 = control_header; 256 struct uac1_ac_header_descriptor *h1 = control_header;
247 257
258 if (rest_bytes < sizeof(*h1)) {
259 dev_err(&dev->dev, "too short v1 buffer descriptor\n");
260 return -EINVAL;
261 }
262
248 if (!h1->bInCollection) { 263 if (!h1->bInCollection) {
249 dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); 264 dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
250 return -EINVAL; 265 return -EINVAL;
251 } 266 }
252 267
268 if (rest_bytes < h1->bLength) {
269 dev_err(&dev->dev, "invalid buffer length (v1)\n");
270 return -EINVAL;
271 }
272
253 if (h1->bLength < sizeof(*h1) + h1->bInCollection) { 273 if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
254 dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); 274 dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
255 return -EINVAL; 275 return -EINVAL;
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 0ff5a7d2e19f..c8f723c3a033 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface,
779 return 0; 779 return 0;
780 780
781 error: 781 error:
782 if (line6->disconnect) 782 /* we can call disconnect callback here because no close-sync is
783 line6->disconnect(line6); 783 * needed yet at this point
784 snd_card_free(card); 784 */
785 line6_disconnect(interface);
785 return ret; 786 return ret;
786} 787}
787EXPORT_SYMBOL_GPL(line6_probe); 788EXPORT_SYMBOL_GPL(line6_probe);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 956f847a96e4..451007c27743 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6)
301 301
302 intf = usb_ifnum_to_if(line6->usbdev, 302 intf = usb_ifnum_to_if(line6->usbdev,
303 pod->line6.properties->ctrl_if); 303 pod->line6.properties->ctrl_if);
304 usb_driver_release_interface(&podhd_driver, intf); 304 if (intf)
305 usb_driver_release_interface(&podhd_driver, intf);
305 } 306 }
306} 307}
307 308
@@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6,
317 318
318 line6->disconnect = podhd_disconnect; 319 line6->disconnect = podhd_disconnect;
319 320
321 init_timer(&pod->startup_timer);
322 INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
323
320 if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { 324 if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
321 /* claim the data interface */ 325 /* claim the data interface */
322 intf = usb_ifnum_to_if(line6->usbdev, 326 intf = usb_ifnum_to_if(line6->usbdev,
@@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6,
358 } 362 }
359 363
360 /* init device and delay registering */ 364 /* init device and delay registering */
361 init_timer(&pod->startup_timer);
362 INIT_WORK(&pod->startup_work, podhd_startup_workqueue);
363 podhd_startup(pod); 365 podhd_startup(pod);
364 return 0; 366 return 0;
365} 367}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9732edf77f86..91bc8f18791e 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
2234 2234
2235static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) 2235static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
2236{ 2236{
2237 /* kill pending URBs */
2238 snd_usb_mixer_disconnect(mixer);
2239
2237 kfree(mixer->id_elems); 2240 kfree(mixer->id_elems);
2238 if (mixer->urb) { 2241 if (mixer->urb) {
2239 kfree(mixer->urb->transfer_buffer); 2242 kfree(mixer->urb->transfer_buffer);
@@ -2584,8 +2587,13 @@ _error:
2584 2587
2585void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) 2588void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
2586{ 2589{
2587 usb_kill_urb(mixer->urb); 2590 if (mixer->disconnected)
2588 usb_kill_urb(mixer->rc_urb); 2591 return;
2592 if (mixer->urb)
2593 usb_kill_urb(mixer->urb);
2594 if (mixer->rc_urb)
2595 usb_kill_urb(mixer->rc_urb);
2596 mixer->disconnected = true;
2589} 2597}
2590 2598
2591#ifdef CONFIG_PM 2599#ifdef CONFIG_PM
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 2b4b067646ab..545d99b09706 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -22,6 +22,8 @@ struct usb_mixer_interface {
22 struct urb *rc_urb; 22 struct urb *rc_urb;
23 struct usb_ctrlrequest *rc_setup_packet; 23 struct usb_ctrlrequest *rc_setup_packet;
24 u8 rc_buffer[6]; 24 u8 rc_buffer[6];
25
26 bool disconnected;
25}; 27};
26 28
27#define MAX_CHANNELS 16 /* max logical channels */ 29#define MAX_CHANNELS 16 /* max logical channels */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 913552078285..4f5f18f22974 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1137,6 +1137,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
1137 case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */ 1137 case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
1138 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ 1138 case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
1139 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ 1139 case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
1140 case USB_ID(0x047F, 0xC022): /* Plantronics C310 */
1141 case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */
1142 case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */
1140 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ 1143 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
1141 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ 1144 case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
1142 case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ 1145 case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
@@ -1351,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1351 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */ 1354 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
1352 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */ 1355 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
1353 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */ 1356 case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
1357 case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
1354 if (fp->altsetting == 2) 1358 if (fp->altsetting == 2)
1355 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1359 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1356 break; 1360 break;
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
index 4dab49080700..e229abd21652 100644
--- a/sound/usb/usx2y/usb_stream.c
+++ b/sound/usb/usx2y/usb_stream.c
@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
191 } 191 }
192 192
193 pg = get_order(read_size); 193 pg = get_order(read_size);
194 sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); 194 sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
195 __GFP_NOWARN, pg);
195 if (!sk->s) { 196 if (!sk->s) {
196 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); 197 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
197 goto out; 198 goto out;
@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
211 pg = get_order(write_size); 212 pg = get_order(write_size);
212 213
213 sk->write_page = 214 sk->write_page =
214 (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); 215 (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
216 __GFP_NOWARN, pg);
215 if (!sk->write_page) { 217 if (!sk->write_page) {
216 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); 218 snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
217 usb_stream_free(sk); 219 usb_stream_free(sk);
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 69d09c39bbcd..cd7359e23d86 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -88,6 +88,12 @@ struct kvm_s390_io_adapter_req {
88/* kvm attributes for KVM_S390_VM_TOD */ 88/* kvm attributes for KVM_S390_VM_TOD */
89#define KVM_S390_VM_TOD_LOW 0 89#define KVM_S390_VM_TOD_LOW 0
90#define KVM_S390_VM_TOD_HIGH 1 90#define KVM_S390_VM_TOD_HIGH 1
91#define KVM_S390_VM_TOD_EXT 2
92
93struct kvm_s390_vm_tod_clock {
94 __u8 epoch_idx;
95 __u64 tod;
96};
91 97
92/* kvm attributes for KVM_S390_VM_CPU_MODEL */ 98/* kvm attributes for KVM_S390_VM_CPU_MODEL */
93/* processor related attributes are r/w */ 99/* processor related attributes are r/w */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 8ea315a11fe0..2519c6c801c9 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -196,6 +196,7 @@
196 196
197#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 197#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
198#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 198#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
199#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
199 200
200#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 201#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
201#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ 202#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
@@ -287,6 +288,7 @@
287#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ 288#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
288#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */ 289#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
289#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */ 290#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
291#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
290 292
291/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */ 293/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
292#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/ 294#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 5dff775af7cd..c10c9128f54e 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -21,11 +21,13 @@
21# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31)) 21# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
22# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31)) 22# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
23# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31)) 23# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
24# define DISABLE_PCID 0
24#else 25#else
25# define DISABLE_VME 0 26# define DISABLE_VME 0
26# define DISABLE_K6_MTRR 0 27# define DISABLE_K6_MTRR 0
27# define DISABLE_CYRIX_ARR 0 28# define DISABLE_CYRIX_ARR 0
28# define DISABLE_CENTAUR_MCR 0 29# define DISABLE_CENTAUR_MCR 0
30# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
29#endif /* CONFIG_X86_64 */ 31#endif /* CONFIG_X86_64 */
30 32
31#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 33#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -49,7 +51,7 @@
49#define DISABLED_MASK1 0 51#define DISABLED_MASK1 0
50#define DISABLED_MASK2 0 52#define DISABLED_MASK2 0
51#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR) 53#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
52#define DISABLED_MASK4 0 54#define DISABLED_MASK4 (DISABLE_PCID)
53#define DISABLED_MASK5 0 55#define DISABLED_MASK5 0
54#define DISABLED_MASK6 0 56#define DISABLED_MASK6 0
55#define DISABLED_MASK7 0 57#define DISABLED_MASK7 0
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h
new file mode 100644
index 000000000000..e4732d3c2998
--- /dev/null
+++ b/tools/include/asm-generic/hugetlb_encode.h
@@ -0,0 +1,34 @@
1#ifndef _ASM_GENERIC_HUGETLB_ENCODE_H_
2#define _ASM_GENERIC_HUGETLB_ENCODE_H_
3
4/*
5 * Several system calls take a flag to request "hugetlb" huge pages.
6 * Without further specification, these system calls will use the
7 * system's default huge page size. If a system supports multiple
8 * huge page sizes, the desired huge page size can be specified in
9 * bits [26:31] of the flag arguments. The value in these 6 bits
10 * will encode the log2 of the huge page size.
11 *
12 * The following definitions are associated with this huge page size
13 * encoding in flag arguments. System call specific header files
14 * that use this encoding should include this file. They can then
15 * provide definitions based on these with their own specific prefix.
16 * for example:
17 * #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
18 */
19
20#define HUGETLB_FLAG_ENCODE_SHIFT 26
21#define HUGETLB_FLAG_ENCODE_MASK 0x3f
22
23#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
24#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
25#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
26#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
27#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
28#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
29#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
30#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
31#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
32#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
33
34#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 8c27db0c5c08..203268f9231e 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -58,20 +58,12 @@
58 overrides the coredump filter bits */ 58 overrides the coredump filter bits */
59#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */ 59#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
60 60
61#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
62#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
63
61/* compatibility flags */ 64/* compatibility flags */
62#define MAP_FILE 0 65#define MAP_FILE 0
63 66
64/*
65 * When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
66 * This gives us 6 bits, which is enough until someone invents 128 bit address
67 * spaces.
68 *
69 * Assume these are all power of twos.
70 * When 0 use the default page size.
71 */
72#define MAP_HUGE_SHIFT 26
73#define MAP_HUGE_MASK 0x3f
74
75#define PKEY_DISABLE_ACCESS 0x1 67#define PKEY_DISABLE_ACCESS 0x1
76#define PKEY_DISABLE_WRITE 0x2 68#define PKEY_DISABLE_WRITE 0x2
77#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\ 69#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 101593ab10ac..97677cd6964d 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -700,6 +700,7 @@ struct drm_prime_handle {
700 700
701struct drm_syncobj_create { 701struct drm_syncobj_create {
702 __u32 handle; 702 __u32 handle;
703#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
703 __u32 flags; 704 __u32 flags;
704}; 705};
705 706
@@ -718,6 +719,24 @@ struct drm_syncobj_handle {
718 __u32 pad; 719 __u32 pad;
719}; 720};
720 721
722#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
723#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
724struct drm_syncobj_wait {
725 __u64 handles;
726 /* absolute timeout */
727 __s64 timeout_nsec;
728 __u32 count_handles;
729 __u32 flags;
730 __u32 first_signaled; /* only valid when not waiting all */
731 __u32 pad;
732};
733
734struct drm_syncobj_array {
735 __u64 handles;
736 __u32 count_handles;
737 __u32 pad;
738};
739
721#if defined(__cplusplus) 740#if defined(__cplusplus)
722} 741}
723#endif 742#endif
@@ -840,6 +859,9 @@ extern "C" {
840#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) 859#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
841#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) 860#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle)
842#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) 861#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
862#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
863#define DRM_IOCTL_SYNCOBJ_RESET DRM_IOWR(0xC4, struct drm_syncobj_array)
864#define DRM_IOCTL_SYNCOBJ_SIGNAL DRM_IOWR(0xC5, struct drm_syncobj_array)
843 865
844/** 866/**
845 * Device specific ioctls should only be in their respective headers 867 * Device specific ioctls should only be in their respective headers
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 7ccbd6a2bbe0..6598fb76d2c2 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -260,6 +260,8 @@ typedef struct _drm_i915_sarea {
260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 260#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 261#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
262#define DRM_I915_PERF_OPEN 0x36 262#define DRM_I915_PERF_OPEN 0x36
263#define DRM_I915_PERF_ADD_CONFIG 0x37
264#define DRM_I915_PERF_REMOVE_CONFIG 0x38
263 265
264#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 266#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
265#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 267#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +317,8 @@ typedef struct _drm_i915_sarea {
315#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) 317#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
316#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) 318#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
317#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param) 319#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
320#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
321#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
318 322
319/* Allow drivers to submit batchbuffers directly to hardware, relying 323/* Allow drivers to submit batchbuffers directly to hardware, relying
320 * on the security mechanisms provided by hardware. 324 * on the security mechanisms provided by hardware.
@@ -431,6 +435,11 @@ typedef struct drm_i915_irq_wait {
431 */ 435 */
432#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48 436#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
433 437
438/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
439 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
440 */
441#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
442
434typedef struct drm_i915_getparam { 443typedef struct drm_i915_getparam {
435 __s32 param; 444 __s32 param;
436 /* 445 /*
@@ -812,6 +821,17 @@ struct drm_i915_gem_exec_object2 {
812 __u64 rsvd2; 821 __u64 rsvd2;
813}; 822};
814 823
824struct drm_i915_gem_exec_fence {
825 /**
826 * User's handle for a drm_syncobj to wait on or signal.
827 */
828 __u32 handle;
829
830#define I915_EXEC_FENCE_WAIT (1<<0)
831#define I915_EXEC_FENCE_SIGNAL (1<<1)
832 __u32 flags;
833};
834
815struct drm_i915_gem_execbuffer2 { 835struct drm_i915_gem_execbuffer2 {
816 /** 836 /**
817 * List of gem_exec_object2 structs 837 * List of gem_exec_object2 structs
@@ -826,7 +846,11 @@ struct drm_i915_gem_execbuffer2 {
826 __u32 DR1; 846 __u32 DR1;
827 __u32 DR4; 847 __u32 DR4;
828 __u32 num_cliprects; 848 __u32 num_cliprects;
829 /** This is a struct drm_clip_rect *cliprects */ 849 /**
850 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
851 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
852 * struct drm_i915_gem_exec_fence *fences.
853 */
830 __u64 cliprects_ptr; 854 __u64 cliprects_ptr;
831#define I915_EXEC_RING_MASK (7<<0) 855#define I915_EXEC_RING_MASK (7<<0)
832#define I915_EXEC_DEFAULT (0<<0) 856#define I915_EXEC_DEFAULT (0<<0)
@@ -927,7 +951,14 @@ struct drm_i915_gem_execbuffer2 {
927 * element). 951 * element).
928 */ 952 */
929#define I915_EXEC_BATCH_FIRST (1<<18) 953#define I915_EXEC_BATCH_FIRST (1<<18)
930#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_BATCH_FIRST<<1)) 954
955/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
956 * define an array of i915_gem_exec_fence structures which specify a set of
957 * dma fences to wait upon or signal.
958 */
959#define I915_EXEC_FENCE_ARRAY (1<<19)
960
961#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
931 962
932#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 963#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
933#define i915_execbuffer2_set_context_id(eb2, context) \ 964#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1467,6 +1498,22 @@ enum drm_i915_perf_record_type {
1467 DRM_I915_PERF_RECORD_MAX /* non-ABI */ 1498 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1468}; 1499};
1469 1500
1501/**
1502 * Structure to upload perf dynamic configuration into the kernel.
1503 */
1504struct drm_i915_perf_oa_config {
1505 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1506 char uuid[36];
1507
1508 __u32 n_mux_regs;
1509 __u32 n_boolean_regs;
1510 __u32 n_flex_regs;
1511
1512 __u64 __user mux_regs_ptr;
1513 __u64 __user boolean_regs_ptr;
1514 __u64 __user flex_regs_ptr;
1515};
1516
1470#if defined(__cplusplus) 1517#if defined(__cplusplus)
1471} 1518}
1472#endif 1519#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 461811e57140..01cc7ba39924 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -143,12 +143,6 @@ enum bpf_attach_type {
143 143
144#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE 144#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
145 145
146enum bpf_sockmap_flags {
147 BPF_SOCKMAP_UNSPEC,
148 BPF_SOCKMAP_STRPARSER,
149 __MAX_BPF_SOCKMAP_FLAG
150};
151
152/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command 146/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
153 * to the given target_fd cgroup the descendent cgroup will be able to 147 * to the given target_fd cgroup the descendent cgroup will be able to
154 * override effective bpf program that was inherited from this cgroup 148 * override effective bpf program that was inherited from this cgroup
@@ -318,7 +312,7 @@ union bpf_attr {
318 * jump into another BPF program 312 * jump into another BPF program
319 * @ctx: context pointer passed to next program 313 * @ctx: context pointer passed to next program
320 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY 314 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
321 * @index: index inside array that selects specific program to run 315 * @index: 32-bit index inside array that selects specific program to run
322 * Return: 0 on success or negative error 316 * Return: 0 on success or negative error
323 * 317 *
324 * int bpf_clone_redirect(skb, ifindex, flags) 318 * int bpf_clone_redirect(skb, ifindex, flags)
@@ -368,9 +362,20 @@ union bpf_attr {
368 * int bpf_redirect(ifindex, flags) 362 * int bpf_redirect(ifindex, flags)
369 * redirect to another netdev 363 * redirect to another netdev
370 * @ifindex: ifindex of the net device 364 * @ifindex: ifindex of the net device
371 * @flags: bit 0 - if set, redirect to ingress instead of egress 365 * @flags:
372 * other bits - reserved 366 * cls_bpf:
373 * Return: TC_ACT_REDIRECT 367 * bit 0 - if set, redirect to ingress instead of egress
368 * other bits - reserved
369 * xdp_bpf:
370 * all bits - reserved
371 * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
372 * xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error
373 * int bpf_redirect_map(map, key, flags)
374 * redirect to endpoint in map
375 * @map: pointer to dev map
376 * @key: index in map to lookup
377 * @flags: --
378 * Return: XDP_REDIRECT on success or XDP_ABORT on error
374 * 379 *
375 * u32 bpf_get_route_realm(skb) 380 * u32 bpf_get_route_realm(skb)
376 * retrieve a dst's tclassid 381 * retrieve a dst's tclassid
@@ -564,13 +569,14 @@ union bpf_attr {
564 * @flags: reserved for future use 569 * @flags: reserved for future use
565 * Return: 0 on success or negative error code 570 * Return: 0 on success or negative error code
566 * 571 *
567 * int bpf_sk_redirect_map(map, key, flags) 572 * int bpf_sk_redirect_map(skb, map, key, flags)
568 * Redirect skb to a sock in map using key as a lookup key for the 573 * Redirect skb to a sock in map using key as a lookup key for the
569 * sock in map. 574 * sock in map.
575 * @skb: pointer to skb
570 * @map: pointer to sockmap 576 * @map: pointer to sockmap
571 * @key: key to lookup sock in map 577 * @key: key to lookup sock in map
572 * @flags: reserved for future use 578 * @flags: reserved for future use
573 * Return: SK_REDIRECT 579 * Return: SK_PASS
574 * 580 *
575 * int bpf_sock_map_update(skops, map, key, flags) 581 * int bpf_sock_map_update(skops, map, key, flags)
576 * @skops: pointer to bpf_sock_ops 582 * @skops: pointer to bpf_sock_ops
@@ -632,7 +638,7 @@ union bpf_attr {
632 FN(skb_adjust_room), \ 638 FN(skb_adjust_room), \
633 FN(redirect_map), \ 639 FN(redirect_map), \
634 FN(sk_redirect_map), \ 640 FN(sk_redirect_map), \
635 FN(sock_map_update), 641 FN(sock_map_update), \
636 642
637/* integer value in 'imm' field of BPF_CALL instruction selects which helper 643/* integer value in 'imm' field of BPF_CALL instruction selects which helper
638 * function eBPF program intends to call 644 * function eBPF program intends to call
@@ -753,20 +759,23 @@ struct bpf_sock {
753 __u32 family; 759 __u32 family;
754 __u32 type; 760 __u32 type;
755 __u32 protocol; 761 __u32 protocol;
762 __u32 mark;
763 __u32 priority;
756}; 764};
757 765
758#define XDP_PACKET_HEADROOM 256 766#define XDP_PACKET_HEADROOM 256
759 767
760/* User return codes for XDP prog type. 768/* User return codes for XDP prog type.
761 * A valid XDP program must return one of these defined values. All other 769 * A valid XDP program must return one of these defined values. All other
762 * return codes are reserved for future use. Unknown return codes will result 770 * return codes are reserved for future use. Unknown return codes will
763 * in packet drop. 771 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
764 */ 772 */
765enum xdp_action { 773enum xdp_action {
766 XDP_ABORTED = 0, 774 XDP_ABORTED = 0,
767 XDP_DROP, 775 XDP_DROP,
768 XDP_PASS, 776 XDP_PASS,
769 XDP_TX, 777 XDP_TX,
778 XDP_REDIRECT,
770}; 779};
771 780
772/* user accessible metadata for XDP packet hook 781/* user accessible metadata for XDP packet hook
@@ -778,9 +787,8 @@ struct xdp_md {
778}; 787};
779 788
780enum sk_action { 789enum sk_action {
781 SK_ABORTED = 0, 790 SK_DROP = 0,
782 SK_DROP, 791 SK_PASS,
783 SK_REDIRECT,
784}; 792};
785 793
786#define BPF_TAG_SIZE 8 794#define BPF_TAG_SIZE 8
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 6cd63c18708a..838887587411 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -711,7 +711,8 @@ struct kvm_ppc_one_seg_page_size {
711struct kvm_ppc_smmu_info { 711struct kvm_ppc_smmu_info {
712 __u64 flags; 712 __u64 flags;
713 __u32 slb_size; 713 __u32 slb_size;
714 __u32 pad; 714 __u16 data_keys; /* # storage keys supported for data */
715 __u16 instr_keys; /* # storage keys supported for instructions */
715 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; 716 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
716}; 717};
717 718
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index 81d8edf11789..a937480d7cd3 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -1,7 +1,8 @@
1#ifndef _UAPI_LINUX_MMAN_H 1#ifndef _UAPI_LINUX_MMAN_H
2#define _UAPI_LINUX_MMAN_H 2#define _UAPI_LINUX_MMAN_H
3 3
4#include <uapi/asm/mman.h> 4#include <asm/mman.h>
5#include <asm-generic/hugetlb_encode.h>
5 6
6#define MREMAP_MAYMOVE 1 7#define MREMAP_MAYMOVE 1
7#define MREMAP_FIXED 2 8#define MREMAP_FIXED 2
@@ -10,4 +11,25 @@
10#define OVERCOMMIT_ALWAYS 1 11#define OVERCOMMIT_ALWAYS 1
11#define OVERCOMMIT_NEVER 2 12#define OVERCOMMIT_NEVER 2
12 13
14/*
15 * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
16 * size other than the default is desired. See hugetlb_encode.h.
17 * All known huge page size encodings are provided here. It is the
18 * responsibility of the application to know which sizes are supported on
19 * the running system. See mmap(2) man page for details.
20 */
21#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
22#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
23
24#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
25#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
26#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
27#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
28#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
29#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
30#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
31#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
32#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
33#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
34
13#endif /* _UAPI_LINUX_MMAN_H */ 35#endif /* _UAPI_LINUX_MMAN_H */
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 0f22768c0d4d..34a579f806e3 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -284,11 +284,16 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
284 case 0x8d: 284 case 0x8d:
285 if (sib == 0x24 && rex_w && !rex_b && !rex_x) { 285 if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
286 286
287 /* lea disp(%rsp), reg */
288 *type = INSN_STACK; 287 *type = INSN_STACK;
289 op->src.type = OP_SRC_ADD; 288 if (!insn.displacement.value) {
289 /* lea (%rsp), reg */
290 op->src.type = OP_SRC_REG;
291 } else {
292 /* lea disp(%rsp), reg */
293 op->src.type = OP_SRC_ADD;
294 op->src.offset = insn.displacement.value;
295 }
290 op->src.reg = CFI_SP; 296 op->src.reg = CFI_SP;
291 op->src.offset = insn.displacement.value;
292 op->dest.type = OP_DEST_REG; 297 op->dest.type = OP_DEST_REG;
293 op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r]; 298 op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
294 299
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a0c518ecf085..c0e26ad1fa7e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
267 &insn->immediate, 267 &insn->immediate,
268 &insn->stack_op); 268 &insn->stack_op);
269 if (ret) 269 if (ret)
270 return ret; 270 goto err;
271 271
272 if (!insn->type || insn->type > INSN_LAST) { 272 if (!insn->type || insn->type > INSN_LAST) {
273 WARN_FUNC("invalid instruction type %d", 273 WARN_FUNC("invalid instruction type %d",
274 insn->sec, insn->offset, insn->type); 274 insn->sec, insn->offset, insn->type);
275 return -1; 275 ret = -1;
276 goto err;
276 } 277 }
277 278
278 hash_add(file->insn_hash, &insn->hash, insn->offset); 279 hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
296 } 297 }
297 298
298 return 0; 299 return 0;
300
301err:
302 free(insn);
303 return ret;
299} 304}
300 305
301/* 306/*
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e397453e5a46..63526f4416ea 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command> 11'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
12'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>] 12'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
13 13
14DESCRIPTION 14DESCRIPTION
15----------- 15-----------
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 62072822dc85..627b7cada144 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,34 +1,8 @@
1tools/perf 1tools/perf
2tools/arch/alpha/include/asm/barrier.h 2tools/arch
3tools/arch/arm/include/asm/barrier.h
4tools/arch/arm64/include/asm/barrier.h
5tools/arch/ia64/include/asm/barrier.h
6tools/arch/mips/include/asm/barrier.h
7tools/arch/powerpc/include/asm/barrier.h
8tools/arch/s390/include/asm/barrier.h
9tools/arch/sh/include/asm/barrier.h
10tools/arch/sparc/include/asm/barrier.h
11tools/arch/sparc/include/asm/barrier_32.h
12tools/arch/sparc/include/asm/barrier_64.h
13tools/arch/tile/include/asm/barrier.h
14tools/arch/x86/include/asm/barrier.h
15tools/arch/x86/include/asm/cmpxchg.h
16tools/arch/x86/include/asm/cpufeatures.h
17tools/arch/x86/include/asm/disabled-features.h
18tools/arch/x86/include/asm/required-features.h
19tools/arch/x86/include/uapi/asm/svm.h
20tools/arch/x86/include/uapi/asm/vmx.h
21tools/arch/x86/include/uapi/asm/kvm.h
22tools/arch/x86/include/uapi/asm/kvm_perf.h
23tools/arch/x86/lib/memcpy_64.S
24tools/arch/x86/lib/memset_64.S
25tools/arch/s390/include/uapi/asm/kvm_perf.h
26tools/arch/s390/include/uapi/asm/sie.h
27tools/arch/xtensa/include/asm/barrier.h
28tools/scripts 3tools/scripts
29tools/build 4tools/build
30tools/arch/x86/include/asm/atomic.h 5tools/include
31tools/arch/x86/include/asm/rmwcc.h
32tools/lib/traceevent 6tools/lib/traceevent
33tools/lib/api 7tools/lib/api
34tools/lib/bpf 8tools/lib/bpf
@@ -42,60 +16,3 @@ tools/lib/find_bit.c
42tools/lib/bitmap.c 16tools/lib/bitmap.c
43tools/lib/str_error_r.c 17tools/lib/str_error_r.c
44tools/lib/vsprintf.c 18tools/lib/vsprintf.c
45tools/include/asm/alternative-asm.h
46tools/include/asm/atomic.h
47tools/include/asm/barrier.h
48tools/include/asm/bug.h
49tools/include/asm-generic/atomic-gcc.h
50tools/include/asm-generic/barrier.h
51tools/include/asm-generic/bitops/arch_hweight.h
52tools/include/asm-generic/bitops/atomic.h
53tools/include/asm-generic/bitops/const_hweight.h
54tools/include/asm-generic/bitops/__ffs.h
55tools/include/asm-generic/bitops/__ffz.h
56tools/include/asm-generic/bitops/__fls.h
57tools/include/asm-generic/bitops/find.h
58tools/include/asm-generic/bitops/fls64.h
59tools/include/asm-generic/bitops/fls.h
60tools/include/asm-generic/bitops/hweight.h
61tools/include/asm-generic/bitops.h
62tools/include/linux/atomic.h
63tools/include/linux/bitops.h
64tools/include/linux/compiler.h
65tools/include/linux/compiler-gcc.h
66tools/include/linux/coresight-pmu.h
67tools/include/linux/bug.h
68tools/include/linux/filter.h
69tools/include/linux/hash.h
70tools/include/linux/kernel.h
71tools/include/linux/list.h
72tools/include/linux/log2.h
73tools/include/uapi/asm-generic/fcntl.h
74tools/include/uapi/asm-generic/ioctls.h
75tools/include/uapi/asm-generic/mman-common.h
76tools/include/uapi/asm-generic/mman.h
77tools/include/uapi/drm/drm.h
78tools/include/uapi/drm/i915_drm.h
79tools/include/uapi/linux/bpf.h
80tools/include/uapi/linux/bpf_common.h
81tools/include/uapi/linux/fcntl.h
82tools/include/uapi/linux/hw_breakpoint.h
83tools/include/uapi/linux/kvm.h
84tools/include/uapi/linux/mman.h
85tools/include/uapi/linux/perf_event.h
86tools/include/uapi/linux/sched.h
87tools/include/uapi/linux/stat.h
88tools/include/uapi/linux/vhost.h
89tools/include/uapi/sound/asound.h
90tools/include/linux/poison.h
91tools/include/linux/rbtree.h
92tools/include/linux/rbtree_augmented.h
93tools/include/linux/refcount.h
94tools/include/linux/string.h
95tools/include/linux/stringify.h
96tools/include/linux/types.h
97tools/include/linux/err.h
98tools/include/linux/bitmap.h
99tools/include/linux/time64.h
100tools/arch/*/include/uapi/asm/mman.h
101tools/arch/*/include/uapi/asm/perf_regs.h
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index bd518b623d7a..5bd7b9260cc0 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,5 +1,4 @@
1libperf-y += header.o 1libperf-y += header.o
2libperf-y += sym-handling.o
3libperf-y += kvm-stat.o 2libperf-y += kvm-stat.o
4 3
5libperf-$(CONFIG_DWARF) += dwarf-regs.o 4libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/s390/util/sym-handling.c b/tools/perf/arch/s390/util/sym-handling.c
deleted file mode 100644
index e103f6e46afe..000000000000
--- a/tools/perf/arch/s390/util/sym-handling.c
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Architecture specific ELF symbol handling and relocation mapping.
3 *
4 * Copyright 2017 IBM Corp.
5 * Author(s): Thomas Richter <tmricht@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License (version 2 only)
9 * as published by the Free Software Foundation.
10 */
11
12#include "symbol.h"
13
14#ifdef HAVE_LIBELF_SUPPORT
15bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
16{
17 if (ehdr.e_type == ET_EXEC)
18 return false;
19 return ehdr.e_type == ET_REL || ehdr.e_type == ET_DYN;
20}
21
22void arch__adjust_sym_map_offset(GElf_Sym *sym,
23 GElf_Shdr *shdr __maybe_unused,
24 struct map *map)
25{
26 if (map->type == MAP__FUNCTION)
27 sym->st_value += map->start;
28}
29#endif
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 3d4c3b5e1868..0c977b6e0f8b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample,
586 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); 586 thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
587 } 587 }
588 588
589 printf("0x%"PRIx64, from); 589 printf(" 0x%"PRIx64, from);
590 if (PRINT_FIELD(DSO)) { 590 if (PRINT_FIELD(DSO)) {
591 printf("("); 591 printf("(");
592 map__fprintf_dsoname(alf.map, stdout); 592 map__fprintf_dsoname(alf.map, stdout);
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample,
681 if (alt.map && !alt.map->dso->adjust_symbols) 681 if (alt.map && !alt.map->dso->adjust_symbols)
682 to = map__map_ip(alt.map, to); 682 to = map__map_ip(alt.map, to);
683 683
684 printf("0x%"PRIx64, from); 684 printf(" 0x%"PRIx64, from);
685 if (PRINT_FIELD(DSO)) { 685 if (PRINT_FIELD(DSO)) {
686 printf("("); 686 printf("(");
687 map__fprintf_dsoname(alf.map, stdout); 687 map__fprintf_dsoname(alf.map, stdout);
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 462fc755092e..7a84d73324e3 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,6 +10,9 @@
10 10
11. $(dirname $0)/lib/probe.sh 11. $(dirname $0)/lib/probe.sh
12 12
13ld=$(realpath /lib64/ld*.so.* | uniq)
14libc=$(echo $ld | sed 's/ld/libc/g')
15
13trace_libc_inet_pton_backtrace() { 16trace_libc_inet_pton_backtrace() {
14 idx=0 17 idx=0
15 expected[0]="PING.*bytes" 18 expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
18 expected[3]=".*packets transmitted.*" 21 expected[3]=".*packets transmitted.*"
19 expected[4]="rtt min.*" 22 expected[4]="rtt min.*"
20 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" 23 expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
21 expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" 24 expected[6]=".*inet_pton[[:space:]]\($libc\)$"
22 expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" 25 expected[7]="getaddrinfo[[:space:]]\($libc\)$"
23 expected[8]=".*\(.*/bin/ping.*\)$" 26 expected[8]=".*\(.*/bin/ping.*\)$"
24 27
25 perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do 28 perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
35} 38}
36 39
37skip_if_no_perf_probe && \ 40skip_if_no_perf_probe && \
38perf probe -q /lib64/libc-*.so inet_pton && \ 41perf probe -q $libc inet_pton && \
39trace_libc_inet_pton_backtrace 42trace_libc_inet_pton_backtrace
40err=$? 43err=$?
41rm -f ${file} 44rm -f ${file}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ddb2c6fbdf91..db79017a6e56 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
532 532
533void perf_hpp__column_unregister(struct perf_hpp_fmt *format) 533void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
534{ 534{
535 list_del(&format->list); 535 list_del_init(&format->list);
536} 536}
537 537
538void perf_hpp__cancel_cumulate(void) 538void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
606 606
607static void fmt_free(struct perf_hpp_fmt *fmt) 607static void fmt_free(struct perf_hpp_fmt *fmt)
608{ 608{
609 /*
610 * At this point fmt should be completely
611 * unhooked, if not it's a bug.
612 */
613 BUG_ON(!list_empty(&fmt->list));
614 BUG_ON(!list_empty(&fmt->sort_list));
615
609 if (fmt->free) 616 if (fmt->free)
610 fmt->free(fmt); 617 fmt->free(fmt);
611} 618}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 510b513e0f01..a971caf3759d 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -65,8 +65,6 @@ static int parse_callchain_mode(const char *value)
65 callchain_param.mode = CHAIN_FOLDED; 65 callchain_param.mode = CHAIN_FOLDED;
66 return 0; 66 return 0;
67 } 67 }
68
69 pr_err("Invalid callchain mode: %s\n", value);
70 return -1; 68 return -1;
71} 69}
72 70
@@ -82,8 +80,6 @@ static int parse_callchain_order(const char *value)
82 callchain_param.order_set = true; 80 callchain_param.order_set = true;
83 return 0; 81 return 0;
84 } 82 }
85
86 pr_err("Invalid callchain order: %s\n", value);
87 return -1; 83 return -1;
88} 84}
89 85
@@ -105,8 +101,6 @@ static int parse_callchain_sort_key(const char *value)
105 callchain_param.branch_callstack = 1; 101 callchain_param.branch_callstack = 1;
106 return 0; 102 return 0;
107 } 103 }
108
109 pr_err("Invalid callchain sort key: %s\n", value);
110 return -1; 104 return -1;
111} 105}
112 106
@@ -124,8 +118,6 @@ static int parse_callchain_value(const char *value)
124 callchain_param.value = CCVAL_COUNT; 118 callchain_param.value = CCVAL_COUNT;
125 return 0; 119 return 0;
126 } 120 }
127
128 pr_err("Invalid callchain config key: %s\n", value);
129 return -1; 121 return -1;
130} 122}
131 123
@@ -319,12 +311,27 @@ int perf_callchain_config(const char *var, const char *value)
319 311
320 return ret; 312 return ret;
321 } 313 }
322 if (!strcmp(var, "print-type")) 314 if (!strcmp(var, "print-type")){
323 return parse_callchain_mode(value); 315 int ret;
324 if (!strcmp(var, "order")) 316 ret = parse_callchain_mode(value);
325 return parse_callchain_order(value); 317 if (ret == -1)
326 if (!strcmp(var, "sort-key")) 318 pr_err("Invalid callchain mode: %s\n", value);
327 return parse_callchain_sort_key(value); 319 return ret;
320 }
321 if (!strcmp(var, "order")){
322 int ret;
323 ret = parse_callchain_order(value);
324 if (ret == -1)
325 pr_err("Invalid callchain order: %s\n", value);
326 return ret;
327 }
328 if (!strcmp(var, "sort-key")){
329 int ret;
330 ret = parse_callchain_sort_key(value);
331 if (ret == -1)
332 pr_err("Invalid callchain sort key: %s\n", value);
333 return ret;
334 }
328 if (!strcmp(var, "threshold")) { 335 if (!strcmp(var, "threshold")) {
329 callchain_param.min_percent = strtod(value, &endptr); 336 callchain_param.min_percent = strtod(value, &endptr);
330 if (value == endptr) { 337 if (value == endptr) {
@@ -678,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
678{ 685{
679 struct symbol *sym = node->sym; 686 struct symbol *sym = node->sym;
680 u64 left, right; 687 u64 left, right;
688 struct dso *left_dso = NULL;
689 struct dso *right_dso = NULL;
681 690
682 if (callchain_param.key == CCKEY_SRCLINE) { 691 if (callchain_param.key == CCKEY_SRCLINE) {
683 enum match_result match = match_chain_srcline(node, cnode); 692 enum match_result match = match_chain_srcline(node, cnode);
@@ -689,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
689 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { 698 if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) {
690 left = cnode->ms.sym->start; 699 left = cnode->ms.sym->start;
691 right = sym->start; 700 right = sym->start;
701 left_dso = cnode->ms.map->dso;
702 right_dso = node->map->dso;
692 } else { 703 } else {
693 left = cnode->ip; 704 left = cnode->ip;
694 right = node->ip; 705 right = node->ip;
695 } 706 }
696 707
697 if (left == right) { 708 if (left == right && left_dso == right_dso) {
698 if (node->branch) { 709 if (node->branch) {
699 cnode->branch_count++; 710 cnode->branch_count++;
700 711
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 4bb89373eb52..0dccdb89572c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -271,12 +271,17 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
271 return evsel; 271 return evsel;
272} 272}
273 273
274static bool perf_event_can_profile_kernel(void)
275{
276 return geteuid() == 0 || perf_event_paranoid() == -1;
277}
278
274struct perf_evsel *perf_evsel__new_cycles(bool precise) 279struct perf_evsel *perf_evsel__new_cycles(bool precise)
275{ 280{
276 struct perf_event_attr attr = { 281 struct perf_event_attr attr = {
277 .type = PERF_TYPE_HARDWARE, 282 .type = PERF_TYPE_HARDWARE,
278 .config = PERF_COUNT_HW_CPU_CYCLES, 283 .config = PERF_COUNT_HW_CPU_CYCLES,
279 .exclude_kernel = geteuid() != 0, 284 .exclude_kernel = !perf_event_can_profile_kernel(),
280 }; 285 };
281 struct perf_evsel *evsel; 286 struct perf_evsel *evsel;
282 287
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index f6257fb4f08c..39b15968eab1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms)
309static struct perf_evsel * 309static struct perf_evsel *
310__add_event(struct list_head *list, int *idx, 310__add_event(struct list_head *list, int *idx,
311 struct perf_event_attr *attr, 311 struct perf_event_attr *attr,
312 char *name, struct cpu_map *cpus, 312 char *name, struct perf_pmu *pmu,
313 struct list_head *config_terms, bool auto_merge_stats) 313 struct list_head *config_terms, bool auto_merge_stats)
314{ 314{
315 struct perf_evsel *evsel; 315 struct perf_evsel *evsel;
316 struct cpu_map *cpus = pmu ? pmu->cpus : NULL;
316 317
317 event_attr_init(attr); 318 event_attr_init(attr);
318 319
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx,
323 (*idx)++; 324 (*idx)++;
324 evsel->cpus = cpu_map__get(cpus); 325 evsel->cpus = cpu_map__get(cpus);
325 evsel->own_cpus = cpu_map__get(cpus); 326 evsel->own_cpus = cpu_map__get(cpus);
326 evsel->system_wide = !!cpus; 327 evsel->system_wide = pmu ? pmu->is_uncore : false;
327 evsel->auto_merge_stats = auto_merge_stats; 328 evsel->auto_merge_stats = auto_merge_stats;
328 329
329 if (name) 330 if (name)
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
1233 1234
1234 if (!head_config) { 1235 if (!head_config) {
1235 attr.type = pmu->type; 1236 attr.type = pmu->type;
1236 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats); 1237 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
1237 return evsel ? 0 : -ENOMEM; 1238 return evsel ? 0 : -ENOMEM;
1238 } 1239 }
1239 1240
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state,
1254 return -EINVAL; 1255 return -EINVAL;
1255 1256
1256 evsel = __add_event(list, &parse_state->idx, &attr, 1257 evsel = __add_event(list, &parse_state->idx, &attr,
1257 get_config_name(head_config), pmu->cpus, 1258 get_config_name(head_config), pmu,
1258 &config_terms, auto_merge_stats); 1259 &config_terms, auto_merge_stats);
1259 if (evsel) { 1260 if (evsel) {
1260 evsel->unit = info.unit; 1261 evsel->unit = info.unit;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index c42edeac451f..dcfdafdc2f1c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -8,6 +8,9 @@
8 8
9%{ 9%{
10#include <errno.h> 10#include <errno.h>
11#include <sys/types.h>
12#include <sys/stat.h>
13#include <unistd.h>
11#include "../perf.h" 14#include "../perf.h"
12#include "parse-events.h" 15#include "parse-events.h"
13#include "parse-events-bison.h" 16#include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
53 return token; 56 return token;
54} 57}
55 58
56static bool isbpf(yyscan_t scanner) 59static bool isbpf_suffix(char *text)
57{ 60{
58 char *text = parse_events_get_text(scanner);
59 int len = strlen(text); 61 int len = strlen(text);
60 62
61 if (len < 2) 63 if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
68 return false; 70 return false;
69} 71}
70 72
73static bool isbpf(yyscan_t scanner)
74{
75 char *text = parse_events_get_text(scanner);
76 struct stat st;
77
78 if (!isbpf_suffix(text))
79 return false;
80
81 return stat(text, &st) == 0;
82}
83
71/* 84/*
72 * This function is called when the parser gets two kind of input: 85 * This function is called when the parser gets two kind of input:
73 * 86 *
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index ac16a9db1fb5..1c4d7b4e4fb5 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void)
470 closedir(dir); 470 closedir(dir);
471} 471}
472 472
473static struct cpu_map *__pmu_cpumask(const char *path)
474{
475 FILE *file;
476 struct cpu_map *cpus;
477
478 file = fopen(path, "r");
479 if (!file)
480 return NULL;
481
482 cpus = cpu_map__read(file);
483 fclose(file);
484 return cpus;
485}
486
487/*
488 * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64)
489 * may have a "cpus" file.
490 */
491#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
492#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
493
473static struct cpu_map *pmu_cpumask(const char *name) 494static struct cpu_map *pmu_cpumask(const char *name)
474{ 495{
475 struct stat st;
476 char path[PATH_MAX]; 496 char path[PATH_MAX];
477 FILE *file;
478 struct cpu_map *cpus; 497 struct cpu_map *cpus;
479 const char *sysfs = sysfs__mountpoint(); 498 const char *sysfs = sysfs__mountpoint();
480 const char *templates[] = { 499 const char *templates[] = {
481 "%s/bus/event_source/devices/%s/cpumask", 500 CPUS_TEMPLATE_UNCORE,
482 "%s/bus/event_source/devices/%s/cpus", 501 CPUS_TEMPLATE_CPU,
483 NULL 502 NULL
484 }; 503 };
485 const char **template; 504 const char **template;
486 505
@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name)
489 508
490 for (template = templates; *template; template++) { 509 for (template = templates; *template; template++) {
491 snprintf(path, PATH_MAX, *template, sysfs, name); 510 snprintf(path, PATH_MAX, *template, sysfs, name);
492 if (stat(path, &st) == 0) 511 cpus = __pmu_cpumask(path);
493 break; 512 if (cpus)
513 return cpus;
494 } 514 }
495 515
496 if (!*template) 516 return NULL;
497 return NULL; 517}
498 518
499 file = fopen(path, "r"); 519static bool pmu_is_uncore(const char *name)
500 if (!file) 520{
501 return NULL; 521 char path[PATH_MAX];
522 struct cpu_map *cpus;
523 const char *sysfs = sysfs__mountpoint();
502 524
503 cpus = cpu_map__read(file); 525 snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
504 fclose(file); 526 cpus = __pmu_cpumask(path);
505 return cpus; 527 cpu_map__put(cpus);
528
529 return !!cpus;
506} 530}
507 531
508/* 532/*
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name)
617 641
618 pmu->cpus = pmu_cpumask(name); 642 pmu->cpus = pmu_cpumask(name);
619 643
644 pmu->is_uncore = pmu_is_uncore(name);
645
620 INIT_LIST_HEAD(&pmu->format); 646 INIT_LIST_HEAD(&pmu->format);
621 INIT_LIST_HEAD(&pmu->aliases); 647 INIT_LIST_HEAD(&pmu->aliases);
622 list_splice(&format, &pmu->format); 648 list_splice(&format, &pmu->format);
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 389e9729331f..fe0de0502ce2 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -22,6 +22,7 @@ struct perf_pmu {
22 char *name; 22 char *name;
23 __u32 type; 23 __u32 type;
24 bool selectable; 24 bool selectable;
25 bool is_uncore;
25 struct perf_event_attr *default_config; 26 struct perf_event_attr *default_config;
26 struct cpu_map *cpus; 27 struct cpu_map *cpus;
27 struct list_head format; /* HEAD struct perf_pmu_format -> list */ 28 struct list_head format; /* HEAD struct perf_pmu_format -> list */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a7ebd9fe8e40..76ab0709a20c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
374 tool->mmap2 = process_event_stub; 374 tool->mmap2 = process_event_stub;
375 if (tool->comm == NULL) 375 if (tool->comm == NULL)
376 tool->comm = process_event_stub; 376 tool->comm = process_event_stub;
377 if (tool->namespaces == NULL)
378 tool->namespaces = process_event_stub;
377 if (tool->fork == NULL) 379 if (tool->fork == NULL)
378 tool->fork = process_event_stub; 380 tool->fork = process_event_stub;
379 if (tool->exit == NULL) 381 if (tool->exit == NULL)
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 5c39f420111e..9cf781f0d8a2 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -810,12 +810,6 @@ static u64 ref_reloc(struct kmap *kmap)
810void __weak arch__sym_update(struct symbol *s __maybe_unused, 810void __weak arch__sym_update(struct symbol *s __maybe_unused,
811 GElf_Sym *sym __maybe_unused) { } 811 GElf_Sym *sym __maybe_unused) { }
812 812
813void __weak arch__adjust_sym_map_offset(GElf_Sym *sym, GElf_Shdr *shdr,
814 struct map *map __maybe_unused)
815{
816 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
817}
818
819int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 813int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
820 struct symsrc *runtime_ss, int kmodule) 814 struct symsrc *runtime_ss, int kmodule)
821{ 815{
@@ -996,7 +990,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
996 990
997 /* Adjust symbol to map to file offset */ 991 /* Adjust symbol to map to file offset */
998 if (adjust_kernel_syms) 992 if (adjust_kernel_syms)
999 arch__adjust_sym_map_offset(&sym, &shdr, map); 993 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1000 994
1001 if (strcmp(section_name, 995 if (strcmp(section_name,
1002 (curr_dso->short_name + 996 (curr_dso->short_name +
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 2bd6a1f01a1c..aad99e7e179b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -344,9 +344,6 @@ int setup_intlist(struct intlist **list, const char *list_str,
344#ifdef HAVE_LIBELF_SUPPORT 344#ifdef HAVE_LIBELF_SUPPORT
345bool elf__needs_adjust_symbols(GElf_Ehdr ehdr); 345bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
346void arch__sym_update(struct symbol *s, GElf_Sym *sym); 346void arch__sym_update(struct symbol *s, GElf_Sym *sym);
347void arch__adjust_sym_map_offset(GElf_Sym *sym,
348 GElf_Shdr *shdr __maybe_unused,
349 struct map *map __maybe_unused);
350#endif 347#endif
351 348
352#define SYMBOL_A 0 349#define SYMBOL_A 0
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 19e5db90394c..6eea7cff3d4e 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -15,9 +15,9 @@
15 15
16#include "syscalltbl.h" 16#include "syscalltbl.h"
17#include <stdlib.h> 17#include <stdlib.h>
18#include <linux/compiler.h>
18 19
19#ifdef HAVE_SYSCALL_TABLE 20#ifdef HAVE_SYSCALL_TABLE
20#include <linux/compiler.h>
21#include <string.h> 21#include <string.h>
22#include "string2.h" 22#include "string2.h"
23#include "util.h" 23#include "util.h"
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 4ba726c90870..54af60462130 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
23 23
24static inline int xyarray__max_y(struct xyarray *xy) 24static inline int xyarray__max_y(struct xyarray *xy)
25{ 25{
26 return xy->max_x; 26 return xy->max_y;
27} 27}
28 28
29static inline int xyarray__max_x(struct xyarray *xy) 29static inline int xyarray__max_x(struct xyarray *xy)
30{ 30{
31 return xy->max_y; 31 return xy->max_x;
32} 32}
33 33
34#endif /* _PERF_XYARRAY_H_ */ 34#endif /* _PERF_XYARRAY_H_ */
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 4c5a481a850c..d6e1c02ddcfe 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,7 +26,7 @@ endif
26 26
27ifneq ($(OUTPUT),) 27ifneq ($(OUTPUT),)
28# check that the output directory actually exists 28# check that the output directory actually exists
29OUTDIR := $(realpath $(OUTPUT)) 29OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
30$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 30$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
31endif 31endif
32 32
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dafba2c1e7d..bd9c6b31a504 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
92unsigned int crystal_hz; 92unsigned int crystal_hz;
93unsigned long long tsc_hz; 93unsigned long long tsc_hz;
94int base_cpu; 94int base_cpu;
95int do_migrate;
96double discover_bclk(unsigned int family, unsigned int model); 95double discover_bclk(unsigned int family, unsigned int model);
97unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ 96unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
98 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ 97 /* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
303 302
304int cpu_migrate(int cpu) 303int cpu_migrate(int cpu)
305{ 304{
306 if (!do_migrate)
307 return 0;
308
309 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); 305 CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
310 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); 306 CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
311 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) 307 if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
5007 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help 5003 {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
5008 {"Joules", no_argument, 0, 'J'}, 5004 {"Joules", no_argument, 0, 'J'},
5009 {"list", no_argument, 0, 'l'}, 5005 {"list", no_argument, 0, 'l'},
5010 {"migrate", no_argument, 0, 'm'},
5011 {"out", required_argument, 0, 'o'}, 5006 {"out", required_argument, 0, 'o'},
5012 {"quiet", no_argument, 0, 'q'}, 5007 {"quiet", no_argument, 0, 'q'},
5013 {"show", required_argument, 0, 's'}, 5008 {"show", required_argument, 0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
5019 5014
5020 progname = argv[0]; 5015 progname = argv[0];
5021 5016
5022 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v", 5017 while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
5023 long_options, &option_index)) != -1) { 5018 long_options, &option_index)) != -1) {
5024 switch (opt) { 5019 switch (opt) {
5025 case 'a': 5020 case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
5062 list_header_only++; 5057 list_header_only++;
5063 quiet++; 5058 quiet++;
5064 break; 5059 break;
5065 case 'm':
5066 do_migrate = 1;
5067 break;
5068 case 'o': 5060 case 'o':
5069 outf = fopen_or_die(optarg, "w"); 5061 outf = fopen_or_die(optarg, "w");
5070 break; 5062 break;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 9dc8f078a83c..1e8b6116ba3c 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -1,7 +1,7 @@
1ifneq ($(O),) 1ifneq ($(O),)
2ifeq ($(origin O), command line) 2ifeq ($(origin O), command line)
3 ABSOLUTE_O := $(realpath $(O)) 3 dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
4 dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist)) 4 ABSOLUTE_O := $(shell cd $(O) ; pwd)
5 OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) 5 OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
6 COMMAND_O := O=$(ABSOLUTE_O) 6 COMMAND_O := O=$(ABSOLUTE_O)
7ifeq ($(objtree),) 7ifeq ($(objtree),)
@@ -12,7 +12,7 @@ endif
12 12
13# check that the output directory actually exists 13# check that the output directory actually exists
14ifneq ($(OUTPUT),) 14ifneq ($(OUTPUT),)
15OUTDIR := $(realpath $(OUTPUT)) 15OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
16$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 16$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
17endif 17endif
18 18
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 26ce4f7168be..ff805643b5f7 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -52,6 +52,10 @@ override LDFLAGS =
52override MAKEFLAGS = 52override MAKEFLAGS =
53endif 53endif
54 54
55ifneq ($(KBUILD_SRC),)
56override LDFLAGS =
57endif
58
55BUILD := $(O) 59BUILD := $(O)
56ifndef BUILD 60ifndef BUILD
57 BUILD := $(KBUILD_OUTPUT) 61 BUILD := $(KBUILD_OUTPUT)
@@ -62,32 +66,32 @@ endif
62 66
63export BUILD 67export BUILD
64all: 68all:
65 for TARGET in $(TARGETS); do \ 69 @for TARGET in $(TARGETS); do \
66 BUILD_TARGET=$$BUILD/$$TARGET; \ 70 BUILD_TARGET=$$BUILD/$$TARGET; \
67 mkdir $$BUILD_TARGET -p; \ 71 mkdir $$BUILD_TARGET -p; \
68 make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ 72 make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
69 done; 73 done;
70 74
71run_tests: all 75run_tests: all
72 for TARGET in $(TARGETS); do \ 76 @for TARGET in $(TARGETS); do \
73 BUILD_TARGET=$$BUILD/$$TARGET; \ 77 BUILD_TARGET=$$BUILD/$$TARGET; \
74 make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ 78 make OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
75 done; 79 done;
76 80
77hotplug: 81hotplug:
78 for TARGET in $(TARGETS_HOTPLUG); do \ 82 @for TARGET in $(TARGETS_HOTPLUG); do \
79 BUILD_TARGET=$$BUILD/$$TARGET; \ 83 BUILD_TARGET=$$BUILD/$$TARGET; \
80 make OUTPUT=$$BUILD_TARGET -C $$TARGET;\ 84 make OUTPUT=$$BUILD_TARGET -C $$TARGET;\
81 done; 85 done;
82 86
83run_hotplug: hotplug 87run_hotplug: hotplug
84 for TARGET in $(TARGETS_HOTPLUG); do \ 88 @for TARGET in $(TARGETS_HOTPLUG); do \
85 BUILD_TARGET=$$BUILD/$$TARGET; \ 89 BUILD_TARGET=$$BUILD/$$TARGET; \
86 make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\ 90 make OUTPUT=$$BUILD_TARGET -C $$TARGET run_full_test;\
87 done; 91 done;
88 92
89clean_hotplug: 93clean_hotplug:
90 for TARGET in $(TARGETS_HOTPLUG); do \ 94 @for TARGET in $(TARGETS_HOTPLUG); do \
91 BUILD_TARGET=$$BUILD/$$TARGET; \ 95 BUILD_TARGET=$$BUILD/$$TARGET; \
92 make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ 96 make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
93 done; 97 done;
@@ -103,7 +107,7 @@ install:
103ifdef INSTALL_PATH 107ifdef INSTALL_PATH
104 @# Ask all targets to install their files 108 @# Ask all targets to install their files
105 mkdir -p $(INSTALL_PATH) 109 mkdir -p $(INSTALL_PATH)
106 for TARGET in $(TARGETS); do \ 110 @for TARGET in $(TARGETS); do \
107 BUILD_TARGET=$$BUILD/$$TARGET; \ 111 BUILD_TARGET=$$BUILD/$$TARGET; \
108 make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \ 112 make OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
109 done; 113 done;
@@ -128,7 +132,7 @@ else
128endif 132endif
129 133
130clean: 134clean:
131 for TARGET in $(TARGETS); do \ 135 @for TARGET in $(TARGETS); do \
132 BUILD_TARGET=$$BUILD/$$TARGET; \ 136 BUILD_TARGET=$$BUILD/$$TARGET; \
133 make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\ 137 make OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
134 done; 138 done;
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 36fb9161b34a..b2e02bdcd098 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
65static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, 65static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
66 int optlen) = 66 int optlen) =
67 (void *) BPF_FUNC_setsockopt; 67 (void *) BPF_FUNC_setsockopt;
68static int (*bpf_sk_redirect_map)(void *map, int key, int flags) = 68static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
69 (void *) BPF_FUNC_sk_redirect_map; 69 (void *) BPF_FUNC_sk_redirect_map;
70static int (*bpf_sock_map_update)(void *map, void *key, void *value, 70static int (*bpf_sock_map_update)(void *map, void *key, void *value,
71 unsigned long long flags) = 71 unsigned long long flags) =
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 20ecbaa0d85d..6c53a8906eff 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -12,6 +12,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
12 unsigned int start, end, possible_cpus = 0; 12 unsigned int start, end, possible_cpus = 0;
13 char buff[128]; 13 char buff[128];
14 FILE *fp; 14 FILE *fp;
15 int n;
15 16
16 fp = fopen(fcpu, "r"); 17 fp = fopen(fcpu, "r");
17 if (!fp) { 18 if (!fp) {
@@ -20,17 +21,17 @@ static inline unsigned int bpf_num_possible_cpus(void)
20 } 21 }
21 22
22 while (fgets(buff, sizeof(buff), fp)) { 23 while (fgets(buff, sizeof(buff), fp)) {
23 if (sscanf(buff, "%u-%u", &start, &end) == 2) { 24 n = sscanf(buff, "%u-%u", &start, &end);
24 possible_cpus = start == 0 ? end + 1 : 0; 25 if (n == 0) {
25 break; 26 printf("Failed to retrieve # possible CPUs!\n");
27 exit(1);
28 } else if (n == 1) {
29 end = start;
26 } 30 }
31 possible_cpus = start == 0 ? end + 1 : 0;
32 break;
27 } 33 }
28
29 fclose(fp); 34 fclose(fp);
30 if (!possible_cpus) {
31 printf("Failed to retrieve # possible CPUs!\n");
32 exit(1);
33 }
34 35
35 return possible_cpus; 36 return possible_cpus;
36} 37}
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
index 9b99bd10807d..2cd2d552938b 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk); 61 bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
62 62
63 if (!map) 63 if (!map)
64 return bpf_sk_redirect_map(&sock_map_rx, sk, 0); 64 return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
65 return bpf_sk_redirect_map(&sock_map_tx, sk, 0); 65 return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
66} 66}
67 67
68char _license[] SEC("license") = "GPL"; 68char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index fe3a443a1102..50ce52d2013d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc; 466 int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; 467 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
468 int ports[] = {50200, 50201, 50202, 50204}; 468 int ports[] = {50200, 50201, 50202, 50204};
469 int err, i, fd, sfd[6] = {0xdeadbeef}; 469 int err, i, fd, udp, sfd[6] = {0xdeadbeef};
470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; 470 u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
471 int parse_prog, verdict_prog; 471 int parse_prog, verdict_prog;
472 struct sockaddr_in addr; 472 struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
548 goto out_sockmap; 548 goto out_sockmap;
549 } 549 }
550 550
551 /* Test update with unsupported UDP socket */
552 udp = socket(AF_INET, SOCK_DGRAM, 0);
553 i = 0;
554 err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
555 if (!err) {
556 printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
557 i, udp);
558 goto out_sockmap;
559 }
560
551 /* Test update without programs */ 561 /* Test update without programs */
552 for (i = 0; i < 6; i++) { 562 for (i = 0; i < 6; i++) {
553 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); 563 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 26f3250bdcd2..64ae21f64489 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
1130 .errstr = "invalid bpf_context access", 1130 .errstr = "invalid bpf_context access",
1131 }, 1131 },
1132 { 1132 {
1133 "check skb->mark is writeable by SK_SKB", 1133 "invalid access of skb->mark for SK_SKB",
1134 .insns = {
1135 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1136 offsetof(struct __sk_buff, mark)),
1137 BPF_EXIT_INSN(),
1138 },
1139 .result = REJECT,
1140 .prog_type = BPF_PROG_TYPE_SK_SKB,
1141 .errstr = "invalid bpf_context access",
1142 },
1143 {
1144 "check skb->mark is not writeable by SK_SKB",
1134 .insns = { 1145 .insns = {
1135 BPF_MOV64_IMM(BPF_REG_0, 0), 1146 BPF_MOV64_IMM(BPF_REG_0, 0),
1136 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 1147 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1137 offsetof(struct __sk_buff, mark)), 1148 offsetof(struct __sk_buff, mark)),
1138 BPF_EXIT_INSN(), 1149 BPF_EXIT_INSN(),
1139 }, 1150 },
1140 .result = ACCEPT, 1151 .result = REJECT,
1141 .prog_type = BPF_PROG_TYPE_SK_SKB, 1152 .prog_type = BPF_PROG_TYPE_SK_SKB,
1153 .errstr = "invalid bpf_context access",
1142 }, 1154 },
1143 { 1155 {
1144 "check skb->tc_index is writeable by SK_SKB", 1156 "check skb->tc_index is writeable by SK_SKB",
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = {
6645 .errstr = "BPF_END uses reserved fields", 6657 .errstr = "BPF_END uses reserved fields",
6646 .result = REJECT, 6658 .result = REJECT,
6647 }, 6659 },
6660 {
6661 "arithmetic ops make PTR_TO_CTX unusable",
6662 .insns = {
6663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
6664 offsetof(struct __sk_buff, data) -
6665 offsetof(struct __sk_buff, mark)),
6666 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6667 offsetof(struct __sk_buff, mark)),
6668 BPF_EXIT_INSN(),
6669 },
6670 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
6671 .result = REJECT,
6672 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6673 },
6674 {
6675 "XDP pkt read, pkt_end mangling, bad access 1",
6676 .insns = {
6677 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6678 offsetof(struct xdp_md, data)),
6679 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6680 offsetof(struct xdp_md, data_end)),
6681 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6682 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6683 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
6684 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6685 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6686 BPF_MOV64_IMM(BPF_REG_0, 0),
6687 BPF_EXIT_INSN(),
6688 },
6689 .errstr = "R1 offset is outside of the packet",
6690 .result = REJECT,
6691 .prog_type = BPF_PROG_TYPE_XDP,
6692 },
6693 {
6694 "XDP pkt read, pkt_end mangling, bad access 2",
6695 .insns = {
6696 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6697 offsetof(struct xdp_md, data)),
6698 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6699 offsetof(struct xdp_md, data_end)),
6700 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6702 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
6703 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6704 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6705 BPF_MOV64_IMM(BPF_REG_0, 0),
6706 BPF_EXIT_INSN(),
6707 },
6708 .errstr = "R1 offset is outside of the packet",
6709 .result = REJECT,
6710 .prog_type = BPF_PROG_TYPE_XDP,
6711 },
6712 {
6713 "XDP pkt read, pkt_data' > pkt_end, good access",
6714 .insns = {
6715 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6716 offsetof(struct xdp_md, data)),
6717 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6718 offsetof(struct xdp_md, data_end)),
6719 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6720 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6721 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6722 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6723 BPF_MOV64_IMM(BPF_REG_0, 0),
6724 BPF_EXIT_INSN(),
6725 },
6726 .result = ACCEPT,
6727 .prog_type = BPF_PROG_TYPE_XDP,
6728 },
6729 {
6730 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
6731 .insns = {
6732 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6733 offsetof(struct xdp_md, data)),
6734 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6735 offsetof(struct xdp_md, data_end)),
6736 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6738 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
6739 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6740 BPF_MOV64_IMM(BPF_REG_0, 0),
6741 BPF_EXIT_INSN(),
6742 },
6743 .errstr = "R1 offset is outside of the packet",
6744 .result = REJECT,
6745 .prog_type = BPF_PROG_TYPE_XDP,
6746 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6747 },
6748 {
6749 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
6750 .insns = {
6751 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6752 offsetof(struct xdp_md, data)),
6753 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6754 offsetof(struct xdp_md, data_end)),
6755 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6756 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6757 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
6758 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6759 BPF_MOV64_IMM(BPF_REG_0, 0),
6760 BPF_EXIT_INSN(),
6761 },
6762 .errstr = "R1 offset is outside of the packet",
6763 .result = REJECT,
6764 .prog_type = BPF_PROG_TYPE_XDP,
6765 },
6766 {
6767 "XDP pkt read, pkt_end > pkt_data', good access",
6768 .insns = {
6769 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6770 offsetof(struct xdp_md, data)),
6771 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6772 offsetof(struct xdp_md, data_end)),
6773 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6775 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6776 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6777 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6778 BPF_MOV64_IMM(BPF_REG_0, 0),
6779 BPF_EXIT_INSN(),
6780 },
6781 .result = ACCEPT,
6782 .prog_type = BPF_PROG_TYPE_XDP,
6783 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6784 },
6785 {
6786 "XDP pkt read, pkt_end > pkt_data', bad access 1",
6787 .insns = {
6788 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6789 offsetof(struct xdp_md, data)),
6790 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6791 offsetof(struct xdp_md, data_end)),
6792 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6794 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6795 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6796 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6798 BPF_EXIT_INSN(),
6799 },
6800 .errstr = "R1 offset is outside of the packet",
6801 .result = REJECT,
6802 .prog_type = BPF_PROG_TYPE_XDP,
6803 },
6804 {
6805 "XDP pkt read, pkt_end > pkt_data', bad access 2",
6806 .insns = {
6807 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6808 offsetof(struct xdp_md, data)),
6809 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6810 offsetof(struct xdp_md, data_end)),
6811 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6812 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6813 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
6814 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6815 BPF_MOV64_IMM(BPF_REG_0, 0),
6816 BPF_EXIT_INSN(),
6817 },
6818 .errstr = "R1 offset is outside of the packet",
6819 .result = REJECT,
6820 .prog_type = BPF_PROG_TYPE_XDP,
6821 },
6822 {
6823 "XDP pkt read, pkt_data' < pkt_end, good access",
6824 .insns = {
6825 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6826 offsetof(struct xdp_md, data)),
6827 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6828 offsetof(struct xdp_md, data_end)),
6829 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6831 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6832 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6833 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6834 BPF_MOV64_IMM(BPF_REG_0, 0),
6835 BPF_EXIT_INSN(),
6836 },
6837 .result = ACCEPT,
6838 .prog_type = BPF_PROG_TYPE_XDP,
6839 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6840 },
6841 {
6842 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
6843 .insns = {
6844 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6845 offsetof(struct xdp_md, data)),
6846 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6847 offsetof(struct xdp_md, data_end)),
6848 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6850 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6851 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6852 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6853 BPF_MOV64_IMM(BPF_REG_0, 0),
6854 BPF_EXIT_INSN(),
6855 },
6856 .errstr = "R1 offset is outside of the packet",
6857 .result = REJECT,
6858 .prog_type = BPF_PROG_TYPE_XDP,
6859 },
6860 {
6861 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
6862 .insns = {
6863 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6864 offsetof(struct xdp_md, data)),
6865 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6866 offsetof(struct xdp_md, data_end)),
6867 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6869 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
6870 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6871 BPF_MOV64_IMM(BPF_REG_0, 0),
6872 BPF_EXIT_INSN(),
6873 },
6874 .errstr = "R1 offset is outside of the packet",
6875 .result = REJECT,
6876 .prog_type = BPF_PROG_TYPE_XDP,
6877 },
6878 {
6879 "XDP pkt read, pkt_end < pkt_data', good access",
6880 .insns = {
6881 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6882 offsetof(struct xdp_md, data)),
6883 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6884 offsetof(struct xdp_md, data_end)),
6885 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6886 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6887 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6888 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6889 BPF_MOV64_IMM(BPF_REG_0, 0),
6890 BPF_EXIT_INSN(),
6891 },
6892 .result = ACCEPT,
6893 .prog_type = BPF_PROG_TYPE_XDP,
6894 },
6895 {
6896 "XDP pkt read, pkt_end < pkt_data', bad access 1",
6897 .insns = {
6898 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6899 offsetof(struct xdp_md, data)),
6900 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6901 offsetof(struct xdp_md, data_end)),
6902 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6904 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
6905 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
6906 BPF_MOV64_IMM(BPF_REG_0, 0),
6907 BPF_EXIT_INSN(),
6908 },
6909 .errstr = "R1 offset is outside of the packet",
6910 .result = REJECT,
6911 .prog_type = BPF_PROG_TYPE_XDP,
6912 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6913 },
6914 {
6915 "XDP pkt read, pkt_end < pkt_data', bad access 2",
6916 .insns = {
6917 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6918 offsetof(struct xdp_md, data)),
6919 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6920 offsetof(struct xdp_md, data_end)),
6921 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6923 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
6924 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6925 BPF_MOV64_IMM(BPF_REG_0, 0),
6926 BPF_EXIT_INSN(),
6927 },
6928 .errstr = "R1 offset is outside of the packet",
6929 .result = REJECT,
6930 .prog_type = BPF_PROG_TYPE_XDP,
6931 },
6932 {
6933 "XDP pkt read, pkt_data' >= pkt_end, good access",
6934 .insns = {
6935 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6936 offsetof(struct xdp_md, data)),
6937 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6938 offsetof(struct xdp_md, data_end)),
6939 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6941 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6942 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6943 BPF_MOV64_IMM(BPF_REG_0, 0),
6944 BPF_EXIT_INSN(),
6945 },
6946 .result = ACCEPT,
6947 .prog_type = BPF_PROG_TYPE_XDP,
6948 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6949 },
6950 {
6951 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
6952 .insns = {
6953 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6954 offsetof(struct xdp_md, data)),
6955 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6956 offsetof(struct xdp_md, data_end)),
6957 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6958 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6959 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
6960 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6961 BPF_MOV64_IMM(BPF_REG_0, 0),
6962 BPF_EXIT_INSN(),
6963 },
6964 .errstr = "R1 offset is outside of the packet",
6965 .result = REJECT,
6966 .prog_type = BPF_PROG_TYPE_XDP,
6967 },
6968 {
6969 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
6970 .insns = {
6971 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6972 offsetof(struct xdp_md, data)),
6973 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6974 offsetof(struct xdp_md, data_end)),
6975 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6976 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6977 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
6978 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
6979 BPF_MOV64_IMM(BPF_REG_0, 0),
6980 BPF_EXIT_INSN(),
6981 },
6982 .errstr = "R1 offset is outside of the packet",
6983 .result = REJECT,
6984 .prog_type = BPF_PROG_TYPE_XDP,
6985 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6986 },
6987 {
6988 "XDP pkt read, pkt_end >= pkt_data', good access",
6989 .insns = {
6990 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
6991 offsetof(struct xdp_md, data)),
6992 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
6993 offsetof(struct xdp_md, data_end)),
6994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
6995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
6996 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
6997 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
6998 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
6999 BPF_MOV64_IMM(BPF_REG_0, 0),
7000 BPF_EXIT_INSN(),
7001 },
7002 .result = ACCEPT,
7003 .prog_type = BPF_PROG_TYPE_XDP,
7004 },
7005 {
7006 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7007 .insns = {
7008 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7009 offsetof(struct xdp_md, data)),
7010 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7011 offsetof(struct xdp_md, data_end)),
7012 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7013 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7014 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7015 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7016 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7017 BPF_MOV64_IMM(BPF_REG_0, 0),
7018 BPF_EXIT_INSN(),
7019 },
7020 .errstr = "R1 offset is outside of the packet",
7021 .result = REJECT,
7022 .prog_type = BPF_PROG_TYPE_XDP,
7023 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7024 },
7025 {
7026 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7027 .insns = {
7028 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7029 offsetof(struct xdp_md, data)),
7030 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7031 offsetof(struct xdp_md, data_end)),
7032 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7034 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7035 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7036 BPF_MOV64_IMM(BPF_REG_0, 0),
7037 BPF_EXIT_INSN(),
7038 },
7039 .errstr = "R1 offset is outside of the packet",
7040 .result = REJECT,
7041 .prog_type = BPF_PROG_TYPE_XDP,
7042 },
7043 {
7044 "XDP pkt read, pkt_data' <= pkt_end, good access",
7045 .insns = {
7046 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7047 offsetof(struct xdp_md, data)),
7048 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7049 offsetof(struct xdp_md, data_end)),
7050 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7051 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7052 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7053 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7055 BPF_MOV64_IMM(BPF_REG_0, 0),
7056 BPF_EXIT_INSN(),
7057 },
7058 .result = ACCEPT,
7059 .prog_type = BPF_PROG_TYPE_XDP,
7060 },
7061 {
7062 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7063 .insns = {
7064 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7065 offsetof(struct xdp_md, data)),
7066 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7067 offsetof(struct xdp_md, data_end)),
7068 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7070 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7071 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7072 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7073 BPF_MOV64_IMM(BPF_REG_0, 0),
7074 BPF_EXIT_INSN(),
7075 },
7076 .errstr = "R1 offset is outside of the packet",
7077 .result = REJECT,
7078 .prog_type = BPF_PROG_TYPE_XDP,
7079 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7080 },
7081 {
7082 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7083 .insns = {
7084 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7085 offsetof(struct xdp_md, data)),
7086 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7087 offsetof(struct xdp_md, data_end)),
7088 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7090 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7091 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7092 BPF_MOV64_IMM(BPF_REG_0, 0),
7093 BPF_EXIT_INSN(),
7094 },
7095 .errstr = "R1 offset is outside of the packet",
7096 .result = REJECT,
7097 .prog_type = BPF_PROG_TYPE_XDP,
7098 },
7099 {
7100 "XDP pkt read, pkt_end <= pkt_data', good access",
7101 .insns = {
7102 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7103 offsetof(struct xdp_md, data)),
7104 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7105 offsetof(struct xdp_md, data_end)),
7106 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7107 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7108 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7109 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7110 BPF_MOV64_IMM(BPF_REG_0, 0),
7111 BPF_EXIT_INSN(),
7112 },
7113 .result = ACCEPT,
7114 .prog_type = BPF_PROG_TYPE_XDP,
7115 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7116 },
7117 {
7118 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7119 .insns = {
7120 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7121 offsetof(struct xdp_md, data)),
7122 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7123 offsetof(struct xdp_md, data_end)),
7124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7125 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7126 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7127 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7128 BPF_MOV64_IMM(BPF_REG_0, 0),
7129 BPF_EXIT_INSN(),
7130 },
7131 .errstr = "R1 offset is outside of the packet",
7132 .result = REJECT,
7133 .prog_type = BPF_PROG_TYPE_XDP,
7134 },
7135 {
7136 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7137 .insns = {
7138 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7139 offsetof(struct xdp_md, data)),
7140 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7141 offsetof(struct xdp_md, data_end)),
7142 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7143 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7144 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7145 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7146 BPF_MOV64_IMM(BPF_REG_0, 0),
7147 BPF_EXIT_INSN(),
7148 },
7149 .errstr = "R1 offset is outside of the packet",
7150 .result = REJECT,
7151 .prog_type = BPF_PROG_TYPE_XDP,
7152 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7153 },
6648}; 7154};
6649 7155
6650static int probe_filter_length(const struct bpf_insn *fp) 7156static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 6b214b7b10fb..247b0a1899d7 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -2,14 +2,14 @@
2uname_M := $(shell uname -m 2>/dev/null || echo not) 2uname_M := $(shell uname -m 2>/dev/null || echo not)
3ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) 3ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
4 4
5TEST_GEN_PROGS := step_after_suspend_test
6
5ifeq ($(ARCH),x86) 7ifeq ($(ARCH),x86)
6TEST_GEN_PROGS := breakpoint_test 8TEST_GEN_PROGS += breakpoint_test
7endif 9endif
8ifneq (,$(filter $(ARCH),aarch64 arm64)) 10ifneq (,$(filter $(ARCH),aarch64 arm64))
9TEST_GEN_PROGS := breakpoint_test_arm64 11TEST_GEN_PROGS += breakpoint_test_arm64
10endif 12endif
11 13
12TEST_GEN_PROGS += step_after_suspend_test
13
14include ../lib.mk 14include ../lib.mk
15 15
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
index 2a1cb9908746..a4fd4c851a5b 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -1,6 +1,8 @@
1#!/bin/sh 1#!/bin/sh
2# description: Register/unregister many kprobe events 2# description: Register/unregister many kprobe events
3 3
4[ -f kprobe_events ] || exit_unsupported # this is configurable
5
4# ftrace fentry skip size depends on the machine architecture. 6# ftrace fentry skip size depends on the machine architecture.
5# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le 7# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le
6case `uname -m` in 8case `uname -m` in
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index 7c647f619d63..f0c0369ccb79 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -7,14 +7,17 @@ TEST_PROGS := run.sh
7include ../lib.mk 7include ../lib.mk
8 8
9all: 9all:
10 for DIR in $(SUBDIRS); do \ 10 @for DIR in $(SUBDIRS); do \
11 BUILD_TARGET=$(OUTPUT)/$$DIR; \ 11 BUILD_TARGET=$(OUTPUT)/$$DIR; \
12 mkdir $$BUILD_TARGET -p; \ 12 mkdir $$BUILD_TARGET -p; \
13 make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ 13 make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
14 if [ -e $$DIR/$(TEST_PROGS) ]; then
15 rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/;
16 fi
14 done 17 done
15 18
16override define RUN_TESTS 19override define RUN_TESTS
17 $(OUTPUT)/run.sh 20 @cd $(OUTPUT); ./run.sh
18endef 21endef
19 22
20override define INSTALL_RULE 23override define INSTALL_RULE
@@ -33,7 +36,7 @@ override define EMIT_TESTS
33endef 36endef
34 37
35override define CLEAN 38override define CLEAN
36 for DIR in $(SUBDIRS); do \ 39 @for DIR in $(SUBDIRS); do \
37 BUILD_TARGET=$(OUTPUT)/$$DIR; \ 40 BUILD_TARGET=$(OUTPUT)/$$DIR; \
38 mkdir $$BUILD_TARGET -p; \ 41 mkdir $$BUILD_TARGET -p; \
39 make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ 42 make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
index 849a90ffe8dd..a97e24edde39 100644
--- a/tools/testing/selftests/intel_pstate/Makefile
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -1,7 +1,9 @@
1CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE 1CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
2LDLIBS := $(LDLIBS) -lm 2LDLIBS := $(LDLIBS) -lm
3 3
4ifeq (,$(filter $(ARCH),x86))
4TEST_GEN_FILES := msr aperf 5TEST_GEN_FILES := msr aperf
6endif
5 7
6TEST_PROGS := run.sh 8TEST_PROGS := run.sh
7 9
diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh
index 7868c106b8b1..d3ab48f91cd6 100755
--- a/tools/testing/selftests/intel_pstate/run.sh
+++ b/tools/testing/selftests/intel_pstate/run.sh
@@ -29,13 +29,12 @@
29 29
30EVALUATE_ONLY=0 30EVALUATE_ONLY=0
31 31
32max_cpus=$(($(nproc)-1)) 32if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then
33 echo "$0 # Skipped: Test can only run on x86 architectures."
34 exit 0
35fi
33 36
34# compile programs 37max_cpus=$(($(nproc)-1))
35gcc aperf.c -Wall -D_GNU_SOURCE -o aperf -lm
36[ $? -ne 0 ] && echo "Problem compiling aperf.c." && exit 1
37gcc -o msr msr.c -lm
38[ $? -ne 0 ] && echo "Problem compiling msr.c." && exit 1
39 38
40function run_test () { 39function run_test () {
41 40
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 693616651da5..f65886af7c0c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -6,7 +6,14 @@ ifeq (0,$(MAKELEVEL))
6OUTPUT := $(shell pwd) 6OUTPUT := $(shell pwd)
7endif 7endif
8 8
9# The following are built by lib.mk common compile rules.
10# TEST_CUSTOM_PROGS should be used by tests that require
11# custom build rule and prevent common build rule use.
12# TEST_PROGS are for test shell scripts.
13# TEST_CUSTOM_PROGS and TEST_PROGS will be run by common run_tests
14# and install targets. Common clean doesn't touch them.
9TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS)) 15TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
16TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
10TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) 17TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
11 18
12all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) 19all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
@@ -20,17 +27,28 @@ define RUN_TESTS
20 test_num=`echo $$test_num+1 | bc`; \ 27 test_num=`echo $$test_num+1 | bc`; \
21 echo "selftests: $$BASENAME_TEST"; \ 28 echo "selftests: $$BASENAME_TEST"; \
22 echo "========================================"; \ 29 echo "========================================"; \
23 if [ ! -x $$BASENAME_TEST ]; then \ 30 if [ ! -x $$TEST ]; then \
24 echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\ 31 echo "selftests: Warning: file $$BASENAME_TEST is not executable, correct this.";\
25 echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \ 32 echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; \
26 else \ 33 else \
27 cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\ 34 cd `dirname $$TEST` > /dev/null; (./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo "ok 1..$$test_num selftests: $$BASENAME_TEST [PASS]") || echo "not ok 1..$$test_num selftests: $$BASENAME_TEST [FAIL]"; cd - > /dev/null;\
28 fi; \ 35 fi; \
29 done; 36 done;
30endef 37endef
31 38
32run_tests: all 39run_tests: all
33 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_PROGS)) 40ifneq ($(KBUILD_SRC),)
41 @if [ "X$(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)" != "X" ]; then
42 @rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT)
43 fi
44 @if [ "X$(TEST_PROGS)" != "X" ]; then
45 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS))
46 else
47 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS))
48 fi
49else
50 $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
51endif
34 52
35define INSTALL_RULE 53define INSTALL_RULE
36 @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \ 54 @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
@@ -38,10 +56,10 @@ define INSTALL_RULE
38 echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \ 56 echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
39 rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \ 57 rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
40 fi 58 fi
41 @if [ "X$(TEST_GEN_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \ 59 @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
42 mkdir -p ${INSTALL_PATH}; \ 60 mkdir -p ${INSTALL_PATH}; \
43 echo "rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \ 61 echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
44 rsync -a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \ 62 rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
45 fi 63 fi
46endef 64endef
47 65
@@ -53,15 +71,20 @@ else
53endif 71endif
54 72
55define EMIT_TESTS 73define EMIT_TESTS
56 @for TEST in $(TEST_GEN_PROGS) $(TEST_PROGS); do \ 74 @for TEST in $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS); do \
57 BASENAME_TEST=`basename $$TEST`; \ 75 BASENAME_TEST=`basename $$TEST`; \
58 echo "(./$$BASENAME_TEST && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \ 76 echo "(./$$BASENAME_TEST > /tmp/$$BASENAME_TEST 2>&1 && echo \"selftests: $$BASENAME_TEST [PASS]\") || echo \"selftests: $$BASENAME_TEST [FAIL]\""; \
59 done; 77 done;
60endef 78endef
61 79
62emit_tests: 80emit_tests:
63 $(EMIT_TESTS) 81 $(EMIT_TESTS)
64 82
83# define if isn't already. It is undefined in make O= case.
84ifeq ($(RM),)
85RM := rm -f
86endif
87
65define CLEAN 88define CLEAN
66 $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN) 89 $(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
67endef 90endef
@@ -69,6 +92,15 @@ endef
69clean: 92clean:
70 $(CLEAN) 93 $(CLEAN)
71 94
95# When make O= with kselftest target from main level
96# the following aren't defined.
97#
98ifneq ($(KBUILD_SRC),)
99LINK.c = $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH)
100COMPILE.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c
101LINK.S = $(CC) $(ASFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH)
102endif
103
72$(OUTPUT)/%:%.c 104$(OUTPUT)/%:%.c
73 $(LINK.c) $^ $(LDLIBS) -o $@ 105 $(LINK.c) $^ $(LDLIBS) -o $@
74 106
diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh
index daabb350697c..daabb350697c 100644..100755
--- a/tools/testing/selftests/memfd/run_tests.sh
+++ b/tools/testing/selftests/memfd/run_tests.sh
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
index 79a664aeb8d7..152823b6cb21 100644
--- a/tools/testing/selftests/mqueue/Makefile
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests
5include ../lib.mk 5include ../lib.mk
6 6
7override define RUN_TESTS 7override define RUN_TESTS
8 @./mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" 8 @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]"
9 @./mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" 9 @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]"
10endef 10endef
11 11
12override define EMIT_TESTS 12override define EMIT_TESTS
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 9801253e4802..c612d6e38c62 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -6,3 +6,4 @@ reuseport_bpf
6reuseport_bpf_cpu 6reuseport_bpf_cpu
7reuseport_bpf_numa 7reuseport_bpf_numa
8reuseport_dualstack 8reuseport_dualstack
9reuseaddr_conflict
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index de1f5772b878..d86bca991f45 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,9 +5,9 @@ CFLAGS += -I../../../../usr/include/
5 5
6TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh 6TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
7TEST_GEN_FILES = socket 7TEST_GEN_FILES = socket
8TEST_GEN_FILES += psock_fanout psock_tpacket 8TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
9TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa 9TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
10TEST_GEN_FILES += reuseport_dualstack msg_zerocopy 10TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict
11 11
12include ../lib.mk 12include ../lib.mk
13 13
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 40232af5b023..3ab6ec403905 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -55,7 +55,7 @@
55#include <unistd.h> 55#include <unistd.h>
56 56
57#ifndef SO_EE_ORIGIN_ZEROCOPY 57#ifndef SO_EE_ORIGIN_ZEROCOPY
58#define SO_EE_ORIGIN_ZEROCOPY SO_EE_ORIGIN_UPAGE 58#define SO_EE_ORIGIN_ZEROCOPY 5
59#endif 59#endif
60 60
61#ifndef SO_ZEROCOPY 61#ifndef SO_ZEROCOPY
diff --git a/tools/testing/selftests/net/netdevice.sh b/tools/testing/selftests/net/netdevice.sh
index 4e00568d70c2..90cb903c3381 100755
--- a/tools/testing/selftests/net/netdevice.sh
+++ b/tools/testing/selftests/net/netdevice.sh
@@ -178,7 +178,7 @@ if [ "$(id -u)" -ne 0 ];then
178 exit 0 178 exit 0
179fi 179fi
180 180
181ip -Version 2>/dev/null >/dev/null 181ip link show 2>/dev/null >/dev/null
182if [ $? -ne 0 ];then 182if [ $? -ne 0 ];then
183 echo "SKIP: Could not run test without the ip tool" 183 echo "SKIP: Could not run test without the ip tool"
184 exit 0 184 exit 0
diff --git a/tools/testing/selftests/net/reuseaddr_conflict.c b/tools/testing/selftests/net/reuseaddr_conflict.c
new file mode 100644
index 000000000000..7c5b12664b03
--- /dev/null
+++ b/tools/testing/selftests/net/reuseaddr_conflict.c
@@ -0,0 +1,114 @@
1/*
2 * Test for the regression introduced by
3 *
4 * b9470c27607b ("inet: kill smallest_size and smallest_port")
5 *
6 * If we open an ipv4 socket on a port with reuseaddr we shouldn't reset the tb
 7 * when we open the ipv6 counterpart, which is what was happening previously.
8 */
9#include <errno.h>
10#include <error.h>
11#include <arpa/inet.h>
12#include <netinet/in.h>
13#include <stdbool.h>
14#include <stdio.h>
15#include <sys/socket.h>
16#include <sys/types.h>
17#include <unistd.h>
18
19#define PORT 9999
20
21int open_port(int ipv6, int any)
22{
23 int fd = -1;
24 int reuseaddr = 1;
25 int v6only = 1;
26 int addrlen;
27 int ret = -1;
28 struct sockaddr *addr;
29 int family = ipv6 ? AF_INET6 : AF_INET;
30
31 struct sockaddr_in6 addr6 = {
32 .sin6_family = AF_INET6,
33 .sin6_port = htons(PORT),
34 .sin6_addr = in6addr_any
35 };
36 struct sockaddr_in addr4 = {
37 .sin_family = AF_INET,
38 .sin_port = htons(PORT),
39 .sin_addr.s_addr = any ? htonl(INADDR_ANY) : inet_addr("127.0.0.1"),
40 };
41
42
43 if (ipv6) {
44 addr = (struct sockaddr*)&addr6;
45 addrlen = sizeof(addr6);
46 } else {
47 addr = (struct sockaddr*)&addr4;
48 addrlen = sizeof(addr4);
49 }
50
51 if ((fd = socket(family, SOCK_STREAM, IPPROTO_TCP)) < 0) {
52 perror("socket");
53 goto out;
54 }
55
56 if (ipv6 && setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&v6only,
57 sizeof(v6only)) < 0) {
58 perror("setsockopt IPV6_V6ONLY");
59 goto out;
60 }
61
62 if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr,
63 sizeof(reuseaddr)) < 0) {
64 perror("setsockopt SO_REUSEADDR");
65 goto out;
66 }
67
68 if (bind(fd, addr, addrlen) < 0) {
69 perror("bind");
70 goto out;
71 }
72
73 if (any)
74 return fd;
75
76 if (listen(fd, 1) < 0) {
77 perror("listen");
78 goto out;
79 }
80 return fd;
81out:
82 close(fd);
83 return ret;
84}
85
86int main(void)
87{
88 int listenfd;
89 int fd1, fd2;
90
91 fprintf(stderr, "Opening 127.0.0.1:%d\n", PORT);
92 listenfd = open_port(0, 0);
93 if (listenfd < 0)
94 error(1, errno, "Couldn't open listen socket");
95 fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT);
96 fd1 = open_port(0, 1);
97 if (fd1 >= 0)
98 error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket");
99 fprintf(stderr, "Opening in6addr_any:%d\n", PORT);
100 fd1 = open_port(1, 1);
101 if (fd1 < 0)
102 error(1, errno, "Couldn't open ipv6 reuseport");
103 fprintf(stderr, "Opening INADDR_ANY:%d\n", PORT);
104 fd2 = open_port(0, 1);
105 if (fd2 >= 0)
106 error(1, 0, "Was allowed to create an ipv4 reuseport on a already bound non-reuseport socket");
107 close(fd1);
108 fprintf(stderr, "Opening INADDR_ANY:%d after closing ipv6 socket\n", PORT);
109 fd1 = open_port(0, 1);
110 if (fd1 >= 0)
111 error(1, 0, "Was allowed to create an ipv4 reuseport on an already bound non-reuseport socket with no ipv6");
112 fprintf(stderr, "Success");
113 return 0;
114}
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
index 00f286661dcd..dd4162fc0419 100644
--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
@@ -341,7 +341,7 @@ int main(int argc, char **argv)
341 return 0; 341 return 0;
342 case 'n': 342 case 'n':
343 t = atoi(optarg); 343 t = atoi(optarg);
344 if (t > ARRAY_SIZE(test_cases)) 344 if (t >= ARRAY_SIZE(test_cases))
345 error(1, 0, "Invalid test case: %d", t); 345 error(1, 0, "Invalid test case: %d", t);
346 all_tests = false; 346 all_tests = false;
347 test_cases[t].enabled = true; 347 test_cases[t].enabled = true;
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 67c3e2764303..24dbf634e2dd 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -6,10 +6,18 @@
6 */ 6 */
7 7
8#include <sys/types.h> 8#include <sys/types.h>
9#include <asm/siginfo.h> 9
10#define __have_siginfo_t 1 10/*
11#define __have_sigval_t 1 11 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
12#define __have_sigevent_t 1 12 * we need to use the kernel's siginfo.h file and trick glibc
13 * into accepting it.
14 */
15#if !__GLIBC_PREREQ(2, 26)
16# include <asm/siginfo.h>
17# define __have_siginfo_t 1
18# define __have_sigval_t 1
19# define __have_sigevent_t 1
20#endif
13 21
14#include <errno.h> 22#include <errno.h>
15#include <linux/filter.h> 23#include <linux/filter.h>
@@ -884,7 +892,7 @@ TEST_F_SIGNAL(TRAP, ign, SIGSYS)
884 syscall(__NR_getpid); 892 syscall(__NR_getpid);
885} 893}
886 894
887static struct siginfo TRAP_info; 895static siginfo_t TRAP_info;
888static volatile int TRAP_nr; 896static volatile int TRAP_nr;
889static void TRAP_action(int nr, siginfo_t *info, void *void_context) 897static void TRAP_action(int nr, siginfo_t *info, void *void_context)
890{ 898{
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
index 7d406c3973ba..97bb150837df 100644
--- a/tools/testing/selftests/sigaltstack/sas.c
+++ b/tools/testing/selftests/sigaltstack/sas.c
@@ -39,7 +39,11 @@ void my_usr1(int sig, siginfo_t *si, void *u)
39 stack_t stk; 39 stack_t stk;
40 struct stk_data *p; 40 struct stk_data *p;
41 41
42#if __s390x__
43 register unsigned long sp asm("%15");
44#else
42 register unsigned long sp asm("sp"); 45 register unsigned long sp asm("sp");
46#endif
43 47
44 if (sp < (unsigned long)sstack || 48 if (sp < (unsigned long)sstack ||
45 sp >= (unsigned long)sstack + SIGSTKSZ) { 49 sp >= (unsigned long)sstack + SIGSTKSZ) {
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
index 4981c6b6d050..8e04d0afcbd7 100644
--- a/tools/testing/selftests/sync/Makefile
+++ b/tools/testing/selftests/sync/Makefile
@@ -2,12 +2,16 @@ CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra
2CFLAGS += -I../../../../usr/include/ 2CFLAGS += -I../../../../usr/include/
3LDFLAGS += -pthread 3LDFLAGS += -pthread
4 4
5TEST_PROGS = sync_test 5.PHONY: all clean
6
7all: $(TEST_PROGS)
8 6
9include ../lib.mk 7include ../lib.mk
10 8
9# lib.mk TEST_CUSTOM_PROGS var is for custom tests that need special
10# build rules. lib.mk will run and install them.
11
12TEST_CUSTOM_PROGS := $(OUTPUT)/sync_test
13all: $(TEST_CUSTOM_PROGS)
14
11OBJS = sync_test.o sync.o 15OBJS = sync_test.o sync.o
12 16
13TESTS += sync_alloc.o 17TESTS += sync_alloc.o
@@ -18,6 +22,16 @@ TESTS += sync_stress_parallelism.o
18TESTS += sync_stress_consumer.o 22TESTS += sync_stress_consumer.o
19TESTS += sync_stress_merge.o 23TESTS += sync_stress_merge.o
20 24
21sync_test: $(OBJS) $(TESTS) 25OBJS := $(patsubst %,$(OUTPUT)/%,$(OBJS))
26TESTS := $(patsubst %,$(OUTPUT)/%,$(TESTS))
27
28$(TEST_CUSTOM_PROGS): $(TESTS) $(OBJS)
29 $(CC) -o $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS) $(CFLAGS) $(LDFLAGS)
30
31$(OBJS): $(OUTPUT)/%.o: %.c
32 $(CC) -c $^ -o $@
33
34$(TESTS): $(OUTPUT)/%.o: %.c
35 $(CC) -c $^ -o $@
22 36
23EXTRA_CLEAN := sync_test $(OBJS) $(TESTS) 37EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(OBJS) $(TESTS)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index c727b96a59b0..5fa02d86b35f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -17,5 +17,26 @@
17 "teardown": [ 17 "teardown": [
18 "$TC qdisc del dev $DEV1 ingress" 18 "$TC qdisc del dev $DEV1 ingress"
19 ] 19 ]
20 },
21 {
22 "id": "d052",
23 "name": "Add 1M filters with the same action",
24 "category": [
25 "filter",
26 "flower"
27 ],
28 "setup": [
29 "$TC qdisc add dev $DEV2 ingress",
30 "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
31 ],
32 "cmdUnderTest": "$TC -b $BATCH_FILE",
33 "expExitCode": "0",
34 "verifyCmd": "$TC actions list action gact",
35 "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
36 "matchCount": "1",
37 "teardown": [
38 "$TC qdisc del dev $DEV2 ingress",
39 "/bin/rm $BATCH_FILE"
40 ]
20 } 41 }
21] \ No newline at end of file 42]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index cd61b7844c0d..a8981c5d0aaf 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -88,7 +88,7 @@ def prepare_env(cmdlist):
88 exit(1) 88 exit(1)
89 89
90 90
91def test_runner(filtered_tests): 91def test_runner(filtered_tests, args):
92 """ 92 """
93 Driver function for the unit tests. 93 Driver function for the unit tests.
94 94
@@ -105,6 +105,8 @@ def test_runner(filtered_tests):
105 for tidx in testlist: 105 for tidx in testlist:
106 result = True 106 result = True
107 tresult = "" 107 tresult = ""
108 if "flower" in tidx["category"] and args.device == None:
109 continue
108 print("Test " + tidx["id"] + ": " + tidx["name"]) 110 print("Test " + tidx["id"] + ": " + tidx["name"])
109 prepare_env(tidx["setup"]) 111 prepare_env(tidx["setup"])
110 (p, procout) = exec_cmd(tidx["cmdUnderTest"]) 112 (p, procout) = exec_cmd(tidx["cmdUnderTest"])
@@ -150,7 +152,11 @@ def ns_create():
150 exec_cmd(cmd, False) 152 exec_cmd(cmd, False)
151 cmd = 'ip link set $DEV0 up' 153 cmd = 'ip link set $DEV0 up'
152 exec_cmd(cmd, False) 154 exec_cmd(cmd, False)
153 cmd = 'ip -s $NS link set $DEV1 up' 155 cmd = 'ip -n $NS link set $DEV1 up'
156 exec_cmd(cmd, False)
157 cmd = 'ip link set $DEV2 netns $NS'
158 exec_cmd(cmd, False)
159 cmd = 'ip -n $NS link set $DEV2 up'
154 exec_cmd(cmd, False) 160 exec_cmd(cmd, False)
155 161
156 162
@@ -211,7 +217,8 @@ def set_args(parser):
211 help='Execute the single test case with specified ID') 217 help='Execute the single test case with specified ID')
212 parser.add_argument('-i', '--id', action='store_true', dest='gen_id', 218 parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
213 help='Generate ID numbers for new test cases') 219 help='Generate ID numbers for new test cases')
214 return parser 220 parser.add_argument('-d', '--device',
221 help='Execute the test case in flower category')
215 return parser 222 return parser
216 223
217 224
@@ -225,6 +232,8 @@ def check_default_settings(args):
225 232
226 if args.path != None: 233 if args.path != None:
227 NAMES['TC'] = args.path 234 NAMES['TC'] = args.path
235 if args.device != None:
236 NAMES['DEV2'] = args.device
228 if not os.path.isfile(NAMES['TC']): 237 if not os.path.isfile(NAMES['TC']):
229 print("The specified tc path " + NAMES['TC'] + " does not exist.") 238 print("The specified tc path " + NAMES['TC'] + " does not exist.")
230 exit(1) 239 exit(1)
@@ -381,14 +390,17 @@ def set_operation_mode(args):
381 if (len(alltests) == 0): 390 if (len(alltests) == 0):
382 print("Cannot find a test case with ID matching " + target_id) 391 print("Cannot find a test case with ID matching " + target_id)
383 exit(1) 392 exit(1)
384 catresults = test_runner(alltests) 393 catresults = test_runner(alltests, args)
385 print("All test results: " + "\n\n" + catresults) 394 print("All test results: " + "\n\n" + catresults)
386 elif (len(target_category) > 0): 395 elif (len(target_category) > 0):
396 if (target_category == "flower") and args.device == None:
397 print("Please specify a NIC device (-d) to run category flower")
398 exit(1)
387 if (target_category not in ucat): 399 if (target_category not in ucat):
388 print("Specified category is not present in this file.") 400 print("Specified category is not present in this file.")
389 exit(1) 401 exit(1)
390 else: 402 else:
391 catresults = test_runner(testcases[target_category]) 403 catresults = test_runner(testcases[target_category], args)
392 print("Category " + target_category + "\n\n" + catresults) 404 print("Category " + target_category + "\n\n" + catresults)
393 405
394 ns_destroy() 406 ns_destroy()
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
new file mode 100755
index 000000000000..707c6bfef689
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -0,0 +1,62 @@
1#!/usr/bin/python3
2
3"""
4tdc_batch.py - a script to generate TC batch file
5
6Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
7"""
8
9import argparse
10
11parser = argparse.ArgumentParser(description='TC batch file generator')
12parser.add_argument("device", help="device name")
13parser.add_argument("file", help="batch file name")
14parser.add_argument("-n", "--number", type=int,
15 help="how many lines in batch file")
16parser.add_argument("-o", "--skip_sw",
17 help="skip_sw (offload), by default skip_hw",
18 action="store_true")
19parser.add_argument("-s", "--share_action",
20 help="all filters share the same action",
21 action="store_true")
22parser.add_argument("-p", "--prio",
23 help="all filters have different prio",
24 action="store_true")
25args = parser.parse_args()
26
27device = args.device
28file = open(args.file, 'w')
29
30number = 1
31if args.number:
32 number = args.number
33
34skip = "skip_hw"
35if args.skip_sw:
36 skip = "skip_sw"
37
38share_action = ""
39if args.share_action:
40 share_action = "index 1"
41
42prio = "prio 1"
43if args.prio:
44 prio = ""
45 if number > 0x4000:
46 number = 0x4000
47
48index = 0
49for i in range(0x100):
50 for j in range(0x100):
51 for k in range(0x100):
52 mac = ("%02x:%02x:%02x" % (i, j, k))
53 src_mac = "e4:11:00:" + mac
54 dst_mac = "e4:12:00:" + mac
55 cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
56 "src_mac %s dst_mac %s action drop %s" %
57 (device, prio, skip, src_mac, dst_mac, share_action))
58 file.write("%s\n" % cmd)
59 index += 1
60 if index >= number:
61 file.close()
62 exit(0)
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 01087375a7c3..b6352515c1b5 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -12,6 +12,8 @@ NAMES = {
12 # Name of veth devices to be created for the namespace 12 # Name of veth devices to be created for the namespace
13 'DEV0': 'v0p0', 13 'DEV0': 'v0p0',
14 'DEV1': 'v0p1', 14 'DEV1': 'v0p1',
15 'DEV2': '',
16 'BATCH_FILE': './batch.txt',
15 # Name of the namespace to use 17 # Name of the namespace to use
16 'NS': 'tcut' 18 'NS': 'tcut'
17 } 19 }
diff --git a/tools/testing/selftests/timers/set-timer-lat.c b/tools/testing/selftests/timers/set-timer-lat.c
index 9c92b7bd5641..50da45437daa 100644
--- a/tools/testing/selftests/timers/set-timer-lat.c
+++ b/tools/testing/selftests/timers/set-timer-lat.c
@@ -143,7 +143,8 @@ int setup_timer(int clock_id, int flags, int interval, timer_t *tm1)
143 printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n", 143 printf("%-22s %s missing CAP_WAKE_ALARM? : [UNSUPPORTED]\n",
144 clockstring(clock_id), 144 clockstring(clock_id),
145 flags ? "ABSTIME":"RELTIME"); 145 flags ? "ABSTIME":"RELTIME");
146 return 0; 146 /* Indicate timer isn't set, so caller doesn't wait */
147 return 1;
147 } 148 }
148 printf("%s - timer_create() failed\n", clockstring(clock_id)); 149 printf("%s - timer_create() failed\n", clockstring(clock_id));
149 return -1; 150 return -1;
@@ -213,8 +214,9 @@ int do_timer(int clock_id, int flags)
213 int err; 214 int err;
214 215
215 err = setup_timer(clock_id, flags, interval, &tm1); 216 err = setup_timer(clock_id, flags, interval, &tm1);
217 /* Unsupported case - return 0 to not fail the test */
216 if (err) 218 if (err)
217 return err; 219 return err == 1 ? 0 : err;
218 220
219 while (alarmcount < 5) 221 while (alarmcount < 5)
220 sleep(1); 222 sleep(1);
@@ -228,18 +230,17 @@ int do_timer_oneshot(int clock_id, int flags)
228 timer_t tm1; 230 timer_t tm1;
229 const int interval = 0; 231 const int interval = 0;
230 struct timeval timeout; 232 struct timeval timeout;
231 fd_set fds;
232 int err; 233 int err;
233 234
234 err = setup_timer(clock_id, flags, interval, &tm1); 235 err = setup_timer(clock_id, flags, interval, &tm1);
236 /* Unsupported case - return 0 to not fail the test */
235 if (err) 237 if (err)
236 return err; 238 return err == 1 ? 0 : err;
237 239
238 memset(&timeout, 0, sizeof(timeout)); 240 memset(&timeout, 0, sizeof(timeout));
239 timeout.tv_sec = 5; 241 timeout.tv_sec = 5;
240 FD_ZERO(&fds);
241 do { 242 do {
242 err = select(FD_SETSIZE, &fds, NULL, NULL, &timeout); 243 err = select(0, NULL, NULL, NULL, &timeout);
243 } while (err == -1 && errno == EINTR); 244 } while (err == -1 && errno == EINTR);
244 245
245 timer_delete(tm1); 246 timer_delete(tm1);
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index a2c53a3d223d..de2f9ec8a87f 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
397 } 397 }
398} 398}
399 399
400static int copy_page(int ufd, unsigned long offset) 400static int __copy_page(int ufd, unsigned long offset, bool retry)
401{ 401{
402 struct uffdio_copy uffdio_copy; 402 struct uffdio_copy uffdio_copy;
403 403
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset)
418 fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", 418 fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
419 uffdio_copy.copy), exit(1); 419 uffdio_copy.copy), exit(1);
420 } else { 420 } else {
421 if (test_uffdio_copy_eexist) { 421 if (test_uffdio_copy_eexist && retry) {
422 test_uffdio_copy_eexist = false; 422 test_uffdio_copy_eexist = false;
423 retry_copy_page(ufd, &uffdio_copy, offset); 423 retry_copy_page(ufd, &uffdio_copy, offset);
424 } 424 }
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset)
427 return 0; 427 return 0;
428} 428}
429 429
430static int copy_page_retry(int ufd, unsigned long offset)
431{
432 return __copy_page(ufd, offset, true);
433}
434
435static int copy_page(int ufd, unsigned long offset)
436{
437 return __copy_page(ufd, offset, false);
438}
439
430static void *uffd_poll_thread(void *arg) 440static void *uffd_poll_thread(void *arg)
431{ 441{
432 unsigned long cpu = (unsigned long) arg; 442 unsigned long cpu = (unsigned long) arg;
@@ -544,7 +554,7 @@ static void *background_thread(void *arg)
544 for (page_nr = cpu * nr_pages_per_cpu; 554 for (page_nr = cpu * nr_pages_per_cpu;
545 page_nr < (cpu+1) * nr_pages_per_cpu; 555 page_nr < (cpu+1) * nr_pages_per_cpu;
546 page_nr++) 556 page_nr++)
547 copy_page(uffd, page_nr * page_size); 557 copy_page_retry(uffd, page_nr * page_size);
548 558
549 return NULL; 559 return NULL;
550} 560}
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd,
779 } 789 }
780} 790}
781 791
782static int uffdio_zeropage(int ufd, unsigned long offset) 792static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
783{ 793{
784 struct uffdio_zeropage uffdio_zeropage; 794 struct uffdio_zeropage uffdio_zeropage;
785 int ret; 795 int ret;
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
814 fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", 824 fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
815 uffdio_zeropage.zeropage), exit(1); 825 uffdio_zeropage.zeropage), exit(1);
816 } else { 826 } else {
817 if (test_uffdio_zeropage_eexist) { 827 if (test_uffdio_zeropage_eexist && retry) {
818 test_uffdio_zeropage_eexist = false; 828 test_uffdio_zeropage_eexist = false;
819 retry_uffdio_zeropage(ufd, &uffdio_zeropage, 829 retry_uffdio_zeropage(ufd, &uffdio_zeropage,
820 offset); 830 offset);
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset)
830 return 0; 840 return 0;
831} 841}
832 842
843static int uffdio_zeropage(int ufd, unsigned long offset)
844{
845 return __uffdio_zeropage(ufd, offset, false);
846}
847
833/* exercise UFFDIO_ZEROPAGE */ 848/* exercise UFFDIO_ZEROPAGE */
834static int userfaultfd_zeropage_test(void) 849static int userfaultfd_zeropage_test(void)
835{ 850{
diff --git a/tools/testing/selftests/watchdog/Makefile b/tools/testing/selftests/watchdog/Makefile
index f863c664e3d1..ee068511fd0b 100644
--- a/tools/testing/selftests/watchdog/Makefile
+++ b/tools/testing/selftests/watchdog/Makefile
@@ -1,8 +1,3 @@
1TEST_PROGS := watchdog-test 1TEST_GEN_PROGS := watchdog-test
2
3all: $(TEST_PROGS)
4 2
5include ../lib.mk 3include ../lib.mk
6
7clean:
8 rm -fr $(TEST_PROGS)
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 97f187e2663f..0a74a20ca32b 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
20BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) 20BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
21BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) 21BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
22 22
23CFLAGS := -O2 -g -std=gnu99 -pthread -Wall 23CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie
24 24
25UNAME_M := $(shell uname -m) 25UNAME_M := $(shell uname -m)
26CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) 26CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)