aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/stable/sysfs-bus-xen-backend9
-rw-r--r--Documentation/ABI/stable/sysfs-devices-system-xen_memory9
-rw-r--r--Documentation/ABI/testing/sysfs-driver-xen-blkback10
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt12
-rw-r--r--Documentation/arm64/sve.txt4
-rw-r--r--Documentation/device-mapper/dm-raid.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt3
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt14
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt6
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt1
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas-wdt.txt5
-rw-r--r--Documentation/fb/uvesafb.txt5
-rw-r--r--Documentation/filesystems/vfs.txt21
-rw-r--r--Documentation/hwmon/ina2xx2
-rw-r--r--Documentation/i2c/DMA-considerations10
-rw-r--r--Documentation/media/uapi/dvb/video_function_calls.rst1
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rw-r--r--Documentation/process/changes.rst2
-rw-r--r--Documentation/process/code-of-conduct.rst81
-rw-r--r--Documentation/process/code-of-conflict.rst28
-rw-r--r--Documentation/process/index.rst2
-rw-r--r--Documentation/scsi/scsi-parameters.txt5
-rw-r--r--Documentation/virtual/kvm/api.txt12
-rw-r--r--Documentation/x86/earlyprintk.txt25
-rw-r--r--MAINTAINERS83
-rw-r--r--Makefile27
-rw-r--r--arch/arc/Kconfig10
-rw-r--r--arch/arc/Makefile10
-rw-r--r--arch/arc/boot/dts/axc003.dtsi26
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi26
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi7
-rw-r--r--arch/arc/boot/dts/hsdk.dts11
-rw-r--r--arch/arc/configs/axs101_defconfig3
-rw-r--r--arch/arc/configs/axs103_defconfig3
-rw-r--r--arch/arc/configs/axs103_smp_defconfig3
-rw-r--r--arch/arc/configs/haps_hs_defconfig2
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/configs/nps_defconfig1
-rw-r--r--arch/arc/configs/nsim_700_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_defconfig2
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig2
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig2
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig1
-rw-r--r--arch/arc/include/asm/atomic.h2
-rw-r--r--arch/arc/include/asm/dma-mapping.h13
-rw-r--r--arch/arc/kernel/troubleshoot.c13
-rw-r--r--arch/arc/mm/cache.c36
-rw-r--r--arch/arc/mm/dma.c82
-rw-r--r--[-rwxr-xr-x]arch/arm/boot/dts/am335x-osd3358-sm-red.dts0
-rw-r--r--arch/arm/boot/dts/am4372.dtsi1
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts2
-rw-r--r--arch/arm/boot/dts/bcm63138.dtsi14
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts90
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts183
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi12
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts20
-rw-r--r--arch/arm/boot/dts/sama5d3_emac.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32mp157c.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-r40.dtsi3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/versatile_defconfig14
-rw-r--r--arch/arm/include/asm/kvm_host.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c39
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts2
-rw-r--r--arch/arm64/configs/defconfig3
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c29
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c2
-rw-r--r--arch/arm64/include/asm/jump_label.h4
-rw-r--r--arch/arm64/include/asm/kvm_host.h4
-rw-r--r--arch/arm64/kernel/Makefile1
-rw-r--r--arch/arm64/kernel/crash_core.c19
-rw-r--r--arch/arm64/kernel/machine_kexec.c11
-rw-r--r--arch/arm64/kvm/guest.c55
-rw-r--r--arch/arm64/kvm/hyp/switch.c9
-rw-r--r--arch/arm64/mm/hugetlbpage.c50
-rw-r--r--arch/arm64/mm/mmu.c10
-rw-r--r--arch/hexagon/include/asm/bitops.h4
-rw-r--r--arch/hexagon/kernel/dma.c2
-rw-r--r--arch/m68k/mac/misc.c10
-rw-r--r--arch/m68k/mm/mcfmmu.c2
-rw-r--r--arch/mips/include/asm/kvm_host.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/xway_dma.h1
-rw-r--r--arch/mips/kernel/vdso.c20
-rw-r--r--arch/mips/kvm/mmu.c10
-rw-r--r--arch/mips/lantiq/xway/dma.c4
-rw-r--r--arch/nds32/Kconfig4
-rw-r--r--arch/nds32/Makefile4
-rw-r--r--arch/nds32/include/asm/elf.h4
-rw-r--r--arch/nds32/include/asm/ftrace.h46
-rw-r--r--arch/nds32/include/asm/nds32.h1
-rw-r--r--arch/nds32/include/asm/uaccess.h229
-rw-r--r--arch/nds32/kernel/Makefile6
-rw-r--r--arch/nds32/kernel/atl2c.c3
-rw-r--r--arch/nds32/kernel/ex-entry.S2
-rw-r--r--arch/nds32/kernel/ex-exit.S4
-rw-r--r--arch/nds32/kernel/ftrace.c309
-rw-r--r--arch/nds32/kernel/module.c4
-rw-r--r--arch/nds32/kernel/stacktrace.c6
-rw-r--r--arch/nds32/kernel/traps.c42
-rw-r--r--arch/nds32/kernel/vmlinux.lds.S12
-rw-r--r--arch/nios2/Kconfig.debug9
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/iommu.h2
-rw-r--r--arch/powerpc/include/asm/mmu_context.h1
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S4
-rw-r--r--arch/powerpc/kernel/iommu.c25
-rw-r--r--arch/powerpc/kernel/tm.S20
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c97
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c39
-rw-r--r--arch/powerpc/lib/checksum_64.S3
-rw-r--r--arch/powerpc/lib/code-patching.c6
-rw-r--r--arch/powerpc/mm/init_64.c49
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c34
-rw-r--r--arch/powerpc/mm/numa.c7
-rw-r--r--arch/powerpc/mm/pkeys.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c2
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h7
-rw-r--r--arch/riscv/include/asm/tlb.h4
-rw-r--r--arch/riscv/kernel/setup.c9
-rw-r--r--arch/riscv/kernel/sys_riscv.c15
-rw-r--r--arch/s390/crypto/paes_s390.c2
-rw-r--r--arch/s390/include/asm/mmu.h8
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/priv.c30
-rw-r--r--arch/s390/kvm/vsie.c3
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/sparc/kernel/of_device_32.c4
-rw-r--r--arch/sparc/kernel/of_device_64.c3
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile23
-rw-r--r--arch/x86/boot/compressed/mem_encrypt.S19
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c1
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c1
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c1
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S66
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c1
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c1
-rw-r--r--arch/x86/events/core.c2
-rw-r--r--arch/x86/events/intel/lbr.c4
-rw-r--r--arch/x86/hyperv/hv_apic.c8
-rw-r--r--arch/x86/include/asm/atomic.h12
-rw-r--r--arch/x86/include/asm/atomic64_32.h8
-rw-r--r--arch/x86/include/asm/atomic64_64.h12
-rw-r--r--arch/x86/include/asm/fixmap.h10
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h16
-rw-r--r--arch/x86/include/asm/irqflags.h3
-rw-r--r--arch/x86/include/asm/kdebug.h12
-rw-r--r--arch/x86/include/asm/kvm_host.h27
-rw-r--r--arch/x86/include/asm/mem_encrypt.h7
-rw-r--r--arch/x86/include/asm/pgtable-2level.h9
-rw-r--r--arch/x86/include/asm/pgtable-3level.h7
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/pgtable_64.h23
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/include/asm/signal.h7
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/tlbflush.h40
-rw-r--r--arch/x86/include/asm/vgtod.h2
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/kernel/alternative.c9
-rw-r--r--arch/x86/kernel/apic/vector.c2
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c46
-rw-r--r--arch/x86/kernel/cpu/common.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.h17
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c27
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c53
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c24
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c17
-rw-r--r--arch/x86/kernel/dumpstack.c31
-rw-r--r--arch/x86/kernel/eisa.c10
-rw-r--r--arch/x86/kernel/head64.c20
-rw-r--r--arch/x86/kernel/head_64.S16
-rw-r--r--arch/x86/kernel/kvmclock.c52
-rw-r--r--arch/x86/kernel/paravirt.c4
-rw-r--r--arch/x86/kernel/process_32.c4
-rw-r--r--arch/x86/kernel/process_64.c12
-rw-r--r--arch/x86/kernel/topology.c4
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S19
-rw-r--r--arch/x86/kvm/lapic.c49
-rw-r--r--arch/x86/kvm/mmu.c35
-rw-r--r--arch/x86/kvm/svm.c26
-rw-r--r--arch/x86/kvm/vmx.c181
-rw-r--r--arch/x86/kvm/x86.c129
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/usercopy.c5
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init.c4
-rw-r--r--arch/x86/mm/mem_encrypt.c24
-rw-r--r--arch/x86/mm/pageattr.c25
-rw-r--r--arch/x86/mm/pgtable.c17
-rw-r--r--arch/x86/mm/pti.c2
-rw-r--r--arch/x86/mm/tlb.c7
-rw-r--r--arch/x86/platform/efi/efi_32.c7
-rw-r--r--arch/x86/xen/mmu_pv.c17
-rw-r--r--arch/x86/xen/pmu.c2
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/Makefile4
-rw-r--r--arch/xtensa/platforms/iss/setup.c25
-rw-r--r--block/bfq-cgroup.c4
-rw-r--r--block/bio.c5
-rw-r--r--block/blk-cgroup.c109
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-mq-tag.c13
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-throttle.c5
-rw-r--r--block/blk-wbt.c89
-rw-r--r--block/bsg.c8
-rw-r--r--block/elevator.c5
-rw-r--r--block/genhd.c6
-rw-r--r--block/partition-generic.c6
-rw-r--r--drivers/acpi/acpi_lpss.c2
-rw-r--r--drivers/acpi/bus.c13
-rw-r--r--drivers/android/binder_alloc.c43
-rw-r--r--drivers/ata/libata-core.c16
-rw-r--r--drivers/ata/pata_ftide010.c27
-rw-r--r--drivers/base/firmware_loader/main.c30
-rw-r--r--drivers/base/memory.c20
-rw-r--r--drivers/base/power/clock_ops.c2
-rw-r--r--drivers/block/floppy.c3
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/null_blk.h17
-rw-r--r--drivers/block/null_blk_main.c45
-rw-r--r--drivers/block/null_blk_zoned.c34
-rw-r--r--drivers/block/rbd.c235
-rw-r--r--drivers/block/xen-blkback/blkback.c99
-rw-r--r--drivers/block/xen-blkback/common.h14
-rw-r--r--drivers/block/xen-blkfront.c110
-rw-r--r--drivers/bluetooth/Kconfig1
-rw-r--r--drivers/bluetooth/btmtkuart.c8
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bus/ti-sysc.c37
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/Kconfig4
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c92
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c53
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c32
-rw-r--r--drivers/char/ipmi/kcs_bmc.c7
-rw-r--r--drivers/char/random.c11
-rw-r--r--drivers/clk/clk-npcm7xx.c4
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c18
-rw-r--r--drivers/clk/x86/clk-st.c2
-rw-r--r--drivers/clocksource/timer-atmel-pit.c20
-rw-r--r--drivers/clocksource/timer-fttmr010.c18
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c4
-rw-r--r--drivers/cpuidle/governors/menu.c13
-rw-r--r--drivers/crypto/caam/caamalg_qi.c6
-rw-r--r--drivers/crypto/caam/caampkc.c20
-rw-r--r--drivers/crypto/caam/jr.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c1
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c57
-rw-r--r--drivers/crypto/ccp/psp-dev.c46
-rw-r--r--drivers/crypto/chelsio/chtls/chtls.h5
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c7
-rw-r--r--drivers/crypto/vmx/aes_cbc.c30
-rw-r--r--drivers/crypto/vmx/aes_xts.c21
-rw-r--r--drivers/dax/device.c9
-rw-r--r--drivers/dma/mic_x100_dma.c4
-rw-r--r--drivers/firmware/arm_scmi/perf.c8
-rw-r--r--drivers/firmware/efi/Kconfig9
-rw-r--r--drivers/fpga/dfl-fme-pr.c2
-rw-r--r--drivers/gpio/gpio-adp5588.c24
-rw-r--r--drivers/gpio/gpio-dwapb.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c86
-rw-r--r--drivers/gpio/gpiolib-of.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c139
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c12
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c1
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c25
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h3
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c25
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h2
-rw-r--r--drivers/gpu/drm/drm_atomic.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c3
-rw-r--r--drivers/gpu/drm/drm_panel.c10
-rw-r--r--drivers/gpu/drm/drm_syncobj.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c27
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h34
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c33
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c34
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c20
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c88
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c33
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c4
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c3
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c17
-rw-r--r--drivers/gpu/drm/i915/intel_display.c15
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c228
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c11
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c92
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c47
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.h3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c18
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c110
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c24
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c1
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c42
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c24
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c2
-rw-r--r--drivers/hid/hid-apple.c9
-rw-r--r--drivers/hid/hid-core.c5
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c5
-rw-r--r--drivers/hid/hid-multitouch.c19
-rw-r--r--drivers/hid/hid-saitek.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c23
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c34
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c2
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/adt7475.c25
-rw-r--r--drivers/hwmon/ina2xx.c13
-rw-r--r--drivers/hwmon/nct6775.c74
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c1
-rw-r--r--drivers/hwtracing/intel_th/core.c16
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c55
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c1
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c15
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c7
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c7
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core-base.c11
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c13
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c1
-rw-r--r--drivers/infiniband/core/cache.c68
-rw-r--r--drivers/infiniband/core/cma.c12
-rw-r--r--drivers/infiniband/core/rdma_core.c2
-rw-r--r--drivers/infiniband/core/ucma.c8
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c68
-rw-r--r--drivers/infiniband/core/uverbs_main.c6
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c93
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c6
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c6
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c11
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c51
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/input/keyboard/atakbd.c74
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/touchscreen/egalax_ts.c6
-rw-r--r--drivers/iommu/amd_iommu.c6
-rw-r--r--drivers/iommu/intel-iommu.c6
-rw-r--r--drivers/iommu/intel-pasid.h2
-rw-r--r--drivers/iommu/rockchip-iommu.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/super.c8
-rw-r--r--drivers/md/dm-crypt.c10
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/md/dm-raid.c154
-rw-r--r--drivers/md/dm-thin-metadata.c36
-rw-r--r--drivers/md/dm-thin.c73
-rw-r--r--drivers/md/dm-verity-target.c24
-rw-r--r--drivers/md/md-cluster.c10
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5-log.h5
-rw-r--r--drivers/md/raid5.c6
-rw-r--r--drivers/media/i2c/mt9v111.c41
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c5
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss.c15
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/memory/ti-aemif.c2
-rw-r--r--drivers/mfd/omap-usb-host.c11
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/misc/mei/bus.c12
-rw-r--r--drivers/misc/mei/client.c2
-rw-r--r--drivers/misc/mei/hbm.c9
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/core/queue.c12
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/core/slot-gpio.c2
-rw-r--r--drivers/mmc/host/android-goldfish.c4
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c8
-rw-r--r--drivers/mmc/host/omap_hsmmc.c1
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c10
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c3
-rw-r--r--drivers/mtd/devices/m25p80.c26
-rw-r--r--drivers/mtd/mtdpart.c5
-rw-r--r--drivers/mtd/nand/raw/denali.c11
-rw-r--r--drivers/mtd/nand/raw/docg4.c4
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c4
-rw-r--r--drivers/net/appletalk/ipddp.c8
-rw-r--r--drivers/net/bonding/bond_main.c76
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c24
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h8
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c104
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h11
-rw-r--r--drivers/net/ethernet/amd/declance.c10
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c32
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c62
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c26
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c47
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c44
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c177
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h6
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c20
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c5
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c21
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c28
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c3
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c15
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c142
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c37
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c73
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c21
-rw-r--r--drivers/net/ethernet/lantiq_etop.c1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c13
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c20
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c6
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c83
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c45
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c211
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c45
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c23
-rw-r--r--drivers/net/ethernet/qualcomm/qca_7k.c76
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c110
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c89
-rw-r--r--drivers/net/ethernet/renesas/Kconfig1
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h10
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c16
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c8
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c49
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h13
-rw-r--r--drivers/net/ethernet/seeq/ether3.c5
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c3
-rw-r--r--drivers/net/ethernet/sfc/efx.c26
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c26
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c241
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c5
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/hyperv/netvsc.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c75
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mcr20a.c8
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/sfp-bus.c4
-rw-r--r--drivers/net/phy/sfp.c27
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/tun.c80
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/lan78xx.c17
-rw-r--r--drivers/net/usb/qmi_wwan.c44
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/smsc75xx.c3
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/sr9800.c3
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c14
-rw-r--r--drivers/net/vxlan.c3
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c50
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c3
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/hash.c51
-rw-r--r--drivers/net/xen-netback/interface.c3
-rw-r--r--drivers/net/xen-netfront.c32
-rw-r--r--drivers/nvme/host/multipath.c6
-rw-r--r--drivers/nvme/host/pci.c8
-rw-r--r--drivers/nvme/target/admin-cmd.c4
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/fcloop.c3
-rw-r--r--drivers/nvme/target/rdma.c27
-rw-r--r--drivers/of/base.c50
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h3
-rw-r--r--drivers/pci/controller/pci-hyperv.c39
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c11
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c18
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c35
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c143
-rw-r--r--drivers/pinctrl/pinctrl-amd.c33
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c24
-rw-r--r--drivers/platform/x86/alienware-wmi.c1
-rw-r--r--drivers/platform/x86/dell-smbios-wmi.c1
-rw-r--r--drivers/regulator/bd71837-regulator.c19
-rw-r--r--drivers/regulator/core.c4
-rw-r--r--drivers/regulator/of_regulator.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c86
-rw-r--r--drivers/s390/net/qeth_core_main.c13
-rw-r--r--drivers/s390/net/qeth_core_mpc.c33
-rw-r--r--drivers/s390/net/qeth_core_mpc.h4
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/Kconfig10
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/csiostor/csio_hw.c71
-rw-r--r--drivers/scsi/csiostor/csio_hw.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/hosts.c24
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c5
-rw-r--r--drivers/scsi/ipr.c106
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/lpfc/lpfc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c11
-rw-r--r--drivers/scsi/qedi/qedi.h7
-rw-r--r--drivers/scsi/qedi/qedi_main.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/scsi_lib.c21
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/ufs/ufshcd.c7
-rw-r--r--drivers/soc/fsl/qbman/qman.c3
-rw-r--r--drivers/soc/fsl/qe/ucc.c2
-rw-r--r--drivers/soundwire/stream.c23
-rw-r--r--drivers/spi/spi-fsl-dspi.c6
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-rspi.c34
-rw-r--r--drivers/spi/spi-sh-msiof.c28
-rw-r--r--drivers/spi/spi-tegra20-slink.c31
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--drivers/staging/erofs/Kconfig2
-rw-r--r--drivers/staging/erofs/super.c4
-rw-r--r--drivers/staging/fbtft/TODO4
-rw-r--r--drivers/staging/gasket/TODO13
-rw-r--r--drivers/staging/media/mt9t031/Kconfig6
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c7
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c5
-rw-r--r--drivers/staging/wilc1000/Makefile3
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c6
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c7
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h2
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c8
-rw-r--r--drivers/target/iscsi/iscsi_target.c31
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c149
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h2
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/qoriq_thermal.c27
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c11
-rw-r--r--drivers/thermal/rcar_thermal.c16
-rw-r--r--drivers/tty/hvc/hvc_console.c38
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c10
-rw-r--r--drivers/tty/serial/fsl_lpuart.c3
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/mvebu-uart.c1
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/tty/vt/vt_ioctl.c4
-rw-r--r--drivers/usb/class/cdc-acm.c73
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/common/common.c25
-rw-r--r--drivers/usb/common/roles.c15
-rw-r--r--drivers/usb/core/devio.c24
-rw-r--r--drivers/usb/core/driver.c28
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/message.c11
-rw-r--r--drivers/usb/core/of.c26
-rw-r--r--drivers/usb/core/quirks.c10
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/dwc2/platform.c4
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c15
-rw-r--r--drivers/usb/gadget/udc/net2280.c16
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c5
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-mem.c4
-rw-r--r--drivers/usb/host/xhci-plat.c27
-rw-r--r--drivers/usb/host/xhci.c30
-rw-r--r--drivers/usb/misc/uss720.c4
-rw-r--r--drivers/usb/misc/yurex.c8
-rw-r--r--drivers/usb/mtu3/mtu3_core.c6
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h1
-rw-r--r--drivers/usb/musb/musb_dsps.c12
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c2
-rw-r--r--drivers/usb/storage/scsiglue.c9
-rw-r--r--drivers/usb/storage/uas.c21
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/typec/bus.c7
-rw-r--r--drivers/usb/typec/class.c1
-rw-r--r--drivers/usb/typec/mux.c17
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/fbdev/efifb.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c5
-rw-r--r--drivers/video/fbdev/pxa168fb.c6
-rw-r--r--drivers/video/fbdev/stifb.c2
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/cpu_hotplug.c15
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntdev.c26
-rw-r--r--drivers/xen/grant-table.c27
-rw-r--r--drivers/xen/manage.c6
-rw-r--r--drivers/xen/mem-reservation.c4
-rw-r--r--drivers/xen/xen-balloon.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--fs/afs/proc.c15
-rw-r--r--fs/btrfs/ctree.h12
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/extent-tree.c17
-rw-r--r--fs/btrfs/inode.c117
-rw-r--r--fs/btrfs/ioctl.c35
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/tree-log.c48
-rw-r--r--fs/btrfs/tree-log.h10
-rw-r--r--fs/btrfs/volumes.c7
-rw-r--r--fs/buffer.c1
-rw-r--r--fs/ceph/super.c16
-rw-r--r--fs/cifs/Kconfig1
-rw-r--r--fs/cifs/cifs_unicode.c3
-rw-r--r--fs/cifs/cifssmb.c11
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/misc.c8
-rw-r--r--fs/cifs/readdir.c11
-rw-r--r--fs/cifs/smb2misc.c14
-rw-r--r--fs/cifs/smb2ops.c35
-rw-r--r--fs/cifs/smb2pdu.c32
-rw-r--r--fs/dax.c14
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext4/dir.c20
-rw-r--r--fs/ext4/ext4.h20
-rw-r--r--fs/ext4/inline.c4
-rw-r--r--fs/ext4/inode.c20
-rw-r--r--fs/ext4/mmp.c1
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/ext4/resize.c23
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/iomap.c2
-rw-r--r--fs/isofs/inode.c7
-rw-r--r--fs/nfs/nfs4proc.c31
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/nfs4trace.h4
-rw-r--r--fs/nfs/pnfs.c26
-rw-r--r--fs/nilfs2/alloc.c11
-rw-r--r--fs/nilfs2/alloc.h11
-rw-r--r--fs/nilfs2/bmap.c11
-rw-r--r--fs/nilfs2/bmap.h11
-rw-r--r--fs/nilfs2/btnode.c11
-rw-r--r--fs/nilfs2/btnode.h11
-rw-r--r--fs/nilfs2/btree.c11
-rw-r--r--fs/nilfs2/btree.h11
-rw-r--r--fs/nilfs2/cpfile.c11
-rw-r--r--fs/nilfs2/cpfile.h11
-rw-r--r--fs/nilfs2/dat.c11
-rw-r--r--fs/nilfs2/dat.h11
-rw-r--r--fs/nilfs2/dir.c11
-rw-r--r--fs/nilfs2/direct.c11
-rw-r--r--fs/nilfs2/direct.h11
-rw-r--r--fs/nilfs2/file.c11
-rw-r--r--fs/nilfs2/gcinode.c11
-rw-r--r--fs/nilfs2/ifile.c11
-rw-r--r--fs/nilfs2/ifile.h11
-rw-r--r--fs/nilfs2/inode.c11
-rw-r--r--fs/nilfs2/ioctl.c11
-rw-r--r--fs/nilfs2/mdt.c11
-rw-r--r--fs/nilfs2/mdt.h11
-rw-r--r--fs/nilfs2/namei.c11
-rw-r--r--fs/nilfs2/nilfs.h11
-rw-r--r--fs/nilfs2/page.c11
-rw-r--r--fs/nilfs2/page.h11
-rw-r--r--fs/nilfs2/recovery.c11
-rw-r--r--fs/nilfs2/segbuf.c11
-rw-r--r--fs/nilfs2/segbuf.h11
-rw-r--r--fs/nilfs2/segment.c11
-rw-r--r--fs/nilfs2/segment.h11
-rw-r--r--fs/nilfs2/sufile.c11
-rw-r--r--fs/nilfs2/sufile.h11
-rw-r--r--fs/nilfs2/super.c11
-rw-r--r--fs/nilfs2/sysfs.c11
-rw-r--r--fs/nilfs2/sysfs.h11
-rw-r--r--fs/nilfs2/the_nilfs.c11
-rw-r--r--fs/nilfs2/the_nilfs.h11
-rw-r--r--fs/notify/fsnotify.c13
-rw-r--r--fs/notify/mark.c6
-rw-r--r--fs/ocfs2/buffer_head_io.c1
-rw-r--r--fs/overlayfs/file.c23
-rw-r--r--fs/overlayfs/inode.c10
-rw-r--r--fs/overlayfs/super.c26
-rw-r--r--fs/proc/kcore.c1
-rw-r--r--fs/pstore/ram.c29
-rw-r--r--fs/pstore/ram_core.c17
-rw-r--r--fs/quota/quota.c14
-rw-r--r--fs/ubifs/super.c7
-rw-r--r--fs/ubifs/xattr.c24
-rw-r--r--fs/udf/super.c93
-rw-r--r--fs/xattr.c24
-rw-r--r--fs/xfs/libxfs/xfs_attr.c28
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c10
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c24
-rw-r--r--fs/xfs/libxfs/xfs_format.h2
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c30
-rw-r--r--fs/xfs/scrub/alloc.c1
-rw-r--r--fs/xfs/scrub/inode.c4
-rw-r--r--fs/xfs/xfs_bmap_util.c20
-rw-r--r--fs/xfs/xfs_buf_item.c119
-rw-r--r--fs/xfs/xfs_buf_item.h1
-rw-r--r--fs/xfs/xfs_inode.c10
-rw-r--r--fs/xfs/xfs_iops.c12
-rw-r--r--fs/xfs/xfs_log_recover.c10
-rw-r--r--fs/xfs/xfs_reflink.c137
-rw-r--r--fs/xfs/xfs_trace.h1
-rw-r--r--fs/xfs/xfs_trans.c10
-rw-r--r--fs/xfs/xfs_trans_buf.c99
-rw-r--r--include/asm-generic/io.h3
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_panel.h1
-rw-r--r--include/linux/arm-smccc.h38
-rw-r--r--include/linux/blk-cgroup.h45
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/compiler-gcc.h14
-rw-r--r--include/linux/compiler_types.h8
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/genhd.h5
-rw-r--r--include/linux/hid.h1
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/kvm_host.h2
-rw-r--r--include/linux/mfd/da9063/pdata.h16
-rw-r--r--include/linux/mfd/rohm-bd718x7.h33
-rw-r--r--include/linux/mlx5/driver.h8
-rw-r--r--include/linux/mlx5/transobj.h2
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mm_types_task.h2
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/of.h33
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/quota.h8
-rw-r--r--include/linux/regulator/machine.h6
-rw-r--r--include/linux/spi/spi-mem.h7
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/timekeeping.h4
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/vga_switcheroo.h3
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmacache.h5
-rw-r--r--include/media/v4l2-fh.h4
-rw-r--r--include/net/act_api.h7
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/net/inet_sock.h6
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h2
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/nfc/hci.h2
-rw-r--r--include/net/pkt_cls.h25
-rw-r--r--include/net/regulatory.h4
-rw-r--r--include/net/tls.h19
-rw-r--r--include/sound/hdaudio.h1
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/trace/events/rxrpc.h4
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/rds.h1
-rw-r--r--include/uapi/linux/vhost.h2
-rw-r--r--include/uapi/sound/skl-tplg-interface.h106
-rw-r--r--include/xen/mem-reservation.h7
-rw-r--r--ipc/shm.c1
-rw-r--r--kernel/bpf/btf.c2
-rw-r--r--kernel/bpf/hashtab.c23
-rw-r--r--kernel/bpf/sockmap.c166
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cpu.c37
-rw-r--r--kernel/dma/Kconfig3
-rw-r--r--kernel/dma/direct.c4
-rw-r--r--kernel/events/core.c21
-rw-r--r--kernel/events/hw_breakpoint.c13
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/jump_label.c2
-rw-r--r--kernel/locking/lockdep.c1
-rw-r--r--kernel/locking/mutex.c3
-rw-r--r--kernel/locking/test-ww_mutex.c2
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/printk/printk.c13
-rw-r--r--kernel/printk/printk_safe.c4
-rw-r--r--kernel/sched/debug.c6
-rw-r--r--kernel/sched/fair.c26
-rw-r--r--kernel/sched/topology.c5
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/time/clocksource.c40
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/watchdog.c4
-rw-r--r--kernel/watchdog_hld.c2
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/percpu_counter.c1
-rw-r--r--lib/rhashtable.c1
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/Makefile3
-rw-r--r--mm/backing-dev.c5
-rw-r--r--mm/debug.c4
-rw-r--r--mm/fadvise.c81
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/kmemleak.c9
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory_hotplug.c3
-rw-r--r--mm/oom_kill.c14
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/readahead.c45
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slub.c1
-rw-r--r--mm/util.c11
-rw-r--r--mm/vmacache.c38
-rw-r--r--mm/vmscan.c11
-rw-r--r--net/batman-adv/bat_v_elp.c10
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c10
-rw-r--r--net/batman-adv/gateway_client.c11
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/network-coding.c27
-rw-r--r--net/batman-adv/soft-interface.c25
-rw-r--r--net/batman-adv/sysfs.c30
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/batman-adv/tvlv.c8
-rw-r--r--net/bluetooth/mgmt.c7
-rw-r--r--net/bluetooth/smp.c45
-rw-r--r--net/bluetooth/smp.h3
-rw-r--r--net/bridge/br_netfilter_hooks.c3
-rw-r--r--net/core/dev.c1
-rw-r--r--net/core/devlink.c3
-rw-r--r--net/core/ethtool.c10
-rw-r--r--net/core/filter.c62
-rw-r--r--net/core/neighbour.c13
-rw-r--r--net/core/netpoll.c41
-rw-r--r--net/core/rtnetlink.c18
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/dccp/input.c4
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/dsa/slave.c4
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/igmp.c11
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/ip_fragment.c1
-rw-r--r--net/ipv4/ip_gre.c8
-rw-r--r--net/ipv4/ip_sockglue.c3
-rw-r--r--net/ipv4/ip_tunnel.c9
-rw-r--r--net/ipv4/netfilter/Kconfig8
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_bbr.c42
-rw-r--r--net/ipv4/tcp_input.c8
-rw-r--r--net/ipv4/tcp_ipv4.c10
-rw-r--r--net/ipv4/tcp_minisocks.c3
-rw-r--r--net/ipv4/udp.c49
-rw-r--r--net/ipv4/xfrm4_input.c1
-rw-r--r--net/ipv4/xfrm4_mode_transport.c4
-rw-r--r--net/ipv6/addrconf.c10
-rw-r--r--net/ipv6/af_inet6.c10
-rw-r--r--net/ipv6/ip6_fib.c7
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_tunnel.c23
-rw-r--r--net/ipv6/ip6_vti.c5
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c1
-rw-r--r--net/ipv6/route.c61
-rw-r--r--net/ipv6/udp.c65
-rw-r--r--net/ipv6/xfrm6_input.c1
-rw-r--r--net/ipv6/xfrm6_mode_transport.c4
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/iucv/af_iucv.c38
-rw-r--r--net/iucv/iucv.c2
-rw-r--r--net/mac80211/ibss.c22
-rw-r--r--net/mac80211/iface.c3
-rw-r--r--net/mac80211/main.c28
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_hwmp.c13
-rw-r--r--net/mac80211/mlme.c70
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/status.c11
-rw-r--r--net/mac80211/tdls.c8
-rw-r--r--net/mac80211/tx.c60
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/mpls/af_mpls.c6
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/netfilter/Kconfig12
-rw-r--r--net/netfilter/nf_conntrack_proto.c26
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c19
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_icmp.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_icmpv6.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c23
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c21
-rw-r--r--net/netfilter/nf_tables_api.c1
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c6
-rw-r--r--net/netfilter/nfnetlink_queue.c1
-rw-r--r--net/netfilter/nft_ct.c59
-rw-r--r--net/netfilter/nft_osf.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c28
-rw-r--r--net/netfilter/xt_CHECKSUM.c22
-rw-r--r--net/netfilter/xt_cluster.c14
-rw-r--r--net/netfilter/xt_hashlimit.c18
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/nfc/hci/core.c10
-rw-r--r--net/openvswitch/conntrack.c6
-rw-r--r--net/packet/af_packet.c44
-rw-r--r--net/packet/internal.h1
-rw-r--r--net/rds/Kconfig2
-rw-r--r--net/rds/bind.c5
-rw-r--r--net/rds/ib.c9
-rw-r--r--net/rds/ib.h2
-rw-r--r--net/rds/tcp.c1
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/rxrpc/ar-internal.h36
-rw-r--r--net/rxrpc/call_accept.c45
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/conn_client.c4
-rw-r--r--net/rxrpc/conn_object.c14
-rw-r--r--net/rxrpc/input.c90
-rw-r--r--net/rxrpc/local_object.c32
-rw-r--r--net/rxrpc/output.c54
-rw-r--r--net/rxrpc/peer_event.c46
-rw-r--r--net/rxrpc/peer_object.c52
-rw-r--r--net/rxrpc/protocol.h15
-rw-r--r--net/sched/act_api.c86
-rw-r--r--net/sched/act_bpf.c8
-rw-r--r--net/sched/act_connmark.c8
-rw-r--r--net/sched/act_csum.c8
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c108
-rw-r--r--net/sched/act_ipt.c18
-rw-r--r--net/sched/act_mirred.c8
-rw-r--r--net/sched/act_nat.c8
-rw-r--r--net/sched/act_pedit.c26
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/act_sample.c10
-rw-r--r--net/sched/act_simple.c8
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/act_skbmod.c8
-rw-r--r--net/sched/act_tunnel_key.c36
-rw-r--r--net/sched/act_vlan.c8
-rw-r--r--net/sched/cls_api.c6
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_cake.c24
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/proc.c8
-rw-r--r--net/sctp/socket.c56
-rw-r--r--net/sctp/transport.c12
-rw-r--r--net/smc/af_smc.c26
-rw-r--r--net/smc/smc_clc.c14
-rw-r--r--net/smc/smc_close.c14
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/socket.c22
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/bearer.c12
-rw-r--r--net/tipc/diag.c2
-rw-r--r--net/tipc/link.c45
-rw-r--r--net/tipc/link.h3
-rw-r--r--net/tipc/name_table.c10
-rw-r--r--net/tipc/name_table.h9
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/netlink_compat.c5
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/socket.c88
-rw-r--r--net/tipc/socket.h3
-rw-r--r--net/tipc/topsrv.c4
-rw-r--r--net/tls/tls_device.c6
-rw-r--r--net/tls/tls_device_fallback.c2
-rw-r--r--net/tls/tls_main.c31
-rw-r--r--net/tls/tls_sw.c27
-rw-r--r--net/wireless/nl80211.c35
-rw-r--r--net/wireless/reg.c92
-rw-r--r--net/wireless/scan.c58
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xdp/xdp_umem.c4
-rw-r--r--net/xfrm/xfrm_input.c1
-rw-r--r--net/xfrm/xfrm_output.c4
-rw-r--r--net/xfrm/xfrm_policy.c4
-rw-r--r--net/xfrm/xfrm_user.c15
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.build2
-rwxr-xr-xscripts/checkpatch.pl3
-rwxr-xr-xscripts/depmod.sh5
-rw-r--r--scripts/kconfig/Makefile1
-rw-r--r--scripts/kconfig/check-pkgconfig.sh8
-rwxr-xr-xscripts/kconfig/gconf-cfg.sh7
-rwxr-xr-xscripts/kconfig/mconf-cfg.sh25
-rw-r--r--scripts/kconfig/mconf.c1
-rw-r--r--scripts/kconfig/nconf-cfg.sh25
-rwxr-xr-xscripts/kconfig/qconf-cfg.sh7
-rwxr-xr-xscripts/recordmcount.pl3
-rwxr-xr-xscripts/setlocalversion2
-rw-r--r--scripts/subarch.include13
-rw-r--r--security/Kconfig2
-rw-r--r--security/apparmor/secid.c1
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/firewire/bebob/bebob.c2
-rw-r--r--sound/firewire/bebob/bebob_maudio.c28
-rw-r--r--sound/firewire/digi00x/digi00x.c1
-rw-r--r--sound/firewire/fireface/ff-protocol-ff400.c9
-rw-r--r--sound/firewire/fireworks/fireworks.c2
-rw-r--r--sound/firewire/oxfw/oxfw.c10
-rw-r--r--sound/firewire/tascam/tascam.c1
-rw-r--r--sound/hda/ext/hdac_ext_stream.c22
-rw-r--r--sound/hda/hdac_controller.c15
-rw-r--r--sound/pci/emu10k1/emufx.c2
-rw-r--r--sound/pci/hda/hda_codec.c3
-rw-r--r--sound/pci/hda/hda_intel.c86
-rw-r--r--sound/pci/hda/hda_intel.h1
-rw-r--r--sound/soc/amd/acp-pcm-dma.c21
-rw-r--r--sound/soc/codecs/cs4265.c4
-rw-r--r--sound/soc/codecs/max98373.c3
-rw-r--r--sound/soc/codecs/rt5514.c8
-rw-r--r--sound/soc/codecs/rt5682.c8
-rw-r--r--sound/soc/codecs/sigmadsp.c3
-rw-r--r--sound/soc/codecs/tas6424.c12
-rw-r--r--sound/soc/codecs/wm8804-i2c.c15
-rw-r--r--sound/soc/codecs/wm9712.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c26
-rw-r--r--sound/soc/intel/skylake/skl.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c4
-rw-r--r--sound/soc/sh/rcar/adg.c5
-rw-r--r--sound/soc/sh/rcar/core.c21
-rw-r--r--sound/soc/sh/rcar/dma.c4
-rw-r--r--sound/soc/sh/rcar/rsnd.h7
-rw-r--r--sound/soc/sh/rcar/ssi.c16
-rw-r--r--sound/soc/soc-core.c4
-rw-r--r--sound/soc/soc-dapm.c4
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h13
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h13
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h37
-rw-r--r--tools/bpf/bpftool/map.c1
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c5
-rw-r--r--tools/hv/hv_kvp_daemon.c2
-rw-r--r--tools/include/linux/lockdep.h3
-rw-r--r--tools/include/linux/nmi.h0
-rw-r--r--tools/include/tools/libc_compat.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/drm/drm.h9
-rw-r--r--tools/include/uapi/linux/if_link.h17
-rw-r--r--tools/include/uapi/linux/kvm.h6
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/include/uapi/linux/vhost.h18
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat59
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/libbpf.c20
-rw-r--r--tools/lib/bpf/str_error.c18
-rw-r--r--tools/lib/bpf/str_error.h6
-rw-r--r--tools/perf/Documentation/Makefile2
-rw-r--r--tools/perf/Makefile.perf14
-rw-r--r--tools/perf/arch/arm64/Makefile5
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl6
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c4
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h1
-rw-r--r--tools/perf/arch/x86/tests/Build1
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c6
-rw-r--r--tools/perf/arch/x86/tests/bp-modify.c213
-rw-r--r--tools/perf/util/annotate.c32
-rw-r--r--tools/perf/util/annotate.h1
-rw-r--r--tools/perf/util/evsel.c5
-rw-r--r--tools/perf/util/map.c11
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/trace-event-parse.c7
-rw-r--r--tools/testing/selftests/android/Makefile2
-rw-r--r--tools/testing/selftests/android/config (renamed from tools/testing/selftests/android/ion/config)0
-rw-r--r--tools/testing/selftests/android/ion/Makefile2
-rw-r--r--tools/testing/selftests/bpf/test_maps.c10
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c38
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h1
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c205
-rw-r--r--tools/testing/selftests/efivarfs/config1
-rw-r--r--tools/testing/selftests/futex/functional/Makefile1
-rw-r--r--tools/testing/selftests/gpio/Makefile7
-rw-r--r--tools/testing/selftests/kselftest.h1
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile12
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h4
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c89
-rw-r--r--tools/testing/selftests/kvm/platform_info_test.c110
-rw-r--r--tools/testing/selftests/lib.mk12
-rw-r--r--tools/testing/selftests/memory-hotplug/config1
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh11
-rw-r--r--tools/testing/selftests/net/tls.c49
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile1
-rw-r--r--tools/testing/selftests/rseq/param_test.c19
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json48
-rw-r--r--tools/testing/selftests/vm/Makefile4
-rw-r--r--tools/vm/page-types.c6
-rw-r--r--tools/vm/slabinfo.c4
-rw-r--r--virt/kvm/arm/mmu.c21
-rw-r--r--virt/kvm/arm/trace.h15
1352 files changed, 13739 insertions, 8661 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-xen-backend b/Documentation/ABI/stable/sysfs-bus-xen-backend
index 3d5951c8bf5f..e8b60bd766f7 100644
--- a/Documentation/ABI/stable/sysfs-bus-xen-backend
+++ b/Documentation/ABI/stable/sysfs-bus-xen-backend
@@ -73,3 +73,12 @@ KernelVersion: 3.0
73Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 73Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
74Description: 74Description:
75 Number of sectors written by the frontend. 75 Number of sectors written by the frontend.
76
77What: /sys/bus/xen-backend/devices/*/state
78Date: August 2018
79KernelVersion: 4.19
80Contact: Joe Jin <joe.jin@oracle.com>
81Description:
82 The state of the device. One of: 'Unknown',
83 'Initialising', 'Initialised', 'Connected', 'Closing',
84 'Closed', 'Reconfiguring', 'Reconfigured'.
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
index caa311d59ac1..6d83f95a8a8e 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
75Description: 75Description:
76 Amount (in KiB) of low (or normal) memory in the 76 Amount (in KiB) of low (or normal) memory in the
77 balloon. 77 balloon.
78
79What: /sys/devices/system/xen_memory/xen_memory0/scrub_pages
80Date: September 2018
81KernelVersion: 4.20
82Contact: xen-devel@lists.xenproject.org
83Description:
84 Control scrubbing pages before returning them to Xen for others domains
85 use. Can be set with xen_scrub_pages cmdline
86 parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
diff --git a/Documentation/ABI/testing/sysfs-driver-xen-blkback b/Documentation/ABI/testing/sysfs-driver-xen-blkback
index 8bb43b66eb55..4e7babb3ba1f 100644
--- a/Documentation/ABI/testing/sysfs-driver-xen-blkback
+++ b/Documentation/ABI/testing/sysfs-driver-xen-blkback
@@ -15,3 +15,13 @@ Description:
15 blkback. If the frontend tries to use more than 15 blkback. If the frontend tries to use more than
16 max_persistent_grants, the LRU kicks in and starts 16 max_persistent_grants, the LRU kicks in and starts
17 removing 5% of max_persistent_grants every 100ms. 17 removing 5% of max_persistent_grants every 100ms.
18
19What: /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
20Date: August 2018
21KernelVersion: 4.19
22Contact: Roger Pau Monné <roger.pau@citrix.com>
23Description:
24 How long a persistent grant is allowed to remain
25 allocated without being in use. The time is in
26 seconds, 0 means indefinitely long.
27 The default is 60 seconds.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9871e649ffef..92eb1f42240d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3523,6 +3523,12 @@
3523 ramdisk_size= [RAM] Sizes of RAM disks in kilobytes 3523 ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
3524 See Documentation/blockdev/ramdisk.txt. 3524 See Documentation/blockdev/ramdisk.txt.
3525 3525
3526 random.trust_cpu={on,off}
3527 [KNL] Enable or disable trusting the use of the
3528 CPU's random number generator (if available) to
3529 fully seed the kernel's CRNG. Default is controlled
3530 by CONFIG_RANDOM_TRUST_CPU.
3531
3526 ras=option[,option,...] [KNL] RAS-specific options 3532 ras=option[,option,...] [KNL] RAS-specific options
3527 3533
3528 cec_disable [X86] 3534 cec_disable [X86]
@@ -4994,6 +5000,12 @@
4994 Disables the PV optimizations forcing the HVM guest to 5000 Disables the PV optimizations forcing the HVM guest to
4995 run as generic HVM guest with no PV drivers. 5001 run as generic HVM guest with no PV drivers.
4996 5002
5003 xen_scrub_pages= [XEN]
5004 Boolean option to control scrubbing pages before giving them back
5005 to Xen, for use by other domains. Can be also changed at runtime
5006 with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
5007 Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
5008
4997 xirc2ps_cs= [NET,PCMCIA] 5009 xirc2ps_cs= [NET,PCMCIA]
4998 Format: 5010 Format:
4999 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]] 5011 <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index f128f736b4a5..7169a0ec41d8 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
200 thread. 200 thread.
201 201
202 * Changing the vector length causes all of P0..P15, FFR and all bits of 202 * Changing the vector length causes all of P0..P15, FFR and all bits of
203 Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become 203 Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
204 unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current 204 unspecified. Calling PR_SVE_SET_VL with vl equal to the thread's current
205 vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC 205 vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
206 flag, does not constitute a change to the vector length for this purpose. 206 flag, does not constitute a change to the vector length for this purpose.
@@ -500,7 +500,7 @@ References
500[2] arch/arm64/include/uapi/asm/ptrace.h 500[2] arch/arm64/include/uapi/asm/ptrace.h
501 AArch64 Linux ptrace ABI definitions 501 AArch64 Linux ptrace ABI definitions
502 502
503[3] linux/Documentation/arm64/cpu-feature-registers.txt 503[3] Documentation/arm64/cpu-feature-registers.txt
504 504
505[4] ARM IHI0055C 505[4] ARM IHI0055C
506 http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf 506 http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 390c145f01d7..52a719b49afd 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -348,3 +348,7 @@ Version History
3481.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size an 3481.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size an
349 state races. 349 state races.
3501.13.2 Fix raid redundancy validation and avoid keeping raid set frozen 3501.13.2 Fix raid redundancy validation and avoid keeping raid set frozen
3511.14.0 Fix reshape race on small devices. Fix stripe adding reshape
352 deadlock/potential data corruption. Update superblock when
353 specific devices are requested via rebuild. Fix RAID leg
354 rebuild errors.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
index 00e4365d7206..091c8dfd3229 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -3,7 +3,6 @@
3Required properties: 3Required properties:
4- compatible : 4- compatible :
5 - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc 5 - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
6 - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
7- reg : address and length of the lpi2c master registers 6- reg : address and length of the lpi2c master registers
8- interrupts : lpi2c interrupt 7- interrupts : lpi2c interrupt
9- clocks : lpi2c clock specifier 8- clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
11Examples: 10Examples:
12 11
13lpi2c7: lpi2c7@40a50000 { 12lpi2c7: lpi2c7@40a50000 {
14 compatible = "fsl,imx8dv-lpi2c"; 13 compatible = "fsl,imx7ulp-lpi2c";
15 reg = <0x40A50000 0x10000>; 14 reg = <0x40A50000 0x10000>;
16 interrupt-parent = <&intc>; 15 interrupt-parent = <&intc>;
17 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; 16 interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cb..7cccc49b6bea 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -1,4 +1,4 @@
1Device-Tree bindings for input/gpio_keys.c keyboard driver 1Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
2 2
3Required properties: 3Required properties:
4 - compatible = "gpio-keys"; 4 - compatible = "gpio-keys";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
index b0a8af51c388..265b223cd978 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
11attached to every HLIC: software interrupts, the timer interrupt, and external 11attached to every HLIC: software interrupts, the timer interrupt, and external
12interrupts. Software interrupts are used to send IPIs between cores. The 12interrupts. Software interrupts are used to send IPIs between cores. The
13timer interrupt comes from an architecturally mandated real-time timer that is 13timer interrupt comes from an architecturally mandated real-time timer that is
14controller via Supervisor Binary Interface (SBI) calls and CSR reads. External 14controlled via Supervisor Binary Interface (SBI) calls and CSR reads. External
15interrupts connect all other device interrupts to the HLIC, which are routed 15interrupts connect all other device interrupts to the HLIC, which are routed
16via the platform-level interrupt controller (PLIC). 16via the platform-level interrupt controller (PLIC).
17 17
@@ -25,7 +25,15 @@ in the system.
25 25
26Required properties: 26Required properties:
27- compatible : "riscv,cpu-intc" 27- compatible : "riscv,cpu-intc"
28- #interrupt-cells : should be <1> 28- #interrupt-cells : should be <1>. The interrupt sources are defined by the
29 RISC-V supervisor ISA manual, with only the following three interrupts being
30 defined for supervisor mode:
31 - Source 1 is the supervisor software interrupt, which can be sent by an SBI
32 call and is reserved for use by software.
33 - Source 5 is the supervisor timer interrupt, which can be configured by
34 SBI calls and implements a one-shot timer.
35 - Source 9 is the supervisor external interrupt, which chains to all other
36 device interrupts.
29- interrupt-controller : Identifies the node as an interrupt controller 37- interrupt-controller : Identifies the node as an interrupt controller
30 38
31Furthermore, this interrupt-controller MUST be embedded inside the cpu 39Furthermore, this interrupt-controller MUST be embedded inside the cpu
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is show below.
38 ... 46 ...
39 cpu1-intc: interrupt-controller { 47 cpu1-intc: interrupt-controller {
40 #interrupt-cells = <1>; 48 #interrupt-cells = <1>;
41 compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc"; 49 compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
42 interrupt-controller; 50 interrupt-controller;
43 }; 51 };
44 }; 52 };
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 41089369f891..b3acebe08eb0 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -19,6 +19,10 @@ Required properties:
19- slaves : Specifies number for slaves 19- slaves : Specifies number for slaves
20- active_slave : Specifies the slave to use for time stamping, 20- active_slave : Specifies the slave to use for time stamping,
21 ethtool and SIOCGMIIPHY 21 ethtool and SIOCGMIIPHY
22- cpsw-phy-sel : Specifies the phandle to the CPSW phy mode selection
23 device. See also cpsw-phy-sel.txt for it's binding.
24 Note that in legacy cases cpsw-phy-sel may be
25 a child device instead of a phandle.
22 26
23Optional properties: 27Optional properties:
24- ti,hwmods : Must be "cpgmac0" 28- ti,hwmods : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
75 cpts_clock_mult = <0x80000000>; 79 cpts_clock_mult = <0x80000000>;
76 cpts_clock_shift = <29>; 80 cpts_clock_shift = <29>;
77 syscon = <&cm>; 81 syscon = <&cm>;
82 cpsw-phy-sel = <&phy_sel>;
78 cpsw_emac0: slave@0 { 83 cpsw_emac0: slave@0 {
79 phy_id = <&davinci_mdio>, <0>; 84 phy_id = <&davinci_mdio>, <0>;
80 phy-mode = "rgmii-txid"; 85 phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
103 cpts_clock_mult = <0x80000000>; 108 cpts_clock_mult = <0x80000000>;
104 cpts_clock_shift = <29>; 109 cpts_clock_shift = <29>;
105 syscon = <&cm>; 110 syscon = <&cm>;
111 cpsw-phy-sel = <&phy_sel>;
106 cpsw_emac0: slave@0 { 112 cpsw_emac0: slave@0 {
107 phy_id = <&davinci_mdio>, <0>; 113 phy_id = <&davinci_mdio>, <0>;
108 phy-mode = "rgmii-txid"; 114 phy-mode = "rgmii-txid";
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 457d5ae16f23..3e17ac1d5d58 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@ Required properties:
10 Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on 10 Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
11 the Cadence GEM, or the generic form: "cdns,gem". 11 the Cadence GEM, or the generic form: "cdns,gem".
12 Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs. 12 Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
13 Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
13 Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs. 14 Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
14 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
15 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
index 76db9f13ad96..abc36274227c 100644
--- a/Documentation/devicetree/bindings/net/sh_eth.txt
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -16,6 +16,7 @@ Required properties:
16 "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC. 16 "renesas,ether-r8a7794" if the device is a part of R8A7794 SoC.
17 "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC. 17 "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
18 "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC. 18 "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
19 "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
19 "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device. 20 "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
20 "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1 21 "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
21 device. 22 device.
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index 5d47a262474c..9407212a85a8 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -7,6 +7,7 @@ Required properties:
7 Examples with soctypes are: 7 Examples with soctypes are:
8 - "renesas,r8a7743-wdt" (RZ/G1M) 8 - "renesas,r8a7743-wdt" (RZ/G1M)
9 - "renesas,r8a7745-wdt" (RZ/G1E) 9 - "renesas,r8a7745-wdt" (RZ/G1E)
10 - "renesas,r8a774a1-wdt" (RZ/G2M)
10 - "renesas,r8a7790-wdt" (R-Car H2) 11 - "renesas,r8a7790-wdt" (R-Car H2)
11 - "renesas,r8a7791-wdt" (R-Car M2-W) 12 - "renesas,r8a7791-wdt" (R-Car M2-W)
12 - "renesas,r8a7792-wdt" (R-Car V2H) 13 - "renesas,r8a7792-wdt" (R-Car V2H)
@@ -21,8 +22,8 @@ Required properties:
21 - "renesas,r7s72100-wdt" (RZ/A1) 22 - "renesas,r7s72100-wdt" (RZ/A1)
22 The generic compatible string must be: 23 The generic compatible string must be:
23 - "renesas,rza-wdt" for RZ/A 24 - "renesas,rza-wdt" for RZ/A
24 - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G 25 - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
25 - "renesas,rcar-gen3-wdt" for R-Car Gen3 26 - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
26 27
27- reg : Should contain WDT registers location and length 28- reg : Should contain WDT registers location and length
28- clocks : the clock feeding the watchdog timer. 29- clocks : the clock feeding the watchdog timer.
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index f6362d88763b..aa924196c366 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
15arches. 15arches.
16 16
17v86d source code can be downloaded from the following website: 17v86d source code can be downloaded from the following website:
18 http://dev.gentoo.org/~spock/projects/uvesafb 18
19 https://github.com/mjanusz/v86d
19 20
20Please refer to the v86d documentation for detailed configuration and 21Please refer to the v86d documentation for detailed configuration and
21installation instructions. 22installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
177 178
178-- 179--
179 Michal Januszewski <spock@gentoo.org> 180 Michal Januszewski <spock@gentoo.org>
180 Last updated: 2009-03-30 181 Last updated: 2017-10-10
181 182
182 Documentation of the uvesafb options is loosely based on vesafb.txt. 183 Documentation of the uvesafb options is loosely based on vesafb.txt.
183 184
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 4b2084d0f1fb..a6c6a8af48a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -848,7 +848,7 @@ struct file_operations
848---------------------- 848----------------------
849 849
850This describes how the VFS can manipulate an open file. As of kernel 850This describes how the VFS can manipulate an open file. As of kernel
8514.1, the following members are defined: 8514.18, the following members are defined:
852 852
853struct file_operations { 853struct file_operations {
854 struct module *owner; 854 struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
858 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); 858 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
859 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); 859 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
860 int (*iterate) (struct file *, struct dir_context *); 860 int (*iterate) (struct file *, struct dir_context *);
861 int (*iterate_shared) (struct file *, struct dir_context *);
861 __poll_t (*poll) (struct file *, struct poll_table_struct *); 862 __poll_t (*poll) (struct file *, struct poll_table_struct *);
862 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 863 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
863 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 864 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
864 int (*mmap) (struct file *, struct vm_area_struct *); 865 int (*mmap) (struct file *, struct vm_area_struct *);
865 int (*mremap)(struct file *, struct vm_area_struct *);
866 int (*open) (struct inode *, struct file *); 866 int (*open) (struct inode *, struct file *);
867 int (*flush) (struct file *, fl_owner_t id); 867 int (*flush) (struct file *, fl_owner_t id);
868 int (*release) (struct inode *, struct file *); 868 int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
882#ifndef CONFIG_MMU 882#ifndef CONFIG_MMU
883 unsigned (*mmap_capabilities)(struct file *); 883 unsigned (*mmap_capabilities)(struct file *);
884#endif 884#endif
885 ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
886 int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
887 int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
888 int (*fadvise)(struct file *, loff_t, loff_t, int);
885}; 889};
886 890
887Again, all methods are called without any locks being held, unless 891Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
899 903
900 iterate: called when the VFS needs to read the directory contents 904 iterate: called when the VFS needs to read the directory contents
901 905
906 iterate_shared: called when the VFS needs to read the directory contents
907 when filesystem supports concurrent dir iterators
908
902 poll: called by the VFS when a process wants to check if there is 909 poll: called by the VFS when a process wants to check if there is
903 activity on this file and (optionally) go to sleep until there 910 activity on this file and (optionally) go to sleep until there
904 is activity. Called by the select(2) and poll(2) system calls 911 is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
951 958
952 fallocate: called by the VFS to preallocate blocks or punch a hole. 959 fallocate: called by the VFS to preallocate blocks or punch a hole.
953 960
961 copy_file_range: called by the copy_file_range(2) system call.
962
963 clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
964 FICLONE commands.
965
966 dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
967 command.
968
969 fadvise: possibly called by the fadvise64() system call.
970
954Note that the file operations are implemented by the specific 971Note that the file operations are implemented by the specific
955filesystem in which the inode resides. When opening a device node 972filesystem in which the inode resides. When opening a device node
956(character or block special) most filesystems will call special 973(character or block special) most filesystems will call special
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index 72d16f08e431..b8df81f6d6bc 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@ Supported chips:
32 Datasheet: Publicly available at the Texas Instruments website 32 Datasheet: Publicly available at the Texas Instruments website
33 http://www.ti.com/ 33 http://www.ti.com/
34 34
35Author: Lothar Felten <l-felten@ti.com> 35Author: Lothar Felten <lothar.felten@gmail.com>
36 36
37Description 37Description
38----------- 38-----------
diff --git a/Documentation/i2c/DMA-considerations b/Documentation/i2c/DMA-considerations
index 966610aa4620..203002054120 100644
--- a/Documentation/i2c/DMA-considerations
+++ b/Documentation/i2c/DMA-considerations
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
50returned buffer. If NULL is returned, the threshold was not met or a bounce 50returned buffer. If NULL is returned, the threshold was not met or a bounce
51buffer could not be allocated. Fall back to PIO in that case. 51buffer could not be allocated. Fall back to PIO in that case.
52 52
53In any case, a buffer obtained from above needs to be released. It ensures data 53In any case, a buffer obtained from above needs to be released. Another helper
54is copied back to the message and a potentially used bounce buffer is freed:: 54function ensures a potentially used bounce buffer is freed::
55 55
56 i2c_release_dma_safe_msg_buf(msg, dma_buf); 56 i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
57
58The last argument 'xferred' controls if the buffer is synced back to the
59message or not. No syncing is needed in cases setting up DMA had an error and
60there was no data transferred.
57 61
58The bounce buffer handling from the core is generic and simple. It will always 62The bounce buffer handling from the core is generic and simple. It will always
59allocate a new bounce buffer. If you want a more sophisticated handling (e.g. 63allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
33 video-clear-buffer 33 video-clear-buffer
34 video-set-streamtype 34 video-set-streamtype
35 video-set-format 35 video-set-format
36 video-set-attributes
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8313a636dd53..960de8fe3f40 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
425 1 - Disabled by default, enabled when an ICMP black hole detected 425 1 - Disabled by default, enabled when an ICMP black hole detected
426 2 - Always enabled, use initial MSS of tcp_base_mss. 426 2 - Always enabled, use initial MSS of tcp_base_mss.
427 427
428tcp_probe_interval - INTEGER 428tcp_probe_interval - UNSIGNED INTEGER
429 Controls how often to start TCP Packetization-Layer Path MTU 429 Controls how often to start TCP Packetization-Layer Path MTU
430 Discovery reprobe. The default is reprobing every 10 minutes as 430 Discovery reprobe. The default is reprobing every 10 minutes as
431 per RFC4821. 431 per RFC4821.
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 61f918b10a0c..d1bf143b446f 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -86,7 +86,7 @@ pkg-config
86 86
87The build system, as of 4.18, requires pkg-config to check for installed 87The build system, as of 4.18, requires pkg-config to check for installed
88kconfig tools and to determine flags settings for use in 88kconfig tools and to determine flags settings for use in
89'make {menu,n,g,x}config'. Previously pkg-config was being used but not 89'make {g,x}config'. Previously pkg-config was being used but not
90verified or documented. 90verified or documented.
91 91
92Flex 92Flex
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
new file mode 100644
index 000000000000..ab7c24b5478c
--- /dev/null
+++ b/Documentation/process/code-of-conduct.rst
@@ -0,0 +1,81 @@
1Contributor Covenant Code of Conduct
2++++++++++++++++++++++++++++++++++++
3
4Our Pledge
5==========
6
7In the interest of fostering an open and welcoming environment, we as
8contributors and maintainers pledge to making participation in our project and
9our community a harassment-free experience for everyone, regardless of age, body
10size, disability, ethnicity, sex characteristics, gender identity and
11expression, level of experience, education, socio-economic status, nationality,
12personal appearance, race, religion, or sexual identity and orientation.
13
14Our Standards
15=============
16
17Examples of behavior that contributes to creating a positive environment
18include:
19
20* Using welcoming and inclusive language
21* Being respectful of differing viewpoints and experiences
22* Gracefully accepting constructive criticism
23* Focusing on what is best for the community
24* Showing empathy towards other community members
25
26
27Examples of unacceptable behavior by participants include:
28
29* The use of sexualized language or imagery and unwelcome sexual attention or
30 advances
31* Trolling, insulting/derogatory comments, and personal or political attacks
32* Public or private harassment
33* Publishing others’ private information, such as a physical or electronic
34 address, without explicit permission
35* Other conduct which could reasonably be considered inappropriate in a
36 professional setting
37
38
39Our Responsibilities
40====================
41
42Maintainers are responsible for clarifying the standards of acceptable behavior
43and are expected to take appropriate and fair corrective action in response to
44any instances of unacceptable behavior.
45
46Maintainers have the right and responsibility to remove, edit, or reject
47comments, commits, code, wiki edits, issues, and other contributions that are
48not aligned to this Code of Conduct, or to ban temporarily or permanently any
49contributor for other behaviors that they deem inappropriate, threatening,
50offensive, or harmful.
51
52Scope
53=====
54
55This Code of Conduct applies both within project spaces and in public spaces
56when an individual is representing the project or its community. Examples of
57representing a project or community include using an official project e-mail
58address, posting via an official social media account, or acting as an appointed
59representative at an online or offline event. Representation of a project may be
60further defined and clarified by project maintainers.
61
62Enforcement
63===========
64
65Instances of abusive, harassing, or otherwise unacceptable behavior may be
66reported by contacting the Technical Advisory Board (TAB) at
67<tab@lists.linux-foundation.org>. All complaints will be reviewed and
68investigated and will result in a response that is deemed necessary and
69appropriate to the circumstances. The TAB is obligated to maintain
70confidentiality with regard to the reporter of an incident. Further details of
71specific enforcement policies may be posted separately.
72
73Maintainers who do not follow or enforce the Code of Conduct in good faith may
74face temporary or permanent repercussions as determined by other members of the
75project’s leadership.
76
77Attribution
78===========
79
80This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
81available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst
deleted file mode 100644
index 47b6de763203..000000000000
--- a/Documentation/process/code-of-conflict.rst
+++ /dev/null
@@ -1,28 +0,0 @@
1Code of Conflict
2----------------
3
4The Linux kernel development effort is a very personal process compared
5to "traditional" ways of developing software. Your code and ideas
6behind it will be carefully reviewed, often resulting in critique and
7criticism. The review will almost always require improvements to the
8code before it can be included in the kernel. Know that this happens
9because everyone involved wants to see the best possible solution for
10the overall success of Linux. This development process has been proven
11to create the most robust operating system kernel ever, and we do not
12want to do anything to cause the quality of submission and eventual
13result to ever decrease.
14
15If however, anyone feels personally abused, threatened, or otherwise
16uncomfortable due to this process, that is not acceptable. If so,
17please contact the Linux Foundation's Technical Advisory Board at
18<tab@lists.linux-foundation.org>, or the individual members, and they
19will work to resolve the issue to the best of their ability. For more
20information on who is on the Technical Advisory Board and what their
21role is, please see:
22
23 - http://www.linuxfoundation.org/projects/linux/tab
24
25As a reviewer of code, please strive to keep things civil and focused on
26the technical issues involved. We are all humans, and frustrations can
27be high on both sides of the process. Try to keep in mind the immortal
28words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 37bd0628b6ee..9ae3e317bddf 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
20 :maxdepth: 1 20 :maxdepth: 1
21 21
22 howto 22 howto
23 code-of-conflict 23 code-of-conduct
24 development-process 24 development-process
25 submitting-patches 25 submitting-patches
26 coding-style 26 coding-style
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 25a4b4cf04a6..92999d4e0cb8 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
97 allowing boot to proceed. none ignores them, expecting 97 allowing boot to proceed. none ignores them, expecting
98 user space to do the scan. 98 user space to do the scan.
99 99
100 scsi_mod.use_blk_mq=
101 [SCSI] use blk-mq I/O path by default
102 See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
103 Format: <y/n>
104
100 sim710= [SCSI,HW] 105 sim710= [SCSI,HW]
101 See header of drivers/scsi/sim710.c. 106 See header of drivers/scsi/sim710.c.
102 107
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c664064f76fb..647f94128a85 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
4510Architectures: s390 4510Architectures: s390
4511Parameters: none 4511Parameters: none
4512Returns: 0 on success, -EINVAL if hpage module parameter was not set 4512Returns: 0 on success, -EINVAL if hpage module parameter was not set
4513 or cmma is enabled 4513 or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
4514 flag set
4514 4515
4515With this capability the KVM support for memory backing with 1m pages 4516With this capability the KVM support for memory backing with 1m pages
4516through hugetlbfs can be enabled for a VM. After the capability is 4517through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
4521While it is generally possible to create a huge page backed VM without 4522While it is generally possible to create a huge page backed VM without
4522this capability, the VM will not be able to run. 4523this capability, the VM will not be able to run.
4523 4524
45257.14 KVM_CAP_MSR_PLATFORM_INFO
4526
4527Architectures: x86
4528Parameters: args[0] whether feature should be enabled or not
4529
4530With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
4531a #GP would be raised when the guest tries to access. Currently, this
4532capability does not enable write permissions of this MSR for the guest.
4533
45248. Other capabilities. 45348. Other capabilities.
4525---------------------- 4535----------------------
4526 4536
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
index 688e3eeed21d..46933e06c972 100644
--- a/Documentation/x86/earlyprintk.txt
+++ b/Documentation/x86/earlyprintk.txt
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
35( If your system does not list a debug port capability then you probably 35( If your system does not list a debug port capability then you probably
36 won't be able to use the USB debug key. ) 36 won't be able to use the USB debug key. )
37 37
38 b.) You also need a Netchip USB debug cable/key: 38 b.) You also need a NetChip USB debug cable/key:
39 39
40 http://www.plxtech.com/products/NET2000/NET20DC/default.asp 40 http://www.plxtech.com/products/NET2000/NET20DC/default.asp
41 41
42 This is a small blue plastic connector with two USB connections, 42 This is a small blue plastic connector with two USB connections;
43 it draws power from its USB connections. 43 it draws power from its USB connections.
44 44
45 c.) You need a second client/console system with a high speed USB 2.0 45 c.) You need a second client/console system with a high speed USB 2.0
46 port. 46 port.
47 47
48 d.) The Netchip device must be plugged directly into the physical 48 d.) The NetChip device must be plugged directly into the physical
49 debug port on the "host/target" system. You cannot use a USB hub in 49 debug port on the "host/target" system. You cannot use a USB hub in
50 between the physical debug port and the "host/target" system. 50 between the physical debug port and the "host/target" system.
51 51
52 The EHCI debug controller is bound to a specific physical USB 52 The EHCI debug controller is bound to a specific physical USB
53 port and the Netchip device will only work as an early printk 53 port and the NetChip device will only work as an early printk
54 device in this port. The EHCI host controllers are electrically 54 device in this port. The EHCI host controllers are electrically
55 wired such that the EHCI debug controller is hooked up to the 55 wired such that the EHCI debug controller is hooked up to the
56 first physical and there is no way to change this via software. 56 first physical port and there is no way to change this via software.
57 You can find the physical port through experimentation by trying 57 You can find the physical port through experimentation by trying
58 each physical port on the system and rebooting. Or you can try 58 each physical port on the system and rebooting. Or you can try
59 and use lsusb or look at the kernel info messages emitted by the 59 and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
65 to the hardware vendor, because there is no reason not to wire 65 to the hardware vendor, because there is no reason not to wire
66 this port into one of the physically accessible ports. 66 this port into one of the physically accessible ports.
67 67
68 e.) It is also important to note, that many versions of the Netchip 68 e.) It is also important to note, that many versions of the NetChip
69 device require the "client/console" system to be plugged into the 69 device require the "client/console" system to be plugged into the
70 right and side of the device (with the product logo facing up and 70 right hand side of the device (with the product logo facing up and
71 readable left to right). The reason being is that the 5 volt 71 readable left to right). The reason being is that the 5 volt
72 power supply is taken from only one side of the device and it 72 power supply is taken from only one side of the device and it
73 must be the side that does not get rebooted. 73 must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
81 CONFIG_EARLY_PRINTK_DBGP=y 81 CONFIG_EARLY_PRINTK_DBGP=y
82 82
83 And you need to add the boot command line: "earlyprintk=dbgp". 83 And you need to add the boot command line: "earlyprintk=dbgp".
84
84 (If you are using Grub, append it to the 'kernel' line in 85 (If you are using Grub, append it to the 'kernel' line in
85 /etc/grub.conf) 86 /etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
87 append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
88 using Grub2 on an EFI firmware system, append it to the 'linux'
89 or 'linuxefi' line in /boot/grub2/grub.cfg or
90 /boot/efi/EFI/<distro>/grub.cfg.)
86 91
87 On systems with more than one EHCI debug controller you must 92 On systems with more than one EHCI debug controller you must
88 specify the correct EHCI debug controller number. The ordering 93 specify the correct EHCI debug controller number. The ordering
89 comes from the PCI bus enumeration of the EHCI controllers. The 94 comes from the PCI bus enumeration of the EHCI controllers. The
90 default with no number argument is "0" the first EHCI debug 95 default with no number argument is "0" or the first EHCI debug
91 controller. To use the second EHCI debug controller, you would 96 controller. To use the second EHCI debug controller, you would
92 use the command line: "earlyprintk=dbgp1" 97 use the command line: "earlyprintk=dbgp1"
93 98
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
111 see the raw output. 116 see the raw output.
112 117
113 c.) On Nvidia Southbridge based systems: the kernel will try to probe 118 c.) On Nvidia Southbridge based systems: the kernel will try to probe
114 and find out which port has debug device connected. 119 and find out which port has a debug device connected.
115 120
1163. Testing that it works fine: 1213. Testing that it works fine:
117 122
diff --git a/MAINTAINERS b/MAINTAINERS
index a5b256b25905..22065048d89d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1251,7 +1251,7 @@ N: meson
1251 1251
1252ARM/Annapurna Labs ALPINE ARCHITECTURE 1252ARM/Annapurna Labs ALPINE ARCHITECTURE
1253M: Tsahee Zidenberg <tsahee@annapurnalabs.com> 1253M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
1254M: Antoine Tenart <antoine.tenart@free-electrons.com> 1254M: Antoine Tenart <antoine.tenart@bootlin.com>
1255L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1255L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1256S: Maintained 1256S: Maintained
1257F: arch/arm/mach-alpine/ 1257F: arch/arm/mach-alpine/
@@ -2311,6 +2311,7 @@ F: drivers/clocksource/cadence_ttc_timer.c
2311F: drivers/i2c/busses/i2c-cadence.c 2311F: drivers/i2c/busses/i2c-cadence.c
2312F: drivers/mmc/host/sdhci-of-arasan.c 2312F: drivers/mmc/host/sdhci-of-arasan.c
2313F: drivers/edac/synopsys_edac.c 2313F: drivers/edac/synopsys_edac.c
2314F: drivers/i2c/busses/i2c-xiic.c
2314 2315
2315ARM64 PORT (AARCH64 ARCHITECTURE) 2316ARM64 PORT (AARCH64 ARCHITECTURE)
2316M: Catalin Marinas <catalin.marinas@arm.com> 2317M: Catalin Marinas <catalin.marinas@arm.com>
@@ -2955,7 +2956,6 @@ F: include/linux/bcm963xx_tag.h
2955 2956
2956BROADCOM BNX2 GIGABIT ETHERNET DRIVER 2957BROADCOM BNX2 GIGABIT ETHERNET DRIVER
2957M: Rasesh Mody <rasesh.mody@cavium.com> 2958M: Rasesh Mody <rasesh.mody@cavium.com>
2958M: Harish Patil <harish.patil@cavium.com>
2959M: Dept-GELinuxNICDev@cavium.com 2959M: Dept-GELinuxNICDev@cavium.com
2960L: netdev@vger.kernel.org 2960L: netdev@vger.kernel.org
2961S: Supported 2961S: Supported
@@ -2976,6 +2976,7 @@ F: drivers/scsi/bnx2i/
2976 2976
2977BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER 2977BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
2978M: Ariel Elior <ariel.elior@cavium.com> 2978M: Ariel Elior <ariel.elior@cavium.com>
2979M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
2979M: everest-linux-l2@cavium.com 2980M: everest-linux-l2@cavium.com
2980L: netdev@vger.kernel.org 2981L: netdev@vger.kernel.org
2981S: Supported 2982S: Supported
@@ -5469,7 +5470,8 @@ S: Odd Fixes
5469F: drivers/net/ethernet/agere/ 5470F: drivers/net/ethernet/agere/
5470 5471
5471ETHERNET BRIDGE 5472ETHERNET BRIDGE
5472M: Stephen Hemminger <stephen@networkplumber.org> 5473M: Roopa Prabhu <roopa@cumulusnetworks.com>
5474M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
5473L: bridge@lists.linux-foundation.org (moderated for non-subscribers) 5475L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
5474L: netdev@vger.kernel.org 5476L: netdev@vger.kernel.org
5475W: http://www.linuxfoundation.org/en/Net:Bridge 5477W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -5624,6 +5626,8 @@ F: lib/fault-inject.c
5624 5626
5625FBTFT Framebuffer drivers 5627FBTFT Framebuffer drivers
5626M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 5628M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
5629L: dri-devel@lists.freedesktop.org
5630L: linux-fbdev@vger.kernel.org
5627S: Maintained 5631S: Maintained
5628F: drivers/staging/fbtft/ 5632F: drivers/staging/fbtft/
5629 5633
@@ -6059,7 +6063,7 @@ F: Documentation/gcc-plugins.txt
6059 6063
6060GASKET DRIVER FRAMEWORK 6064GASKET DRIVER FRAMEWORK
6061M: Rob Springer <rspringer@google.com> 6065M: Rob Springer <rspringer@google.com>
6062M: John Joseph <jnjoseph@google.com> 6066M: Todd Poynor <toddpoynor@google.com>
6063M: Ben Chan <benchan@chromium.org> 6067M: Ben Chan <benchan@chromium.org>
6064S: Maintained 6068S: Maintained
6065F: drivers/staging/gasket/ 6069F: drivers/staging/gasket/
@@ -7015,6 +7019,20 @@ F: drivers/crypto/vmx/aes*
7015F: drivers/crypto/vmx/ghash* 7019F: drivers/crypto/vmx/ghash*
7016F: drivers/crypto/vmx/ppc-xlate.pl 7020F: drivers/crypto/vmx/ppc-xlate.pl
7017 7021
7022IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
7023M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
7024L: linux-pci@vger.kernel.org
7025L: linuxppc-dev@lists.ozlabs.org
7026S: Supported
7027F: drivers/pci/hotplug/rpaphp*
7028
7029IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
7030M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
7031L: linux-pci@vger.kernel.org
7032L: linuxppc-dev@lists.ozlabs.org
7033S: Supported
7034F: drivers/pci/hotplug/rpadlpar*
7035
7018IBM ServeRAID RAID DRIVER 7036IBM ServeRAID RAID DRIVER
7019S: Orphan 7037S: Orphan
7020F: drivers/scsi/ips.* 7038F: drivers/scsi/ips.*
@@ -8255,9 +8273,9 @@ F: drivers/ata/pata_arasan_cf.c
8255 8273
8256LIBATA PATA DRIVERS 8274LIBATA PATA DRIVERS
8257M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> 8275M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
8258M: Jens Axboe <kernel.dk> 8276M: Jens Axboe <axboe@kernel.dk>
8259L: linux-ide@vger.kernel.org 8277L: linux-ide@vger.kernel.org
8260T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8278T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8261S: Maintained 8279S: Maintained
8262F: drivers/ata/pata_*.c 8280F: drivers/ata/pata_*.c
8263F: drivers/ata/ata_generic.c 8281F: drivers/ata/ata_generic.c
@@ -8275,7 +8293,7 @@ LIBATA SATA AHCI PLATFORM devices support
8275M: Hans de Goede <hdegoede@redhat.com> 8293M: Hans de Goede <hdegoede@redhat.com>
8276M: Jens Axboe <axboe@kernel.dk> 8294M: Jens Axboe <axboe@kernel.dk>
8277L: linux-ide@vger.kernel.org 8295L: linux-ide@vger.kernel.org
8278T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8296T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8279S: Maintained 8297S: Maintained
8280F: drivers/ata/ahci_platform.c 8298F: drivers/ata/ahci_platform.c
8281F: drivers/ata/libahci_platform.c 8299F: drivers/ata/libahci_platform.c
@@ -8291,7 +8309,7 @@ F: drivers/ata/sata_promise.*
8291LIBATA SUBSYSTEM (Serial and Parallel ATA drivers) 8309LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
8292M: Jens Axboe <axboe@kernel.dk> 8310M: Jens Axboe <axboe@kernel.dk>
8293L: linux-ide@vger.kernel.org 8311L: linux-ide@vger.kernel.org
8294T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 8312T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
8295S: Maintained 8313S: Maintained
8296F: drivers/ata/ 8314F: drivers/ata/
8297F: include/linux/ata.h 8315F: include/linux/ata.h
@@ -8299,7 +8317,7 @@ F: include/linux/libata.h
8299F: Documentation/devicetree/bindings/ata/ 8317F: Documentation/devicetree/bindings/ata/
8300 8318
8301LIBLOCKDEP 8319LIBLOCKDEP
8302M: Sasha Levin <alexander.levin@verizon.com> 8320M: Sasha Levin <alexander.levin@microsoft.com>
8303S: Maintained 8321S: Maintained
8304F: tools/lib/lockdep/ 8322F: tools/lib/lockdep/
8305 8323
@@ -9699,13 +9717,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
9699S: Maintained 9717S: Maintained
9700F: drivers/media/dvb-frontends/mn88473* 9718F: drivers/media/dvb-frontends/mn88473*
9701 9719
9702PCI DRIVER FOR MOBIVEIL PCIE IP
9703M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
9704L: linux-pci@vger.kernel.org
9705S: Supported
9706F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
9707F: drivers/pci/controller/pcie-mobiveil.c
9708
9709MODULE SUPPORT 9720MODULE SUPPORT
9710M: Jessica Yu <jeyu@kernel.org> 9721M: Jessica Yu <jeyu@kernel.org>
9711T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 9722T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10932,7 +10943,7 @@ M: Willy Tarreau <willy@haproxy.com>
10932M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com> 10943M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
10933S: Odd Fixes 10944S: Odd Fixes
10934F: Documentation/auxdisplay/lcd-panel-cgram.txt 10945F: Documentation/auxdisplay/lcd-panel-cgram.txt
10935F: drivers/misc/panel.c 10946F: drivers/auxdisplay/panel.c
10936 10947
10937PARALLEL PORT SUBSYSTEM 10948PARALLEL PORT SUBSYSTEM
10938M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 10949M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11120,6 +11131,13 @@ F: include/uapi/linux/switchtec_ioctl.h
11120F: include/linux/switchtec.h 11131F: include/linux/switchtec.h
11121F: drivers/ntb/hw/mscc/ 11132F: drivers/ntb/hw/mscc/
11122 11133
11134PCI DRIVER FOR MOBIVEIL PCIE IP
11135M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
11136L: linux-pci@vger.kernel.org
11137S: Supported
11138F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
11139F: drivers/pci/controller/pcie-mobiveil.c
11140
11123PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) 11141PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
11124M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 11142M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
11125M: Jason Cooper <jason@lakedaemon.net> 11143M: Jason Cooper <jason@lakedaemon.net>
@@ -11153,7 +11171,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c
11153 11171
11154PCI DRIVER FOR SYNOPSYS DESIGNWARE 11172PCI DRIVER FOR SYNOPSYS DESIGNWARE
11155M: Jingoo Han <jingoohan1@gmail.com> 11173M: Jingoo Han <jingoohan1@gmail.com>
11156M: Joao Pinto <Joao.Pinto@synopsys.com> 11174M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
11157L: linux-pci@vger.kernel.org 11175L: linux-pci@vger.kernel.org
11158S: Maintained 11176S: Maintained
11159F: Documentation/devicetree/bindings/pci/designware-pcie.txt 11177F: Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11186,8 +11204,14 @@ F: tools/pci/
11186 11204
11187PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC 11205PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
11188M: Russell Currey <ruscur@russell.cc> 11206M: Russell Currey <ruscur@russell.cc>
11207M: Sam Bobroff <sbobroff@linux.ibm.com>
11208M: Oliver O'Halloran <oohall@gmail.com>
11189L: linuxppc-dev@lists.ozlabs.org 11209L: linuxppc-dev@lists.ozlabs.org
11190S: Supported 11210S: Supported
11211F: Documentation/PCI/pci-error-recovery.txt
11212F: drivers/pci/pcie/aer.c
11213F: drivers/pci/pcie/dpc.c
11214F: drivers/pci/pcie/err.c
11191F: Documentation/powerpc/eeh-pci-error-recovery.txt 11215F: Documentation/powerpc/eeh-pci-error-recovery.txt
11192F: arch/powerpc/kernel/eeh*.c 11216F: arch/powerpc/kernel/eeh*.c
11193F: arch/powerpc/platforms/*/eeh*.c 11217F: arch/powerpc/platforms/*/eeh*.c
@@ -11345,10 +11369,10 @@ S: Maintained
11345F: drivers/platform/x86/peaq-wmi.c 11369F: drivers/platform/x86/peaq-wmi.c
11346 11370
11347PER-CPU MEMORY ALLOCATOR 11371PER-CPU MEMORY ALLOCATOR
11372M: Dennis Zhou <dennis@kernel.org>
11348M: Tejun Heo <tj@kernel.org> 11373M: Tejun Heo <tj@kernel.org>
11349M: Christoph Lameter <cl@linux.com> 11374M: Christoph Lameter <cl@linux.com>
11350M: Dennis Zhou <dennisszhou@gmail.com> 11375T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
11351T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
11352S: Maintained 11376S: Maintained
11353F: include/linux/percpu*.h 11377F: include/linux/percpu*.h
11354F: mm/percpu*.c 11378F: mm/percpu*.c
@@ -11956,7 +11980,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
11956F: drivers/scsi/qla4xxx/ 11980F: drivers/scsi/qla4xxx/
11957 11981
11958QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER 11982QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
11959M: Harish Patil <harish.patil@cavium.com> 11983M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
11960M: Manish Chopra <manish.chopra@cavium.com> 11984M: Manish Chopra <manish.chopra@cavium.com>
11961M: Dept-GELinuxNICDev@cavium.com 11985M: Dept-GELinuxNICDev@cavium.com
11962L: netdev@vger.kernel.org 11986L: netdev@vger.kernel.org
@@ -11964,7 +11988,6 @@ S: Supported
11964F: drivers/net/ethernet/qlogic/qlcnic/ 11988F: drivers/net/ethernet/qlogic/qlcnic/
11965 11989
11966QLOGIC QLGE 10Gb ETHERNET DRIVER 11990QLOGIC QLGE 10Gb ETHERNET DRIVER
11967M: Harish Patil <harish.patil@cavium.com>
11968M: Manish Chopra <manish.chopra@cavium.com> 11991M: Manish Chopra <manish.chopra@cavium.com>
11969M: Dept-GELinuxNICDev@cavium.com 11992M: Dept-GELinuxNICDev@cavium.com
11970L: netdev@vger.kernel.org 11993L: netdev@vger.kernel.org
@@ -12243,6 +12266,7 @@ F: Documentation/networking/rds.txt
12243 12266
12244RDT - RESOURCE ALLOCATION 12267RDT - RESOURCE ALLOCATION
12245M: Fenghua Yu <fenghua.yu@intel.com> 12268M: Fenghua Yu <fenghua.yu@intel.com>
12269M: Reinette Chatre <reinette.chatre@intel.com>
12246L: linux-kernel@vger.kernel.org 12270L: linux-kernel@vger.kernel.org
12247S: Supported 12271S: Supported
12248F: arch/x86/kernel/cpu/intel_rdt* 12272F: arch/x86/kernel/cpu/intel_rdt*
@@ -13432,9 +13456,8 @@ F: drivers/i2c/busses/i2c-synquacer.c
13432F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt 13456F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
13433 13457
13434SOCIONEXT UNIPHIER SOUND DRIVER 13458SOCIONEXT UNIPHIER SOUND DRIVER
13435M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
13436L: alsa-devel@alsa-project.org (moderated for non-subscribers) 13459L: alsa-devel@alsa-project.org (moderated for non-subscribers)
13437S: Maintained 13460S: Orphan
13438F: sound/soc/uniphier/ 13461F: sound/soc/uniphier/
13439 13462
13440SOEKRIS NET48XX LED SUPPORT 13463SOEKRIS NET48XX LED SUPPORT
@@ -15372,7 +15395,7 @@ S: Maintained
15372UVESAFB DRIVER 15395UVESAFB DRIVER
15373M: Michal Januszewski <spock@gentoo.org> 15396M: Michal Januszewski <spock@gentoo.org>
15374L: linux-fbdev@vger.kernel.org 15397L: linux-fbdev@vger.kernel.org
15375W: http://dev.gentoo.org/~spock/projects/uvesafb/ 15398W: https://github.com/mjanusz/v86d
15376S: Maintained 15399S: Maintained
15377F: Documentation/fb/uvesafb.txt 15400F: Documentation/fb/uvesafb.txt
15378F: drivers/video/fbdev/uvesafb.* 15401F: drivers/video/fbdev/uvesafb.*
@@ -15896,6 +15919,7 @@ F: net/x25/
15896X86 ARCHITECTURE (32-BIT AND 64-BIT) 15919X86 ARCHITECTURE (32-BIT AND 64-BIT)
15897M: Thomas Gleixner <tglx@linutronix.de> 15920M: Thomas Gleixner <tglx@linutronix.de>
15898M: Ingo Molnar <mingo@redhat.com> 15921M: Ingo Molnar <mingo@redhat.com>
15922M: Borislav Petkov <bp@alien8.de>
15899R: "H. Peter Anvin" <hpa@zytor.com> 15923R: "H. Peter Anvin" <hpa@zytor.com>
15900M: x86@kernel.org 15924M: x86@kernel.org
15901L: linux-kernel@vger.kernel.org 15925L: linux-kernel@vger.kernel.org
@@ -15924,6 +15948,15 @@ M: Borislav Petkov <bp@alien8.de>
15924S: Maintained 15948S: Maintained
15925F: arch/x86/kernel/cpu/microcode/* 15949F: arch/x86/kernel/cpu/microcode/*
15926 15950
15951X86 MM
15952M: Dave Hansen <dave.hansen@linux.intel.com>
15953M: Andy Lutomirski <luto@kernel.org>
15954M: Peter Zijlstra <peterz@infradead.org>
15955L: linux-kernel@vger.kernel.org
15956T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
15957S: Maintained
15958F: arch/x86/mm/
15959
15927X86 PLATFORM DRIVERS 15960X86 PLATFORM DRIVERS
15928M: Darren Hart <dvhart@infradead.org> 15961M: Darren Hart <dvhart@infradead.org>
15929M: Andy Shevchenko <andy@infradead.org> 15962M: Andy Shevchenko <andy@infradead.org>
diff --git a/Makefile b/Makefile
index 2b458801ba74..6c3da3e10f07 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 19 3PATCHLEVEL = 19
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc6
6NAME = Merciless Moray 6NAME = Merciless Moray
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
299KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION) 299KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
300export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION 300export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
301 301
302# SUBARCH tells the usermode build what the underlying arch is. That is set 302include scripts/subarch.include
303# first, and if a usermode build is happening, the "ARCH=um" on the command
304# line overrides the setting of ARCH below. If a native build is happening,
305# then ARCH is assigned, getting whatever value it gets normally, and
306# SUBARCH is subsequently ignored.
307
308SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
309 -e s/sun4u/sparc64/ \
310 -e s/arm.*/arm/ -e s/sa110/arm/ \
311 -e s/s390x/s390/ -e s/parisc64/parisc/ \
312 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
313 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
314 -e s/riscv.*/riscv/)
315 303
316# Cross compiling and selecting different set of gcc/bin-utils 304# Cross compiling and selecting different set of gcc/bin-utils
317# --------------------------------------------------------------------------- 305# ---------------------------------------------------------------------------
@@ -616,6 +604,11 @@ CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
616 $(call cc-disable-warning,maybe-uninitialized,) 604 $(call cc-disable-warning,maybe-uninitialized,)
617export CFLAGS_GCOV 605export CFLAGS_GCOV
618 606
607# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
608ifdef CONFIG_FUNCTION_TRACER
609 CC_FLAGS_FTRACE := -pg
610endif
611
619# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default 612# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
620# values of the respective KBUILD_* variables 613# values of the respective KBUILD_* variables
621ARCH_CPPFLAGS := 614ARCH_CPPFLAGS :=
@@ -755,9 +748,6 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
755endif 748endif
756 749
757ifdef CONFIG_FUNCTION_TRACER 750ifdef CONFIG_FUNCTION_TRACER
758ifndef CC_FLAGS_FTRACE
759CC_FLAGS_FTRACE := -pg
760endif
761ifdef CONFIG_FTRACE_MCOUNT_RECORD 751ifdef CONFIG_FTRACE_MCOUNT_RECORD
762 # gcc 5 supports generating the mcount tables directly 752 # gcc 5 supports generating the mcount tables directly
763 ifeq ($(call cc-option-yn,-mrecord-mcount),y) 753 ifeq ($(call cc-option-yn,-mrecord-mcount),y)
@@ -807,6 +797,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
807# disable pointer signed / unsigned warnings in gcc 4.0 797# disable pointer signed / unsigned warnings in gcc 4.0
808KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) 798KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
809 799
800# disable stringop warnings in gcc 8+
801KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
802
810# disable invalid "can't wrap" optimizations for signed / pointers 803# disable invalid "can't wrap" optimizations for signed / pointers
811KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) 804KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
812 805
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 6d5eb8267e42..b4441b0764d7 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,6 +9,7 @@
9config ARC 9config ARC
10 def_bool y 10 def_bool y
11 select ARC_TIMERS 11 select ARC_TIMERS
12 select ARCH_HAS_PTE_SPECIAL
12 select ARCH_HAS_SYNC_DMA_FOR_CPU 13 select ARCH_HAS_SYNC_DMA_FOR_CPU
13 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 14 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
14 select ARCH_HAS_SG_CHAIN 15 select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
28 select GENERIC_SMP_IDLE_THREAD 29 select GENERIC_SMP_IDLE_THREAD
29 select HAVE_ARCH_KGDB 30 select HAVE_ARCH_KGDB
30 select HAVE_ARCH_TRACEHOOK 31 select HAVE_ARCH_TRACEHOOK
32 select HAVE_DEBUG_STACKOVERFLOW
31 select HAVE_FUTEX_CMPXCHG if FUTEX 33 select HAVE_FUTEX_CMPXCHG if FUTEX
34 select HAVE_GENERIC_DMA_COHERENT
32 select HAVE_IOREMAP_PROT 35 select HAVE_IOREMAP_PROT
36 select HAVE_KERNEL_GZIP
37 select HAVE_KERNEL_LZMA
33 select HAVE_KPROBES 38 select HAVE_KPROBES
34 select HAVE_KRETPROBES 39 select HAVE_KRETPROBES
35 select HAVE_MEMBLOCK 40 select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
44 select OF_EARLY_FLATTREE 49 select OF_EARLY_FLATTREE
45 select OF_RESERVED_MEM 50 select OF_RESERVED_MEM
46 select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING 51 select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
47 select HAVE_DEBUG_STACKOVERFLOW
48 select HAVE_GENERIC_DMA_COHERENT
49 select HAVE_KERNEL_GZIP
50 select HAVE_KERNEL_LZMA
51 select ARCH_HAS_PTE_SPECIAL
52 52
53config ARCH_HAS_CACHE_LINE_SIZE 53config ARCH_HAS_CACHE_LINE_SIZE
54 def_bool y 54 def_bool y
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index fb026196aaab..99cce77ab98f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -43,10 +43,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
43LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h 43LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
44endif 44endif
45 45
46upto_gcc44 := $(call cc-ifversion, -le, 0404, y) 46cflags-y += -fsection-anchors
47atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
48
49cflags-$(atleast_gcc44) += -fsection-anchors
50 47
51cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 48cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
52cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 49cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
@@ -82,11 +79,6 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
82cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian 79cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
83ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB 80ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
84 81
85# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
86# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
87# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
88ldflags-$(upto_gcc44) += -marclinux
89
90LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 82LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
91 83
92# Modules with short calls might break for calls into builtin-kernel 84# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index dc91c663bcc0..d75d65ddf8e3 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -94,6 +94,32 @@
94 }; 94 };
95 95
96 /* 96 /*
97 * Mark DMA peripherals connected via IOC port as dma-coherent. We do
98 * it via overlay because peripherals defined in axs10x_mb.dtsi are
99 * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
100 * only AXS103 board has HW-coherent DMA peripherals)
101 * We don't need to mark pgu@17000 as dma-coherent because it uses
102 * external DMA buffer located outside of IOC aperture.
103 */
104 axs10x_mb {
105 ethernet@0x18000 {
106 dma-coherent;
107 };
108
109 ehci@0x40000 {
110 dma-coherent;
111 };
112
113 ohci@0x60000 {
114 dma-coherent;
115 };
116
117 mmc@0x15000 {
118 dma-coherent;
119 };
120 };
121
122 /*
97 * The DW APB ICTL intc on MB is connected to CPU intc via a 123 * The DW APB ICTL intc on MB is connected to CPU intc via a
98 * DT "invisible" DW APB GPIO block, configured to simply pass thru 124 * DT "invisible" DW APB GPIO block, configured to simply pass thru
99 * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c) 125 * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 69ff4895f2ba..a05bb737ea63 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -101,6 +101,32 @@
101 }; 101 };
102 102
103 /* 103 /*
104 * Mark DMA peripherals connected via IOC port as dma-coherent. We do
105 * it via overlay because peripherals defined in axs10x_mb.dtsi are
106 * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
107 * only AXS103 board has HW-coherent DMA peripherals)
108 * We don't need to mark pgu@17000 as dma-coherent because it uses
109 * external DMA buffer located outside of IOC aperture.
110 */
111 axs10x_mb {
112 ethernet@0x18000 {
113 dma-coherent;
114 };
115
116 ehci@0x40000 {
117 dma-coherent;
118 };
119
120 ohci@0x60000 {
121 dma-coherent;
122 };
123
124 mmc@0x15000 {
125 dma-coherent;
126 };
127 };
128
129 /*
104 * This INTC is actually connected to DW APB GPIO 130 * This INTC is actually connected to DW APB GPIO
105 * which acts as a wire between MB INTC and CPU INTC. 131 * which acts as a wire between MB INTC and CPU INTC.
106 * GPIO INTC is configured in platform init code 132 * GPIO INTC is configured in platform init code
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 47b74fbc403c..37bafd44e36d 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -9,6 +9,10 @@
9 */ 9 */
10 10
11/ { 11/ {
12 aliases {
13 ethernet = &gmac;
14 };
15
12 axs10x_mb { 16 axs10x_mb {
13 compatible = "simple-bus"; 17 compatible = "simple-bus";
14 #address-cells = <1>; 18 #address-cells = <1>;
@@ -68,7 +72,7 @@
68 }; 72 };
69 }; 73 };
70 74
71 ethernet@0x18000 { 75 gmac: ethernet@0x18000 {
72 #interrupt-cells = <1>; 76 #interrupt-cells = <1>;
73 compatible = "snps,dwmac"; 77 compatible = "snps,dwmac";
74 reg = < 0x18000 0x2000 >; 78 reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
81 max-speed = <100>; 85 max-speed = <100>;
82 resets = <&creg_rst 5>; 86 resets = <&creg_rst 5>;
83 reset-names = "stmmaceth"; 87 reset-names = "stmmaceth";
88 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
84 }; 89 };
85 90
86 ehci@0x40000 { 91 ehci@0x40000 {
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 006aa3de5348..ef149f59929a 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -25,6 +25,10 @@
25 bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1"; 25 bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
26 }; 26 };
27 27
28 aliases {
29 ethernet = &gmac;
30 };
31
28 cpus { 32 cpus {
29 #address-cells = <1>; 33 #address-cells = <1>;
30 #size-cells = <0>; 34 #size-cells = <0>;
@@ -163,7 +167,7 @@
163 #clock-cells = <0>; 167 #clock-cells = <0>;
164 }; 168 };
165 169
166 ethernet@8000 { 170 gmac: ethernet@8000 {
167 #interrupt-cells = <1>; 171 #interrupt-cells = <1>;
168 compatible = "snps,dwmac"; 172 compatible = "snps,dwmac";
169 reg = <0x8000 0x2000>; 173 reg = <0x8000 0x2000>;
@@ -176,6 +180,8 @@
176 phy-handle = <&phy0>; 180 phy-handle = <&phy0>;
177 resets = <&cgu_rst HSDK_ETH_RESET>; 181 resets = <&cgu_rst HSDK_ETH_RESET>;
178 reset-names = "stmmaceth"; 182 reset-names = "stmmaceth";
183 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
184 dma-coherent;
179 185
180 mdio { 186 mdio {
181 #address-cells = <1>; 187 #address-cells = <1>;
@@ -194,12 +200,14 @@
194 compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; 200 compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
195 reg = <0x60000 0x100>; 201 reg = <0x60000 0x100>;
196 interrupts = <15>; 202 interrupts = <15>;
203 dma-coherent;
197 }; 204 };
198 205
199 ehci@40000 { 206 ehci@40000 {
200 compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; 207 compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
201 reg = <0x40000 0x100>; 208 reg = <0x40000 0x100>;
202 interrupts = <15>; 209 interrupts = <15>;
210 dma-coherent;
203 }; 211 };
204 212
205 mmc@a000 { 213 mmc@a000 {
@@ -212,6 +220,7 @@
212 clock-names = "biu", "ciu"; 220 clock-names = "biu", "ciu";
213 interrupts = <12>; 221 interrupts = <12>;
214 bus-width = <4>; 222 bus-width = <4>;
223 dma-coherent;
215 }; 224 };
216 }; 225 };
217 226
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index a635ea972304..41bc08be6a3b 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,5 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set 3# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
63CONFIG_MOUSE_SERIAL=y 61CONFIG_MOUSE_SERIAL=y
64CONFIG_MOUSE_SYNAPTICS_USB=y 62CONFIG_MOUSE_SYNAPTICS_USB=y
65# CONFIG_LEGACY_PTYS is not set 63# CONFIG_LEGACY_PTYS is not set
66# CONFIG_DEVKMEM is not set
67CONFIG_SERIAL_8250=y 64CONFIG_SERIAL_8250=y
68CONFIG_SERIAL_8250_CONSOLE=y 65CONFIG_SERIAL_8250_CONSOLE=y
69CONFIG_SERIAL_8250_DW=y 66CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index aa507e423075..1e1c4a8011b5 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,5 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set 3# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
64CONFIG_MOUSE_SERIAL=y 62CONFIG_MOUSE_SERIAL=y
65CONFIG_MOUSE_SYNAPTICS_USB=y 63CONFIG_MOUSE_SYNAPTICS_USB=y
66# CONFIG_LEGACY_PTYS is not set 64# CONFIG_LEGACY_PTYS is not set
67# CONFIG_DEVKMEM is not set
68CONFIG_SERIAL_8250=y 65CONFIG_SERIAL_8250=y
69CONFIG_SERIAL_8250_CONSOLE=y 66CONFIG_SERIAL_8250_CONSOLE=y
70CONFIG_SERIAL_8250_DW=y 67CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index eba07f468654..6b0c0cfd5c30 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,5 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set 3# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
65CONFIG_MOUSE_SERIAL=y 63CONFIG_MOUSE_SERIAL=y
66CONFIG_MOUSE_SYNAPTICS_USB=y 64CONFIG_MOUSE_SYNAPTICS_USB=y
67# CONFIG_LEGACY_PTYS is not set 65# CONFIG_LEGACY_PTYS is not set
68# CONFIG_DEVKMEM is not set
69CONFIG_SERIAL_8250=y 66CONFIG_SERIAL_8250=y
70CONFIG_SERIAL_8250_CONSOLE=y 67CONFIG_SERIAL_8250_CONSOLE=y
71CONFIG_SERIAL_8250_DW=y 68CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index 098b19fbaa51..240dd2cd5148 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -1,4 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
57# CONFIG_SERIO_SERPORT is not set 56# CONFIG_SERIO_SERPORT is not set
58CONFIG_SERIO_ARC_PS2=y 57CONFIG_SERIO_ARC_PS2=y
59# CONFIG_LEGACY_PTYS is not set 58# CONFIG_LEGACY_PTYS is not set
60# CONFIG_DEVKMEM is not set
61CONFIG_SERIAL_8250=y 59CONFIG_SERIAL_8250=y
62CONFIG_SERIAL_8250_CONSOLE=y 60CONFIG_SERIAL_8250_CONSOLE=y
63CONFIG_SERIAL_8250_NR_UARTS=1 61CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 0104c404d897..14ae7e5acc7c 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -1,4 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
60# CONFIG_SERIO_SERPORT is not set 59# CONFIG_SERIO_SERPORT is not set
61CONFIG_SERIO_ARC_PS2=y 60CONFIG_SERIO_ARC_PS2=y
62# CONFIG_LEGACY_PTYS is not set 61# CONFIG_LEGACY_PTYS is not set
63# CONFIG_DEVKMEM is not set
64CONFIG_SERIAL_8250=y 62CONFIG_SERIAL_8250=y
65CONFIG_SERIAL_8250_CONSOLE=y 63CONFIG_SERIAL_8250_CONSOLE=y
66CONFIG_SERIAL_8250_NR_UARTS=1 64CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 6491be0ddbc9..1dec2b4bc5e6 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -1,4 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
3# CONFIG_CROSS_MEMORY_ATTACH is not set 2# CONFIG_CROSS_MEMORY_ATTACH is not set
4CONFIG_NO_HZ_IDLE=y 3CONFIG_NO_HZ_IDLE=y
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 7c9c706ae7f6..31ba224bbfb4 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
59# CONFIG_INPUT_MOUSE is not set 59# CONFIG_INPUT_MOUSE is not set
60# CONFIG_SERIO is not set 60# CONFIG_SERIO is not set
61# CONFIG_LEGACY_PTYS is not set 61# CONFIG_LEGACY_PTYS is not set
62# CONFIG_DEVKMEM is not set
63CONFIG_SERIAL_8250=y 62CONFIG_SERIAL_8250=y
64CONFIG_SERIAL_8250_CONSOLE=y 63CONFIG_SERIAL_8250_CONSOLE=y
65CONFIG_SERIAL_8250_NR_UARTS=1 64CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 99e05cf63fca..8e0b8b134cd9 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 4CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
44# CONFIG_INPUT_MOUSE is not set 43# CONFIG_INPUT_MOUSE is not set
45# CONFIG_SERIO is not set 44# CONFIG_SERIO is not set
46# CONFIG_LEGACY_PTYS is not set 45# CONFIG_LEGACY_PTYS is not set
47# CONFIG_DEVKMEM is not set
48CONFIG_SERIAL_ARC=y 46CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 47CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 48# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index 0dc4f9b737e7..739b90e5e893 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 4CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
45# CONFIG_INPUT_MOUSE is not set 44# CONFIG_INPUT_MOUSE is not set
46# CONFIG_SERIO is not set 45# CONFIG_SERIO is not set
47# CONFIG_LEGACY_PTYS is not set 46# CONFIG_LEGACY_PTYS is not set
48# CONFIG_DEVKMEM is not set
49CONFIG_SERIAL_ARC=y 47CONFIG_SERIAL_ARC=y
50CONFIG_SERIAL_ARC_CONSOLE=y 48CONFIG_SERIAL_ARC_CONSOLE=y
51# CONFIG_HW_RANDOM is not set 49# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index be3c30a15e54..b5895bdf3a93 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4# CONFIG_CROSS_MEMORY_ATTACH is not set 3# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_HIGH_RES_TIMERS=y 4CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
44# CONFIG_INPUT_MOUSE is not set 43# CONFIG_INPUT_MOUSE is not set
45# CONFIG_SERIO is not set 44# CONFIG_SERIO is not set
46# CONFIG_LEGACY_PTYS is not set 45# CONFIG_LEGACY_PTYS is not set
47# CONFIG_DEVKMEM is not set
48CONFIG_SERIAL_ARC=y 46CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 47CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 48# CONFIG_HW_RANDOM is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 3a74b9b21772..f14eeff7d308 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set 4# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
48# CONFIG_SERIO_SERPORT is not set 47# CONFIG_SERIO_SERPORT is not set
49CONFIG_SERIO_ARC_PS2=y 48CONFIG_SERIO_ARC_PS2=y
50# CONFIG_LEGACY_PTYS is not set 49# CONFIG_LEGACY_PTYS is not set
51# CONFIG_DEVKMEM is not set
52CONFIG_SERIAL_8250=y 50CONFIG_SERIAL_8250=y
53CONFIG_SERIAL_8250_CONSOLE=y 51CONFIG_SERIAL_8250_CONSOLE=y
54CONFIG_SERIAL_8250_NR_UARTS=1 52CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index ea2834b4dc1d..025298a48305 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set 4# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
47# CONFIG_SERIO_SERPORT is not set 46# CONFIG_SERIO_SERPORT is not set
48CONFIG_SERIO_ARC_PS2=y 47CONFIG_SERIO_ARC_PS2=y
49# CONFIG_LEGACY_PTYS is not set 48# CONFIG_LEGACY_PTYS is not set
50# CONFIG_DEVKMEM is not set
51CONFIG_SERIAL_8250=y 49CONFIG_SERIAL_8250=y
52CONFIG_SERIAL_8250_CONSOLE=y 50CONFIG_SERIAL_8250_CONSOLE=y
53CONFIG_SERIAL_8250_NR_UARTS=1 51CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 80a5a1b4924b..df7b77b13b82 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,3 @@
1CONFIG_DEFAULT_HOSTNAME="ARCLinux"
2# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
4# CONFIG_CROSS_MEMORY_ATTACH is not set 3# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
58# CONFIG_SERIO_SERPORT is not set 57# CONFIG_SERIO_SERPORT is not set
59CONFIG_SERIO_ARC_PS2=y 58CONFIG_SERIO_ARC_PS2=y
60# CONFIG_LEGACY_PTYS is not set 59# CONFIG_LEGACY_PTYS is not set
61# CONFIG_DEVKMEM is not set
62CONFIG_SERIAL_8250=y 60CONFIG_SERIAL_8250=y
63CONFIG_SERIAL_8250_CONSOLE=y 61CONFIG_SERIAL_8250_CONSOLE=y
64CONFIG_SERIAL_8250_NR_UARTS=1 62CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 2cc87f909747..a7f65313f84a 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
57# CONFIG_SERIO is not set 57# CONFIG_SERIO is not set
58# CONFIG_VT is not set 58# CONFIG_VT is not set
59# CONFIG_LEGACY_PTYS is not set 59# CONFIG_LEGACY_PTYS is not set
60# CONFIG_DEVKMEM is not set
61CONFIG_SERIAL_8250=y 60CONFIG_SERIAL_8250=y
62CONFIG_SERIAL_8250_CONSOLE=y 61CONFIG_SERIAL_8250_CONSOLE=y
63CONFIG_SERIAL_8250_NR_UARTS=1 62CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index f629493929ea..db47c3541f15 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_CROSS_MEMORY_ATTACH is not set 2# CONFIG_CROSS_MEMORY_ATTACH is not set
4CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
5CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
53CONFIG_MOUSE_PS2_TOUCHKIT=y 52CONFIG_MOUSE_PS2_TOUCHKIT=y
54CONFIG_SERIO_ARC_PS2=y 53CONFIG_SERIO_ARC_PS2=y
55# CONFIG_LEGACY_PTYS is not set 54# CONFIG_LEGACY_PTYS is not set
56# CONFIG_DEVKMEM is not set
57CONFIG_SERIAL_8250=y 55CONFIG_SERIAL_8250=y
58CONFIG_SERIAL_8250_CONSOLE=y 56CONFIG_SERIAL_8250_CONSOLE=y
59CONFIG_SERIAL_8250_DW=y 57CONFIG_SERIAL_8250_DW=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 21f0ca26a05d..a8ac5e917d9a 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,5 +1,4 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_CROSS_MEMORY_ATTACH is not set 2# CONFIG_CROSS_MEMORY_ATTACH is not set
4CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
5CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 4e0072730241..158af079838d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
84 "1: llock %[orig], [%[ctr]] \n" \ 84 "1: llock %[orig], [%[ctr]] \n" \
85 " " #asm_op " %[val], %[orig], %[i] \n" \ 85 " " #asm_op " %[val], %[orig], %[i] \n" \
86 " scond %[val], [%[ctr]] \n" \ 86 " scond %[val], [%[ctr]] \n" \
87 " \n" \ 87 " bnz 1b \n" \
88 : [val] "=&r" (val), \ 88 : [val] "=&r" (val), \
89 [orig] "=&r" (orig) \ 89 [orig] "=&r" (orig) \
90 : [ctr] "r" (&v->counter), \ 90 : [ctr] "r" (&v->counter), \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..c946c0a83e76
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,13 @@
1// SPDX-License-Identifier: GPL-2.0
2// (C) 2018 Synopsys, Inc. (www.synopsys.com)
3
4#ifndef ASM_ARC_DMA_MAPPING_H
5#define ASM_ARC_DMA_MAPPING_H
6
7#include <asm-generic/dma-mapping.h>
8
9void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
10 const struct iommu_ops *iommu, bool coherent);
11#define arch_setup_dma_ops arch_setup_dma_ops
12
13#endif
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 783b20354f8b..e8d9fb452346 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -83,9 +83,6 @@ done:
83static void show_faulting_vma(unsigned long address, char *buf) 83static void show_faulting_vma(unsigned long address, char *buf)
84{ 84{
85 struct vm_area_struct *vma; 85 struct vm_area_struct *vma;
86 struct inode *inode;
87 unsigned long ino = 0;
88 dev_t dev = 0;
89 char *nm = buf; 86 char *nm = buf;
90 struct mm_struct *active_mm = current->active_mm; 87 struct mm_struct *active_mm = current->active_mm;
91 88
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
99 * if the container VMA is not found 96 * if the container VMA is not found
100 */ 97 */
101 if (vma && (vma->vm_start <= address)) { 98 if (vma && (vma->vm_start <= address)) {
102 struct file *file = vma->vm_file; 99 if (vma->vm_file) {
103 if (file) { 100 nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
104 nm = file_path(file, buf, PAGE_SIZE - 1); 101 if (IS_ERR(nm))
105 inode = file_inode(vma->vm_file); 102 nm = "?";
106 dev = inode->i_sb->s_dev;
107 ino = inode->i_ino;
108 } 103 }
109 pr_info(" @off 0x%lx in [%s]\n" 104 pr_info(" @off 0x%lx in [%s]\n"
110 " VMA: 0x%08lx to 0x%08lx\n", 105 " VMA: 0x%08lx to 0x%08lx\n",
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 25c631942500..f2701c13a66b 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
65 65
66 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n", 66 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
67 perip_base, 67 perip_base,
68 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency ")); 68 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
69 69
70 return buf; 70 return buf;
71} 71}
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
897} 897}
898 898
899/* 899/*
900 * DMA ops for systems with IOC
901 * IOC hardware snoops all DMA traffic keeping the caches consistent with
902 * memory - eliding need for any explicit cache maintenance of DMA buffers
903 */
904static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
905static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
906static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
907
908/*
909 * Exported DMA API 900 * Exported DMA API
910 */ 901 */
911void dma_cache_wback_inv(phys_addr_t start, unsigned long sz) 902void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
1153{ 1144{
1154 unsigned int ioc_base, mem_sz; 1145 unsigned int ioc_base, mem_sz;
1155 1146
1147 /*
1148 * As for today we don't support both IOC and ZONE_HIGHMEM enabled
1149 * simultaneously. This happens because as of today IOC aperture covers
1150 * only ZONE_NORMAL (low mem) and any dma transactions outside this
1151 * region won't be HW coherent.
1152 * If we want to use both IOC and ZONE_HIGHMEM we can use
1153 * bounce_buffer to handle dma transactions to HIGHMEM.
1154 * Also it is possible to modify dma_direct cache ops or increase IOC
1155 * aperture size if we are planning to use HIGHMEM without PAE.
1156 */
1157 if (IS_ENABLED(CONFIG_HIGHMEM))
1158 panic("IOC and HIGHMEM can't be used simultaneously");
1159
1156 /* Flush + invalidate + disable L1 dcache */ 1160 /* Flush + invalidate + disable L1 dcache */
1157 __dc_disable(); 1161 __dc_disable();
1158 1162
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
1264 if (is_isa_arcv2() && ioc_enable) 1268 if (is_isa_arcv2() && ioc_enable)
1265 arc_ioc_setup(); 1269 arc_ioc_setup();
1266 1270
1267 if (is_isa_arcv2() && ioc_enable) { 1271 if (is_isa_arcv2() && l2_line_sz && slc_enable) {
1268 __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
1269 __dma_cache_inv = __dma_cache_inv_ioc;
1270 __dma_cache_wback = __dma_cache_wback_ioc;
1271 } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
1272 __dma_cache_wback_inv = __dma_cache_wback_inv_slc; 1272 __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
1273 __dma_cache_inv = __dma_cache_inv_slc; 1273 __dma_cache_inv = __dma_cache_inv_slc;
1274 __dma_cache_wback = __dma_cache_wback_slc; 1274 __dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
1277 __dma_cache_inv = __dma_cache_inv_l1; 1277 __dma_cache_inv = __dma_cache_inv_l1;
1278 __dma_cache_wback = __dma_cache_wback_l1; 1278 __dma_cache_wback = __dma_cache_wback_l1;
1279 } 1279 }
1280 /*
1281 * In case of IOC (say IOC+SLC case), pointers above could still be set
1282 * but end up not being relevant as the first function in chain is not
1283 * called at all for @dma_direct_ops
1284 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
1285 */
1280} 1286}
1281 1287
1282void __ref arc_cache_init(void) 1288void __ref arc_cache_init(void)
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index ec47e6079f5d..c75d5c3470e3 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -6,20 +6,17 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9/*
10 * DMA Coherent API Notes
11 *
12 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
13 * implemented by accessing it using a kernel virtual address, with
14 * Cache bit off in the TLB entry.
15 *
16 * The default DMA address == Phy address which is 0x8000_0000 based.
17 */
18
19#include <linux/dma-noncoherent.h> 9#include <linux/dma-noncoherent.h>
20#include <asm/cache.h> 10#include <asm/cache.h>
21#include <asm/cacheflush.h> 11#include <asm/cacheflush.h>
22 12
13/*
14 * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
15 * - hardware IOC not available (or "dma-coherent" not set for device in DT)
16 * - But still handle both coherent and non-coherent requests from caller
17 *
18 * For DMA coherent hardware (IOC) generic code suffices
19 */
23void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 20void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
24 gfp_t gfp, unsigned long attrs) 21 gfp_t gfp, unsigned long attrs)
25{ 22{
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
27 struct page *page; 24 struct page *page;
28 phys_addr_t paddr; 25 phys_addr_t paddr;
29 void *kvaddr; 26 void *kvaddr;
30 int need_coh = 1, need_kvaddr = 0; 27 bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
31
32 page = alloc_pages(gfp, order);
33 if (!page)
34 return NULL;
35 28
36 /* 29 /*
37 * IOC relies on all data (even coherent DMA data) being in cache 30 * __GFP_HIGHMEM flag is cleared by upper layer functions
38 * Thus allocate normal cached memory 31 * (in include/linux/dma-mapping.h) so we should never get a
39 * 32 * __GFP_HIGHMEM here.
40 * The gains with IOC are two pronged:
41 * -For streaming data, elides need for cache maintenance, saving
42 * cycles in flush code, and bus bandwidth as all the lines of a
43 * buffer need to be flushed out to memory
44 * -For coherent data, Read/Write to buffers terminate early in cache
45 * (vs. always going to memory - thus are faster)
46 */ 33 */
47 if ((is_isa_arcv2() && ioc_enable) || 34 BUG_ON(gfp & __GFP_HIGHMEM);
48 (attrs & DMA_ATTR_NON_CONSISTENT))
49 need_coh = 0;
50 35
51 /* 36 page = alloc_pages(gfp, order);
52 * - A coherent buffer needs MMU mapping to enforce non-cachability 37 if (!page)
53 * - A highmem page needs a virtual handle (hence MMU mapping) 38 return NULL;
54 * independent of cachability
55 */
56 if (PageHighMem(page) || need_coh)
57 need_kvaddr = 1;
58 39
59 /* This is linear addr (0x8000_0000 based) */ 40 /* This is linear addr (0x8000_0000 based) */
60 paddr = page_to_phys(page); 41 paddr = page_to_phys(page);
61 42
62 *dma_handle = paddr; 43 *dma_handle = paddr;
63 44
64 /* This is kernel Virtual address (0x7000_0000 based) */ 45 /*
65 if (need_kvaddr) { 46 * A coherent buffer needs MMU mapping to enforce non-cachability.
47 * kvaddr is kernel Virtual address (0x7000_0000 based).
48 */
49 if (need_coh) {
66 kvaddr = ioremap_nocache(paddr, size); 50 kvaddr = ioremap_nocache(paddr, size);
67 if (kvaddr == NULL) { 51 if (kvaddr == NULL) {
68 __free_pages(page, order); 52 __free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
93{ 77{
94 phys_addr_t paddr = dma_handle; 78 phys_addr_t paddr = dma_handle;
95 struct page *page = virt_to_page(paddr); 79 struct page *page = virt_to_page(paddr);
96 int is_non_coh = 1;
97
98 is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
99 (is_isa_arcv2() && ioc_enable);
100 80
101 if (PageHighMem(page) || !is_non_coh) 81 if (!(attrs & DMA_ATTR_NON_CONSISTENT))
102 iounmap((void __force __iomem *)vaddr); 82 iounmap((void __force __iomem *)vaddr);
103 83
104 __free_pages(page, get_order(size)); 84 __free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
185 break; 165 break;
186 } 166 }
187} 167}
168
169/*
170 * Plug in coherent or noncoherent dma ops
171 */
172void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
173 const struct iommu_ops *iommu, bool coherent)
174{
175 /*
176 * IOC hardware snoops all DMA traffic keeping the caches consistent
177 * with memory - eliding need for any explicit cache maintenance of
178 * DMA buffers - so we can use dma_direct cache ops.
179 */
180 if (is_isa_arcv2() && ioc_enable && coherent) {
181 set_dma_ops(dev, &dma_direct_ops);
182 dev_info(dev, "use dma_direct_ops cache ops\n");
183 } else {
184 set_dma_ops(dev, &dma_noncoherent_ops);
185 dev_info(dev, "use dma_noncoherent_ops cache ops\n");
186 }
187}
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index 4d969013f99a..4d969013f99a 100755..100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index f0cbd86312dc..d4b7c59eec68 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -469,6 +469,7 @@
469 ti,hwmods = "rtc"; 469 ti,hwmods = "rtc";
470 clocks = <&clk_32768_ck>; 470 clocks = <&clk_32768_ck>;
471 clock-names = "int-clk"; 471 clock-names = "int-clk";
472 system-power-controller;
472 status = "disabled"; 473 status = "disabled";
473 }; 474 };
474 475
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index b10dccd0958f..3b1baa8605a7 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
11#include "sama5d2-pinfunc.h" 11#include "sama5d2-pinfunc.h"
12#include <dt-bindings/mfd/atmel-flexcom.h> 12#include <dt-bindings/mfd/atmel-flexcom.h>
13#include <dt-bindings/gpio/gpio.h> 13#include <dt-bindings/gpio/gpio.h>
14#include <dt-bindings/pinctrl/at91.h>
14 15
15/ { 16/ {
16 model = "Atmel SAMA5D2 PTC EK"; 17 model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
299 <PIN_PA30__NWE_NANDWE>, 300 <PIN_PA30__NWE_NANDWE>,
300 <PIN_PB2__NRD_NANDOE>; 301 <PIN_PB2__NRD_NANDOE>;
301 bias-pull-up; 302 bias-pull-up;
303 atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
302 }; 304 };
303 305
304 ale_cle_rdy_cs { 306 ale_cle_rdy_cs {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 43ee992ccdcf..6df61518776f 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -106,21 +106,23 @@
106 global_timer: timer@1e200 { 106 global_timer: timer@1e200 {
107 compatible = "arm,cortex-a9-global-timer"; 107 compatible = "arm,cortex-a9-global-timer";
108 reg = <0x1e200 0x20>; 108 reg = <0x1e200 0x20>;
109 interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; 109 interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
110 clocks = <&axi_clk>; 110 clocks = <&axi_clk>;
111 }; 111 };
112 112
113 local_timer: local-timer@1e600 { 113 local_timer: local-timer@1e600 {
114 compatible = "arm,cortex-a9-twd-timer"; 114 compatible = "arm,cortex-a9-twd-timer";
115 reg = <0x1e600 0x20>; 115 reg = <0x1e600 0x20>;
116 interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; 116 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
117 IRQ_TYPE_EDGE_RISING)>;
117 clocks = <&axi_clk>; 118 clocks = <&axi_clk>;
118 }; 119 };
119 120
120 twd_watchdog: watchdog@1e620 { 121 twd_watchdog: watchdog@1e620 {
121 compatible = "arm,cortex-a9-twd-wdt"; 122 compatible = "arm,cortex-a9-twd-wdt";
122 reg = <0x1e620 0x20>; 123 reg = <0x1e620 0x20>;
123 interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>; 124 interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
125 IRQ_TYPE_LEVEL_HIGH)>;
124 }; 126 };
125 127
126 armpll: armpll { 128 armpll: armpll {
@@ -158,7 +160,7 @@
158 serial0: serial@600 { 160 serial0: serial@600 {
159 compatible = "brcm,bcm6345-uart"; 161 compatible = "brcm,bcm6345-uart";
160 reg = <0x600 0x1b>; 162 reg = <0x600 0x1b>;
161 interrupts = <GIC_SPI 32 0>; 163 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
162 clocks = <&periph_clk>; 164 clocks = <&periph_clk>;
163 clock-names = "periph"; 165 clock-names = "periph";
164 status = "disabled"; 166 status = "disabled";
@@ -167,7 +169,7 @@
167 serial1: serial@620 { 169 serial1: serial@620 {
168 compatible = "brcm,bcm6345-uart"; 170 compatible = "brcm,bcm6345-uart";
169 reg = <0x620 0x1b>; 171 reg = <0x620 0x1b>;
170 interrupts = <GIC_SPI 33 0>; 172 interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
171 clocks = <&periph_clk>; 173 clocks = <&periph_clk>;
172 clock-names = "periph"; 174 clock-names = "periph";
173 status = "disabled"; 175 status = "disabled";
@@ -180,7 +182,7 @@
180 reg = <0x2000 0x600>, <0xf0 0x10>; 182 reg = <0x2000 0x600>, <0xf0 0x10>;
181 reg-names = "nand", "nand-int-base"; 183 reg-names = "nand", "nand-int-base";
182 status = "disabled"; 184 status = "disabled";
183 interrupts = <GIC_SPI 38 0>; 185 interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
184 interrupt-names = "nand"; 186 interrupt-names = "nand";
185 }; 187 };
186 188
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 9fb47724b9c1..ad2ae25b7b4d 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -13,6 +13,43 @@
13 reg = <0x40000000 0x08000000>; 13 reg = <0x40000000 0x08000000>;
14 }; 14 };
15 15
16 reg_vddio_sd0: regulator-vddio-sd0 {
17 compatible = "regulator-fixed";
18 regulator-name = "vddio-sd0";
19 regulator-min-microvolt = <3300000>;
20 regulator-max-microvolt = <3300000>;
21 gpio = <&gpio1 29 0>;
22 };
23
24 reg_lcd_3v3: regulator-lcd-3v3 {
25 compatible = "regulator-fixed";
26 regulator-name = "lcd-3v3";
27 regulator-min-microvolt = <3300000>;
28 regulator-max-microvolt = <3300000>;
29 gpio = <&gpio1 18 0>;
30 enable-active-high;
31 };
32
33 reg_lcd_5v: regulator-lcd-5v {
34 compatible = "regulator-fixed";
35 regulator-name = "lcd-5v";
36 regulator-min-microvolt = <5000000>;
37 regulator-max-microvolt = <5000000>;
38 };
39
40 panel {
41 compatible = "sii,43wvf1g";
42 backlight = <&backlight_display>;
43 dvdd-supply = <&reg_lcd_3v3>;
44 avdd-supply = <&reg_lcd_5v>;
45
46 port {
47 panel_in: endpoint {
48 remote-endpoint = <&display_out>;
49 };
50 };
51 };
52
16 apb@80000000 { 53 apb@80000000 {
17 apbh@80000000 { 54 apbh@80000000 {
18 gpmi-nand@8000c000 { 55 gpmi-nand@8000c000 {
@@ -52,31 +89,11 @@
52 lcdif@80030000 { 89 lcdif@80030000 {
53 pinctrl-names = "default"; 90 pinctrl-names = "default";
54 pinctrl-0 = <&lcdif_24bit_pins_a>; 91 pinctrl-0 = <&lcdif_24bit_pins_a>;
55 lcd-supply = <&reg_lcd_3v3>;
56 display = <&display0>;
57 status = "okay"; 92 status = "okay";
58 93
59 display0: display0 { 94 port {
60 bits-per-pixel = <32>; 95 display_out: endpoint {
61 bus-width = <24>; 96 remote-endpoint = <&panel_in>;
62
63 display-timings {
64 native-mode = <&timing0>;
65 timing0: timing0 {
66 clock-frequency = <9200000>;
67 hactive = <480>;
68 vactive = <272>;
69 hback-porch = <15>;
70 hfront-porch = <8>;
71 vback-porch = <12>;
72 vfront-porch = <4>;
73 hsync-len = <1>;
74 vsync-len = <1>;
75 hsync-active = <0>;
76 vsync-active = <0>;
77 de-active = <1>;
78 pixelclk-active = <0>;
79 };
80 }; 97 };
81 }; 98 };
82 }; 99 };
@@ -118,32 +135,7 @@
118 }; 135 };
119 }; 136 };
120 137
121 regulators { 138 backlight_display: backlight {
122 compatible = "simple-bus";
123 #address-cells = <1>;
124 #size-cells = <0>;
125
126 reg_vddio_sd0: regulator@0 {
127 compatible = "regulator-fixed";
128 reg = <0>;
129 regulator-name = "vddio-sd0";
130 regulator-min-microvolt = <3300000>;
131 regulator-max-microvolt = <3300000>;
132 gpio = <&gpio1 29 0>;
133 };
134
135 reg_lcd_3v3: regulator@1 {
136 compatible = "regulator-fixed";
137 reg = <1>;
138 regulator-name = "lcd-3v3";
139 regulator-min-microvolt = <3300000>;
140 regulator-max-microvolt = <3300000>;
141 gpio = <&gpio1 18 0>;
142 enable-active-high;
143 };
144 };
145
146 backlight {
147 compatible = "pwm-backlight"; 139 compatible = "pwm-backlight";
148 pwms = <&pwm 2 5000000>; 140 pwms = <&pwm 2 5000000>;
149 brightness-levels = <0 4 8 16 32 64 128 255>; 141 brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 6b0ae667640f..93ab5bdfe068 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -13,6 +13,87 @@
13 reg = <0x40000000 0x08000000>; 13 reg = <0x40000000 0x08000000>;
14 }; 14 };
15 15
16
17 reg_3p3v: regulator-3p3v {
18 compatible = "regulator-fixed";
19 regulator-name = "3P3V";
20 regulator-min-microvolt = <3300000>;
21 regulator-max-microvolt = <3300000>;
22 regulator-always-on;
23 };
24
25 reg_vddio_sd0: regulator-vddio-sd0 {
26 compatible = "regulator-fixed";
27 regulator-name = "vddio-sd0";
28 regulator-min-microvolt = <3300000>;
29 regulator-max-microvolt = <3300000>;
30 gpio = <&gpio3 28 0>;
31 };
32
33 reg_fec_3v3: regulator-fec-3v3 {
34 compatible = "regulator-fixed";
35 regulator-name = "fec-3v3";
36 regulator-min-microvolt = <3300000>;
37 regulator-max-microvolt = <3300000>;
38 gpio = <&gpio2 15 0>;
39 };
40
41 reg_usb0_vbus: regulator-usb0-vbus {
42 compatible = "regulator-fixed";
43 regulator-name = "usb0_vbus";
44 regulator-min-microvolt = <5000000>;
45 regulator-max-microvolt = <5000000>;
46 gpio = <&gpio3 9 0>;
47 enable-active-high;
48 };
49
50 reg_usb1_vbus: regulator-usb1-vbus {
51 compatible = "regulator-fixed";
52 regulator-name = "usb1_vbus";
53 regulator-min-microvolt = <5000000>;
54 regulator-max-microvolt = <5000000>;
55 gpio = <&gpio3 8 0>;
56 enable-active-high;
57 };
58
59 reg_lcd_3v3: regulator-lcd-3v3 {
60 compatible = "regulator-fixed";
61 regulator-name = "lcd-3v3";
62 regulator-min-microvolt = <3300000>;
63 regulator-max-microvolt = <3300000>;
64 gpio = <&gpio3 30 0>;
65 enable-active-high;
66 };
67
68 reg_can_3v3: regulator-can-3v3 {
69 compatible = "regulator-fixed";
70 regulator-name = "can-3v3";
71 regulator-min-microvolt = <3300000>;
72 regulator-max-microvolt = <3300000>;
73 gpio = <&gpio2 13 0>;
74 enable-active-high;
75 };
76
77 reg_lcd_5v: regulator-lcd-5v {
78 compatible = "regulator-fixed";
79 regulator-name = "lcd-5v";
80 regulator-min-microvolt = <5000000>;
81 regulator-max-microvolt = <5000000>;
82 };
83
84 panel {
85 compatible = "sii,43wvf1g";
86 backlight = <&backlight_display>;
87 dvdd-supply = <&reg_lcd_3v3>;
88 avdd-supply = <&reg_lcd_5v>;
89
90 port {
91 panel_in: endpoint {
92 remote-endpoint = <&display_out>;
93 };
94 };
95 };
96
16 apb@80000000 { 97 apb@80000000 {
17 apbh@80000000 { 98 apbh@80000000 {
18 gpmi-nand@8000c000 { 99 gpmi-nand@8000c000 {
@@ -116,31 +197,11 @@
116 pinctrl-names = "default"; 197 pinctrl-names = "default";
117 pinctrl-0 = <&lcdif_24bit_pins_a 198 pinctrl-0 = <&lcdif_24bit_pins_a
118 &lcdif_pins_evk>; 199 &lcdif_pins_evk>;
119 lcd-supply = <&reg_lcd_3v3>;
120 display = <&display0>;
121 status = "okay"; 200 status = "okay";
122 201
123 display0: display0 { 202 port {
124 bits-per-pixel = <32>; 203 display_out: endpoint {
125 bus-width = <24>; 204 remote-endpoint = <&panel_in>;
126
127 display-timings {
128 native-mode = <&timing0>;
129 timing0: timing0 {
130 clock-frequency = <33500000>;
131 hactive = <800>;
132 vactive = <480>;
133 hback-porch = <89>;
134 hfront-porch = <164>;
135 vback-porch = <23>;
136 vfront-porch = <10>;
137 hsync-len = <10>;
138 vsync-len = <10>;
139 hsync-active = <0>;
140 vsync-active = <0>;
141 de-active = <1>;
142 pixelclk-active = <0>;
143 };
144 }; 205 };
145 }; 206 };
146 }; 207 };
@@ -269,80 +330,6 @@
269 }; 330 };
270 }; 331 };
271 332
272 regulators {
273 compatible = "simple-bus";
274 #address-cells = <1>;
275 #size-cells = <0>;
276
277 reg_3p3v: regulator@0 {
278 compatible = "regulator-fixed";
279 reg = <0>;
280 regulator-name = "3P3V";
281 regulator-min-microvolt = <3300000>;
282 regulator-max-microvolt = <3300000>;
283 regulator-always-on;
284 };
285
286 reg_vddio_sd0: regulator@1 {
287 compatible = "regulator-fixed";
288 reg = <1>;
289 regulator-name = "vddio-sd0";
290 regulator-min-microvolt = <3300000>;
291 regulator-max-microvolt = <3300000>;
292 gpio = <&gpio3 28 0>;
293 };
294
295 reg_fec_3v3: regulator@2 {
296 compatible = "regulator-fixed";
297 reg = <2>;
298 regulator-name = "fec-3v3";
299 regulator-min-microvolt = <3300000>;
300 regulator-max-microvolt = <3300000>;
301 gpio = <&gpio2 15 0>;
302 };
303
304 reg_usb0_vbus: regulator@3 {
305 compatible = "regulator-fixed";
306 reg = <3>;
307 regulator-name = "usb0_vbus";
308 regulator-min-microvolt = <5000000>;
309 regulator-max-microvolt = <5000000>;
310 gpio = <&gpio3 9 0>;
311 enable-active-high;
312 };
313
314 reg_usb1_vbus: regulator@4 {
315 compatible = "regulator-fixed";
316 reg = <4>;
317 regulator-name = "usb1_vbus";
318 regulator-min-microvolt = <5000000>;
319 regulator-max-microvolt = <5000000>;
320 gpio = <&gpio3 8 0>;
321 enable-active-high;
322 };
323
324 reg_lcd_3v3: regulator@5 {
325 compatible = "regulator-fixed";
326 reg = <5>;
327 regulator-name = "lcd-3v3";
328 regulator-min-microvolt = <3300000>;
329 regulator-max-microvolt = <3300000>;
330 gpio = <&gpio3 30 0>;
331 enable-active-high;
332 };
333
334 reg_can_3v3: regulator@6 {
335 compatible = "regulator-fixed";
336 reg = <6>;
337 regulator-name = "can-3v3";
338 regulator-min-microvolt = <3300000>;
339 regulator-max-microvolt = <3300000>;
340 gpio = <&gpio2 13 0>;
341 enable-active-high;
342 };
343
344 };
345
346 sound { 333 sound {
347 compatible = "fsl,imx28-evk-sgtl5000", 334 compatible = "fsl,imx28-evk-sgtl5000",
348 "fsl,mxs-audio-sgtl5000"; 335 "fsl,mxs-audio-sgtl5000";
@@ -363,7 +350,7 @@
363 }; 350 };
364 }; 351 };
365 352
366 backlight { 353 backlight_display: backlight {
367 compatible = "pwm-backlight"; 354 compatible = "pwm-backlight";
368 pwms = <&pwm 2 5000000>; 355 pwms = <&pwm 2 5000000>;
369 brightness-levels = <0 4 8 16 32 64 128 255>; 356 brightness-levels = <0 4 8 16 32 64 128 255>;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 7cbc2ffa4b3a..7234e8330a57 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -126,10 +126,14 @@
126 interrupt-names = "msi"; 126 interrupt-names = "msi";
127 #interrupt-cells = <1>; 127 #interrupt-cells = <1>;
128 interrupt-map-mask = <0 0 0 0x7>; 128 interrupt-map-mask = <0 0 0 0x7>;
129 interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 129 /*
130 <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 130 * Reference manual lists pci irqs incorrectly
131 <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, 131 * Real hardware ordering is same as imx6: D+MSI, C, B, A
132 <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; 132 */
133 interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
134 <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
135 <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
136 <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
133 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, 137 clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
134 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, 138 <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
135 <&clks IMX7D_PCIE_PHY_ROOT_CLK>; 139 <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 12d6822f0057..04758a2a87f0 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -354,7 +354,7 @@
354&mmc2 { 354&mmc2 {
355 vmmc-supply = <&vsdio>; 355 vmmc-supply = <&vsdio>;
356 bus-width = <8>; 356 bus-width = <8>;
357 non-removable; 357 ti,non-removable;
358}; 358};
359 359
360&mmc3 { 360&mmc3 {
@@ -621,15 +621,6 @@
621 OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */ 621 OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1) /* abe_mcbsp3_fsx */
622 >; 622 >;
623 }; 623 };
624};
625
626&omap4_pmx_wkup {
627 usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
628 /* gpio_wk0 */
629 pinctrl-single,pins = <
630 OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
631 >;
632 };
633 624
634 vibrator_direction_pin: pinmux_vibrator_direction_pin { 625 vibrator_direction_pin: pinmux_vibrator_direction_pin {
635 pinctrl-single,pins = < 626 pinctrl-single,pins = <
@@ -644,6 +635,15 @@
644 }; 635 };
645}; 636};
646 637
638&omap4_pmx_wkup {
639 usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
640 /* gpio_wk0 */
641 pinctrl-single,pins = <
642 OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
643 >;
644 };
645};
646
647/* 647/*
648 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for 648 * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
649 * uart1 wakeirq. 649 * uart1 wakeirq.
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6..6e9e1c2f9def 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
41 }; 41 };
42 42
43 macb1: ethernet@f802c000 { 43 macb1: ethernet@f802c000 {
44 compatible = "cdns,at91sam9260-macb", "cdns,macb"; 44 compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
45 reg = <0xf802c000 0x100>; 45 reg = <0xf802c000 0x100>;
46 interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>; 46 interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
47 pinctrl-names = "default"; 47 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 661be948ab74..185541a5b69f 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -1078,8 +1078,8 @@
1078 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; 1078 interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
1079 clocks = <&rcc SPI6_K>; 1079 clocks = <&rcc SPI6_K>;
1080 resets = <&rcc SPI6_R>; 1080 resets = <&rcc SPI6_R>;
1081 dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>, 1081 dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
1082 <&mdma1 35 0x0 0x40002 0x0 0x0 0>; 1082 <&mdma1 35 0x0 0x40002 0x0 0x0>;
1083 dma-names = "rx", "tx"; 1083 dma-names = "rx", "tx";
1084 status = "disabled"; 1084 status = "disabled";
1085 }; 1085 };
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index ffd9f00f74a4..5f547c161baf 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -800,8 +800,7 @@
800 }; 800 };
801 801
802 hdmi_phy: hdmi-phy@1ef0000 { 802 hdmi_phy: hdmi-phy@1ef0000 {
803 compatible = "allwinner,sun8i-r40-hdmi-phy", 803 compatible = "allwinner,sun8i-r40-hdmi-phy";
804 "allwinner,sun50i-a64-hdmi-phy";
805 reg = <0x01ef0000 0x10000>; 804 reg = <0x01ef0000 0x10000>;
806 clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>, 805 clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
807 <&ccu 7>, <&ccu 16>; 806 <&ccu 7>, <&ccu 16>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index e2c127608bcc..7eca43ff69bb 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
257CONFIG_DRM=y 257CONFIG_DRM=y
258CONFIG_DRM_PANEL_LVDS=y 258CONFIG_DRM_PANEL_LVDS=y
259CONFIG_DRM_PANEL_SIMPLE=y 259CONFIG_DRM_PANEL_SIMPLE=y
260CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
260CONFIG_DRM_DW_HDMI_AHB_AUDIO=m 261CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
261CONFIG_DRM_DW_HDMI_CEC=y 262CONFIG_DRM_DW_HDMI_CEC=y
262CONFIG_DRM_IMX=y 263CONFIG_DRM_IMX=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 148226e36152..7b8212857535 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
95CONFIG_REGULATOR=y 95CONFIG_REGULATOR=y
96CONFIG_REGULATOR_FIXED_VOLTAGE=y 96CONFIG_REGULATOR_FIXED_VOLTAGE=y
97CONFIG_DRM=y 97CONFIG_DRM=y
98CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
98CONFIG_DRM_MXSFB=y 99CONFIG_DRM_MXSFB=y
99CONFIG_FB_MODE_HELPERS=y 100CONFIG_FB_MODE_HELPERS=y
100CONFIG_BACKLIGHT_LCD_SUPPORT=y 101CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index df68dc4056e5..5282324c7cef 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_BLK_DEV_INITRD=y 6CONFIG_BLK_DEV_INITRD=y
7CONFIG_SLAB=y 7CONFIG_SLAB=y
8CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y
10CONFIG_PARTITION_ADVANCED=y
11# CONFIG_ARCH_MULTI_V7 is not set 8# CONFIG_ARCH_MULTI_V7 is not set
12CONFIG_ARCH_VERSATILE=y 9CONFIG_ARCH_VERSATILE=y
13CONFIG_AEABI=y 10CONFIG_AEABI=y
14CONFIG_OABI_COMPAT=y 11CONFIG_OABI_COMPAT=y
15CONFIG_CMA=y
16CONFIG_ZBOOT_ROM_TEXT=0x0 12CONFIG_ZBOOT_ROM_TEXT=0x0
17CONFIG_ZBOOT_ROM_BSS=0x0 13CONFIG_ZBOOT_ROM_BSS=0x0
18CONFIG_CMDLINE="root=1f03 mem=32M" 14CONFIG_CMDLINE="root=1f03 mem=32M"
19CONFIG_FPE_NWFPE=y 15CONFIG_FPE_NWFPE=y
20CONFIG_VFP=y 16CONFIG_VFP=y
17CONFIG_MODULES=y
18CONFIG_MODULE_UNLOAD=y
19CONFIG_PARTITION_ADVANCED=y
20CONFIG_CMA=y
21CONFIG_NET=y 21CONFIG_NET=y
22CONFIG_PACKET=y 22CONFIG_PACKET=y
23CONFIG_UNIX=y 23CONFIG_UNIX=y
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
59CONFIG_DRM=y 59CONFIG_DRM=y
60CONFIG_DRM_PANEL_ARM_VERSATILE=y 60CONFIG_DRM_PANEL_ARM_VERSATILE=y
61CONFIG_DRM_PANEL_SIMPLE=y 61CONFIG_DRM_PANEL_SIMPLE=y
62CONFIG_DRM_DUMB_VGA_DAC=y
62CONFIG_DRM_PL111=y 63CONFIG_DRM_PL111=y
63CONFIG_FB_MODE_HELPERS=y 64CONFIG_FB_MODE_HELPERS=y
64CONFIG_BACKLIGHT_LCD_SUPPORT=y 65CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -89,9 +90,10 @@ CONFIG_NFSD=y
89CONFIG_NFSD_V3=y 90CONFIG_NFSD_V3=y
90CONFIG_NLS_CODEPAGE_850=m 91CONFIG_NLS_CODEPAGE_850=m
91CONFIG_NLS_ISO8859_1=m 92CONFIG_NLS_ISO8859_1=m
93CONFIG_FONTS=y
94CONFIG_FONT_ACORN_8x8=y
95CONFIG_DEBUG_FS=y
92CONFIG_MAGIC_SYSRQ=y 96CONFIG_MAGIC_SYSRQ=y
93CONFIG_DEBUG_KERNEL=y 97CONFIG_DEBUG_KERNEL=y
94CONFIG_DEBUG_USER=y 98CONFIG_DEBUG_USER=y
95CONFIG_DEBUG_LL=y 99CONFIG_DEBUG_LL=y
96CONFIG_FONTS=y
97CONFIG_FONT_ACORN_8x8=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 79906cecb091..3ad482d2f1eb 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
223 struct kvm_vcpu_events *events); 223 struct kvm_vcpu_events *events);
224 224
225#define KVM_ARCH_WANT_MMU_NOTIFIER 225#define KVM_ARCH_WANT_MMU_NOTIFIER
226int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
227int kvm_unmap_hva_range(struct kvm *kvm, 226int kvm_unmap_hva_range(struct kvm *kvm,
228 unsigned long start, unsigned long end); 227 unsigned long start, unsigned long end);
229void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 228void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2ceffd85dd3d..cd65ea4e9c54 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
2161} 2161}
2162 2162
2163/** 2163/**
2164 * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
2165 *
2166 * @oh: struct omap_hwmod *
2167 * @np: struct device_node *
2168 *
2169 * Fix up module register offsets for modules with mpu_rt_idx.
2170 * Only needed for cpsw with interconnect target module defined
2171 * in device tree while still using legacy hwmod platform data
2172 * for rev, sysc and syss registers.
2173 *
2174 * Can be removed when all cpsw hwmod platform data has been
2175 * dropped.
2176 */
2177static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
2178 struct device_node *np,
2179 struct resource *res)
2180{
2181 struct device_node *child = NULL;
2182 int error;
2183
2184 child = of_get_next_child(np, child);
2185 if (!child)
2186 return;
2187
2188 error = of_address_to_resource(child, oh->mpu_rt_idx, res);
2189 if (error)
2190 pr_err("%s: error mapping mpu_rt_idx: %i\n",
2191 __func__, error);
2192}
2193
2194/**
2164 * omap_hwmod_parse_module_range - map module IO range from device tree 2195 * omap_hwmod_parse_module_range - map module IO range from device tree
2165 * @oh: struct omap_hwmod * 2196 * @oh: struct omap_hwmod *
2166 * @np: struct device_node * 2197 * @np: struct device_node *
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
2220 size = be32_to_cpup(ranges); 2251 size = be32_to_cpup(ranges);
2221 2252
2222 pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n", 2253 pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
2223 oh->name, np->name, base, size); 2254 oh ? oh->name : "", np->name, base, size);
2255
2256 if (oh && oh->mpu_rt_idx) {
2257 omap_hwmod_fix_mpu_rt_idx(oh, np, res);
2258
2259 return 0;
2260 }
2224 2261
2225 res->start = base; 2262 res->start = base;
2226 res->end = base + size - 1; 2263 res->end = base + size - 1;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 29e75b47becd..1b1a0e95c751 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
763 763
764config HOLES_IN_ZONE 764config HOLES_IN_ZONE
765 def_bool y 765 def_bool y
766 depends on NUMA
767 766
768source kernel/Kconfig.hz 767source kernel/Kconfig.hz
769 768
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index ceffc40810ee..48daec7f78ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -46,6 +46,7 @@
46 pinctrl-0 = <&mmc0_pins>; 46 pinctrl-0 = <&mmc0_pins>;
47 vmmc-supply = <&reg_cldo1>; 47 vmmc-supply = <&reg_cldo1>;
48 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; 48 cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
49 bus-width = <4>;
49 status = "okay"; 50 status = "okay";
50}; 51};
51 52
@@ -56,6 +57,7 @@
56 vqmmc-supply = <&reg_bldo2>; 57 vqmmc-supply = <&reg_bldo2>;
57 non-removable; 58 non-removable;
58 cap-mmc-hw-reset; 59 cap-mmc-hw-reset;
60 bus-width = <8>;
59 status = "okay"; 61 status = "okay";
60}; 62};
61 63
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f67e8d5e93ad..db8d364f8476 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
38CONFIG_ARCH_BERLIN=y 38CONFIG_ARCH_BERLIN=y
39CONFIG_ARCH_BRCMSTB=y 39CONFIG_ARCH_BRCMSTB=y
40CONFIG_ARCH_EXYNOS=y 40CONFIG_ARCH_EXYNOS=y
41CONFIG_ARCH_K3=y
41CONFIG_ARCH_LAYERSCAPE=y 42CONFIG_ARCH_LAYERSCAPE=y
42CONFIG_ARCH_LG1K=y 43CONFIG_ARCH_LG1K=y
43CONFIG_ARCH_HISI=y 44CONFIG_ARCH_HISI=y
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
605CONFIG_ARCH_TEGRA_210_SOC=y 606CONFIG_ARCH_TEGRA_210_SOC=y
606CONFIG_ARCH_TEGRA_186_SOC=y 607CONFIG_ARCH_TEGRA_186_SOC=y
607CONFIG_ARCH_TEGRA_194_SOC=y 608CONFIG_ARCH_TEGRA_194_SOC=y
609CONFIG_ARCH_K3_AM6_SOC=y
610CONFIG_SOC_TI=y
608CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y 611CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
609CONFIG_EXTCON_USB_GPIO=y 612CONFIG_EXTCON_USB_GPIO=y
610CONFIG_EXTCON_USBC_CROS_EC=y 613CONFIG_EXTCON_USBC_CROS_EC=y
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 6e9f33d14930..067d8937d5af 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
417 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); 417 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
418 put_unaligned_be32(2, iv + GCM_IV_SIZE); 418 put_unaligned_be32(2, iv + GCM_IV_SIZE);
419 419
420 while (walk.nbytes >= AES_BLOCK_SIZE) { 420 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
421 int blocks = walk.nbytes / AES_BLOCK_SIZE; 421 int blocks = walk.nbytes / AES_BLOCK_SIZE;
422 u8 *dst = walk.dst.virt.addr; 422 u8 *dst = walk.dst.virt.addr;
423 u8 *src = walk.src.virt.addr; 423 u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
437 NULL); 437 NULL);
438 438
439 err = skcipher_walk_done(&walk, 439 err = skcipher_walk_done(&walk,
440 walk.nbytes % AES_BLOCK_SIZE); 440 walk.nbytes % (2 * AES_BLOCK_SIZE));
441 } 441 }
442 if (walk.nbytes) 442 if (walk.nbytes) {
443 __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv, 443 __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
444 nrounds); 444 nrounds);
445 if (walk.nbytes > AES_BLOCK_SIZE) {
446 crypto_inc(iv, AES_BLOCK_SIZE);
447 __aes_arm64_encrypt(ctx->aes_key.key_enc,
448 ks + AES_BLOCK_SIZE, iv,
449 nrounds);
450 }
451 }
445 } 452 }
446 453
447 /* handle the tail */ 454 /* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
545 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds); 552 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
546 put_unaligned_be32(2, iv + GCM_IV_SIZE); 553 put_unaligned_be32(2, iv + GCM_IV_SIZE);
547 554
548 while (walk.nbytes >= AES_BLOCK_SIZE) { 555 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
549 int blocks = walk.nbytes / AES_BLOCK_SIZE; 556 int blocks = walk.nbytes / AES_BLOCK_SIZE;
550 u8 *dst = walk.dst.virt.addr; 557 u8 *dst = walk.dst.virt.addr;
551 u8 *src = walk.src.virt.addr; 558 u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
564 } while (--blocks > 0); 571 } while (--blocks > 0);
565 572
566 err = skcipher_walk_done(&walk, 573 err = skcipher_walk_done(&walk,
567 walk.nbytes % AES_BLOCK_SIZE); 574 walk.nbytes % (2 * AES_BLOCK_SIZE));
568 } 575 }
569 if (walk.nbytes) 576 if (walk.nbytes) {
577 if (walk.nbytes > AES_BLOCK_SIZE) {
578 u8 *iv2 = iv + AES_BLOCK_SIZE;
579
580 memcpy(iv2, iv, AES_BLOCK_SIZE);
581 crypto_inc(iv2, AES_BLOCK_SIZE);
582
583 __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
584 iv2, nrounds);
585 }
570 __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv, 586 __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
571 nrounds); 587 nrounds);
588 }
572 } 589 }
573 590
574 /* handle the tail */ 591 /* handle the tail */
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index b7fb5274b250..0c4fc223f225 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
69 crypto_unregister_alg(&sm4_ce_alg); 69 crypto_unregister_alg(&sm4_ce_alg);
70} 70}
71 71
72module_cpu_feature_match(SM3, sm4_ce_mod_init); 72module_cpu_feature_match(SM4, sm4_ce_mod_init);
73module_exit(sm4_ce_mod_fini); 73module_exit(sm4_ce_mod_fini);
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e843c3a..7e2b3e360086 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
28 28
29static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 29static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
30{ 30{
31 asm goto("1: nop\n\t" 31 asm_volatile_goto("1: nop\n\t"
32 ".pushsection __jump_table, \"aw\"\n\t" 32 ".pushsection __jump_table, \"aw\"\n\t"
33 ".align 3\n\t" 33 ".align 3\n\t"
34 ".quad 1b, %l[l_yes], %c0\n\t" 34 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
42 42
43static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) 43static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
44{ 44{
45 asm goto("1: b %l[l_yes]\n\t" 45 asm_volatile_goto("1: b %l[l_yes]\n\t"
46 ".pushsection __jump_table, \"aw\"\n\t" 46 ".pushsection __jump_table, \"aw\"\n\t"
47 ".align 3\n\t" 47 ".align 3\n\t"
48 ".quad 1b, %l[l_yes], %c0\n\t" 48 ".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f26055f2306e..3d6d7336f871 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -61,8 +61,7 @@ struct kvm_arch {
61 u64 vmid_gen; 61 u64 vmid_gen;
62 u32 vmid; 62 u32 vmid;
63 63
64 /* 1-level 2nd stage table and lock */ 64 /* 1-level 2nd stage table, protected by kvm->mmu_lock */
65 spinlock_t pgd_lock;
66 pgd_t *pgd; 65 pgd_t *pgd;
67 66
68 /* VTTBR value associated with above pgd and vmid */ 67 /* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
357 struct kvm_vcpu_events *events); 356 struct kvm_vcpu_events *events);
358 357
359#define KVM_ARCH_WANT_MMU_NOTIFIER 358#define KVM_ARCH_WANT_MMU_NOTIFIER
360int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
361int kvm_unmap_hva_range(struct kvm *kvm, 359int kvm_unmap_hva_range(struct kvm *kvm,
362 unsigned long start, unsigned long end); 360 unsigned long start, unsigned long end);
363void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 361void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 95ac7374d723..4c8b13bede80 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
54arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o 54arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
55arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o 55arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
56arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 56arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
57arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
57arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o 58arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
58arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o 59arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
59 60
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644
index 000000000000..ca4c3e12d8c5
--- /dev/null
+++ b/arch/arm64/kernel/crash_core.c
@@ -0,0 +1,19 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) Linaro.
4 * Copyright (C) Huawei Futurewei Technologies.
5 */
6
7#include <linux/crash_core.h>
8#include <asm/memory.h>
9
10void arch_crash_save_vmcoreinfo(void)
11{
12 VMCOREINFO_NUMBER(VA_BITS);
13 /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
14 vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
15 kimage_voffset);
16 vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
17 PHYS_OFFSET);
18 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
19}
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f6a5c6bc1434..922add8adb74 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
358 } 358 }
359} 359}
360#endif /* CONFIG_HIBERNATION */ 360#endif /* CONFIG_HIBERNATION */
361
362void arch_crash_save_vmcoreinfo(void)
363{
364 VMCOREINFO_NUMBER(VA_BITS);
365 /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
366 vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
367 kimage_voffset);
368 vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
369 PHYS_OFFSET);
370 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
371}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 07256b08226c..a6c9fbaeaefc 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
57 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); 57 return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
58} 58}
59 59
60static int validate_core_offset(const struct kvm_one_reg *reg)
61{
62 u64 off = core_reg_offset_from_id(reg->id);
63 int size;
64
65 switch (off) {
66 case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
67 KVM_REG_ARM_CORE_REG(regs.regs[30]):
68 case KVM_REG_ARM_CORE_REG(regs.sp):
69 case KVM_REG_ARM_CORE_REG(regs.pc):
70 case KVM_REG_ARM_CORE_REG(regs.pstate):
71 case KVM_REG_ARM_CORE_REG(sp_el1):
72 case KVM_REG_ARM_CORE_REG(elr_el1):
73 case KVM_REG_ARM_CORE_REG(spsr[0]) ...
74 KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
75 size = sizeof(__u64);
76 break;
77
78 case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
79 KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
80 size = sizeof(__uint128_t);
81 break;
82
83 case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
84 case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
85 size = sizeof(__u32);
86 break;
87
88 default:
89 return -EINVAL;
90 }
91
92 if (KVM_REG_SIZE(reg->id) == size &&
93 IS_ALIGNED(off, size / sizeof(__u32)))
94 return 0;
95
96 return -EINVAL;
97}
98
60static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 99static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
61{ 100{
62 /* 101 /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
76 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) 115 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
77 return -ENOENT; 116 return -ENOENT;
78 117
118 if (validate_core_offset(reg))
119 return -EINVAL;
120
79 if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) 121 if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
80 return -EFAULT; 122 return -EFAULT;
81 123
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
98 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) 140 (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
99 return -ENOENT; 141 return -ENOENT;
100 142
143 if (validate_core_offset(reg))
144 return -EINVAL;
145
101 if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) 146 if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
102 return -EINVAL; 147 return -EINVAL;
103 148
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
107 } 152 }
108 153
109 if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) { 154 if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
110 u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK; 155 u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
111 switch (mode) { 156 switch (mode) {
112 case PSR_AA32_MODE_USR: 157 case PSR_AA32_MODE_USR:
158 if (!system_supports_32bit_el0())
159 return -EINVAL;
160 break;
113 case PSR_AA32_MODE_FIQ: 161 case PSR_AA32_MODE_FIQ:
114 case PSR_AA32_MODE_IRQ: 162 case PSR_AA32_MODE_IRQ:
115 case PSR_AA32_MODE_SVC: 163 case PSR_AA32_MODE_SVC:
116 case PSR_AA32_MODE_ABT: 164 case PSR_AA32_MODE_ABT:
117 case PSR_AA32_MODE_UND: 165 case PSR_AA32_MODE_UND:
166 if (!vcpu_el1_is_32bit(vcpu))
167 return -EINVAL;
168 break;
118 case PSR_MODE_EL0t: 169 case PSR_MODE_EL0t:
119 case PSR_MODE_EL1t: 170 case PSR_MODE_EL1t:
120 case PSR_MODE_EL1h: 171 case PSR_MODE_EL1h:
172 if (vcpu_el1_is_32bit(vcpu))
173 return -EINVAL;
121 break; 174 break;
122 default: 175 default:
123 err = -EINVAL; 176 err = -EINVAL;
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index d496ef579859..ca46153d7915 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
98 val = read_sysreg(cpacr_el1); 98 val = read_sysreg(cpacr_el1);
99 val |= CPACR_EL1_TTA; 99 val |= CPACR_EL1_TTA;
100 val &= ~CPACR_EL1_ZEN; 100 val &= ~CPACR_EL1_ZEN;
101 if (!update_fp_enabled(vcpu)) 101 if (!update_fp_enabled(vcpu)) {
102 val &= ~CPACR_EL1_FPEN; 102 val &= ~CPACR_EL1_FPEN;
103 __activate_traps_fpsimd32(vcpu);
104 }
103 105
104 write_sysreg(val, cpacr_el1); 106 write_sysreg(val, cpacr_el1);
105 107
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
114 116
115 val = CPTR_EL2_DEFAULT; 117 val = CPTR_EL2_DEFAULT;
116 val |= CPTR_EL2_TTA | CPTR_EL2_TZ; 118 val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
117 if (!update_fp_enabled(vcpu)) 119 if (!update_fp_enabled(vcpu)) {
118 val |= CPTR_EL2_TFP; 120 val |= CPTR_EL2_TFP;
121 __activate_traps_fpsimd32(vcpu);
122 }
119 123
120 write_sysreg(val, cptr_el2); 124 write_sysreg(val, cptr_el2);
121} 125}
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
129 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE)) 133 if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
130 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2); 134 write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
131 135
132 __activate_traps_fpsimd32(vcpu);
133 if (has_vhe()) 136 if (has_vhe())
134 activate_traps_vhe(vcpu); 137 activate_traps_vhe(vcpu);
135 else 138 else
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 192b3ba07075..f58ea503ad01 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
117 117
118 /* 118 /*
119 * If HW_AFDBM is enabled, then the HW could turn on 119 * If HW_AFDBM is enabled, then the HW could turn on
120 * the dirty bit for any page in the set, so check 120 * the dirty or accessed bit for any page in the set,
121 * them all. All hugetlb entries are already young. 121 * so check them all.
122 */ 122 */
123 if (pte_dirty(pte)) 123 if (pte_dirty(pte))
124 orig_pte = pte_mkdirty(orig_pte); 124 orig_pte = pte_mkdirty(orig_pte);
125
126 if (pte_young(pte))
127 orig_pte = pte_mkyoung(orig_pte);
125 } 128 }
126 129
127 if (valid) { 130 if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
320 return get_clear_flush(mm, addr, ptep, pgsize, ncontig); 323 return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
321} 324}
322 325
326/*
327 * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
328 * and write permission.
329 *
330 * For a contiguous huge pte range we need to check whether or not write
331 * permission has to change only on the first pte in the set. Then for
332 * all the contiguous ptes we need to check whether or not there is a
333 * discrepancy between dirty or young.
334 */
335static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
336{
337 int i;
338
339 if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
340 return 1;
341
342 for (i = 0; i < ncontig; i++) {
343 pte_t orig_pte = huge_ptep_get(ptep + i);
344
345 if (pte_dirty(pte) != pte_dirty(orig_pte))
346 return 1;
347
348 if (pte_young(pte) != pte_young(orig_pte))
349 return 1;
350 }
351
352 return 0;
353}
354
323int huge_ptep_set_access_flags(struct vm_area_struct *vma, 355int huge_ptep_set_access_flags(struct vm_area_struct *vma,
324 unsigned long addr, pte_t *ptep, 356 unsigned long addr, pte_t *ptep,
325 pte_t pte, int dirty) 357 pte_t pte, int dirty)
326{ 358{
327 int ncontig, i, changed = 0; 359 int ncontig, i;
328 size_t pgsize = 0; 360 size_t pgsize = 0;
329 unsigned long pfn = pte_pfn(pte), dpfn; 361 unsigned long pfn = pte_pfn(pte), dpfn;
330 pgprot_t hugeprot; 362 pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
336 ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize); 368 ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
337 dpfn = pgsize >> PAGE_SHIFT; 369 dpfn = pgsize >> PAGE_SHIFT;
338 370
371 if (!__cont_access_flags_changed(ptep, pte, ncontig))
372 return 0;
373
339 orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); 374 orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
340 if (!pte_same(orig_pte, pte))
341 changed = 1;
342 375
343 /* Make sure we don't lose the dirty state */ 376 /* Make sure we don't lose the dirty or young state */
344 if (pte_dirty(orig_pte)) 377 if (pte_dirty(orig_pte))
345 pte = pte_mkdirty(pte); 378 pte = pte_mkdirty(pte);
346 379
380 if (pte_young(orig_pte))
381 pte = pte_mkyoung(pte);
382
347 hugeprot = pte_pgprot(pte); 383 hugeprot = pte_pgprot(pte);
348 for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) 384 for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
349 set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); 385 set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
350 386
351 return changed; 387 return 1;
352} 388}
353 389
354void huge_ptep_set_wrprotect(struct mm_struct *mm, 390void huge_ptep_set_wrprotect(struct mm_struct *mm,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 65f86271f02b..8080c9f489c3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
985 985
986 pmd = READ_ONCE(*pmdp); 986 pmd = READ_ONCE(*pmdp);
987 987
988 /* No-op for empty entry and WARN_ON for valid entry */ 988 if (!pmd_present(pmd))
989 if (!pmd_present(pmd) || !pmd_table(pmd)) { 989 return 1;
990 if (!pmd_table(pmd)) {
990 VM_WARN_ON(!pmd_table(pmd)); 991 VM_WARN_ON(!pmd_table(pmd));
991 return 1; 992 return 1;
992 } 993 }
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
1007 1008
1008 pud = READ_ONCE(*pudp); 1009 pud = READ_ONCE(*pudp);
1009 1010
1010 /* No-op for empty entry and WARN_ON for valid entry */ 1011 if (!pud_present(pud))
1011 if (!pud_present(pud) || !pud_table(pud)) { 1012 return 1;
1013 if (!pud_table(pud)) {
1012 VM_WARN_ON(!pud_table(pud)); 1014 VM_WARN_ON(!pud_table(pud));
1013 return 1; 1015 return 1;
1014 } 1016 }
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b3ec1b..2691a1857d20 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@ static inline long ffz(int x)
211 * This is defined the same way as ffs. 211 * This is defined the same way as ffs.
212 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. 212 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
213 */ 213 */
214static inline long fls(int x) 214static inline int fls(int x)
215{ 215{
216 int r; 216 int r;
217 217
@@ -232,7 +232,7 @@ static inline long fls(int x)
232 * the libc and compiler builtin ffs routines, therefore 232 * the libc and compiler builtin ffs routines, therefore
233 * differs in spirit from the above ffz (man ffs). 233 * differs in spirit from the above ffz (man ffs).
234 */ 234 */
235static inline long ffs(int x) 235static inline int ffs(int x)
236{ 236{
237 int r; 237 int r;
238 238
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 77459df34e2e..7ebe7ad19d15 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
60 panic("Can't create %s() memory pool!", __func__); 60 panic("Can't create %s() memory pool!", __func__);
61 else 61 else
62 gen_pool_add(coherent_pool, 62 gen_pool_add(coherent_pool,
63 pfn_to_virt(max_low_pfn), 63 (unsigned long)pfn_to_virt(max_low_pfn),
64 hexagon_coherent_pool_size, -1); 64 hexagon_coherent_pool_size, -1);
65 } 65 }
66 66
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 3534aa6a4dc2..1b083c500b9a 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
98 98
99 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) 99 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
100 return 0; 100 return 0;
101 while (!req.complete) 101 pmu_wait_complete(&req);
102 pmu_poll();
103 102
104 time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) | 103 time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
105 (req.reply[3] << 8) | req.reply[4]); 104 (req.reply[2] << 8) | req.reply[3]);
106 105
107 return time - RTC_OFFSET; 106 return time - RTC_OFFSET;
108} 107}
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
116 (data >> 24) & 0xFF, (data >> 16) & 0xFF, 115 (data >> 24) & 0xFF, (data >> 16) & 0xFF,
117 (data >> 8) & 0xFF, data & 0xFF) < 0) 116 (data >> 8) & 0xFF, data & 0xFF) < 0)
118 return; 117 return;
119 while (!req.complete) 118 pmu_wait_complete(&req);
120 pmu_poll();
121} 119}
122 120
123static __u8 pmu_read_pram(int offset) 121static __u8 pmu_read_pram(int offset)
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 70dde040779b..f5453d944ff5 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
172 high_memory = (void *)_ramend; 172 high_memory = (void *)_ramend;
173 173
174 /* Reserve kernel text/data/bss */ 174 /* Reserve kernel text/data/bss */
175 memblock_reserve(memstart, memstart - _rambase); 175 memblock_reserve(_rambase, memstart - _rambase);
176 176
177 m68k_virt_to_node_shift = fls(_ramend - 1) - 6; 177 m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
178 module_fixup(NULL, __start_fixup, __stop_fixup); 178 module_fixup(NULL, __start_fixup, __stop_fixup);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a9af1d2dcd69..2c1c53d12179 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
931 bool write); 931 bool write);
932 932
933#define KVM_ARCH_WANT_MMU_NOTIFIER 933#define KVM_ARCH_WANT_MMU_NOTIFIER
934int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
935int kvm_unmap_hva_range(struct kvm *kvm, 934int kvm_unmap_hva_range(struct kvm *kvm,
936 unsigned long start, unsigned long end); 935 unsigned long start, unsigned long end);
937void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 936void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 4901833498f7..8441b2698e64 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
40 int desc; /* the current descriptor */ 40 int desc; /* the current descriptor */
41 struct ltq_dma_desc *desc_base; /* the descriptor base */ 41 struct ltq_dma_desc *desc_base; /* the descriptor base */
42 int phys; /* physical addr */ 42 int phys; /* physical addr */
43 struct device *dev;
43}; 44};
44 45
45enum { 46enum {
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 019035d7225c..8f845f6e5f42 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -13,6 +13,7 @@
13#include <linux/err.h> 13#include <linux/err.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/ioport.h> 15#include <linux/ioport.h>
16#include <linux/kernel.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
17#include <linux/sched.h> 18#include <linux/sched.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
@@ -20,6 +21,7 @@
20 21
21#include <asm/abi.h> 22#include <asm/abi.h>
22#include <asm/mips-cps.h> 23#include <asm/mips-cps.h>
24#include <asm/page.h>
23#include <asm/vdso.h> 25#include <asm/vdso.h>
24 26
25/* Kernel-provided data used by the VDSO. */ 27/* Kernel-provided data used by the VDSO. */
@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
128 vvar_size = gic_size + PAGE_SIZE; 130 vvar_size = gic_size + PAGE_SIZE;
129 size = vvar_size + image->size; 131 size = vvar_size + image->size;
130 132
133 /*
134 * Find a region that's large enough for us to perform the
135 * colour-matching alignment below.
136 */
137 if (cpu_has_dc_aliases)
138 size += shm_align_mask + 1;
139
131 base = get_unmapped_area(NULL, 0, size, 0, 0); 140 base = get_unmapped_area(NULL, 0, size, 0, 0);
132 if (IS_ERR_VALUE(base)) { 141 if (IS_ERR_VALUE(base)) {
133 ret = base; 142 ret = base;
134 goto out; 143 goto out;
135 } 144 }
136 145
146 /*
147 * If we suffer from dcache aliasing, ensure that the VDSO data page
148 * mapping is coloured the same as the kernel's mapping of that memory.
149 * This ensures that when the kernel updates the VDSO data userland
150 * will observe it without requiring cache invalidations.
151 */
152 if (cpu_has_dc_aliases) {
153 base = __ALIGN_MASK(base, shm_align_mask);
154 base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
155 }
156
137 data_addr = base + gic_size; 157 data_addr = base + gic_size;
138 vdso_addr = data_addr + PAGE_SIZE; 158 vdso_addr = data_addr + PAGE_SIZE;
139 159
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index ee64db032793..d8dcdb350405 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
512 return 1; 512 return 1;
513} 513}
514 514
515int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
516{
517 unsigned long end = hva + PAGE_SIZE;
518
519 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
520
521 kvm_mips_callbacks->flush_shadow_all(kvm);
522 return 0;
523}
524
525int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 515int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
526{ 516{
527 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); 517 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 4b9fbb6744ad..664f2f7f55c1 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
130 unsigned long flags; 130 unsigned long flags;
131 131
132 ch->desc = 0; 132 ch->desc = 0;
133 ch->desc_base = dma_zalloc_coherent(NULL, 133 ch->desc_base = dma_zalloc_coherent(ch->dev,
134 LTQ_DESC_NUM * LTQ_DESC_SIZE, 134 LTQ_DESC_NUM * LTQ_DESC_SIZE,
135 &ch->phys, GFP_ATOMIC); 135 &ch->phys, GFP_ATOMIC);
136 136
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
182 if (!ch->desc_base) 182 if (!ch->desc_base)
183 return; 183 return;
184 ltq_dma_close(ch); 184 ltq_dma_close(ch);
185 dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, 185 dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
186 ch->desc_base, ch->phys); 186 ch->desc_base, ch->phys);
187} 187}
188EXPORT_SYMBOL_GPL(ltq_dma_free); 188EXPORT_SYMBOL_GPL(ltq_dma_free);
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 1d4248fa55e9..7068f341133d 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -40,6 +40,10 @@ config NDS32
40 select NO_IOPORT_MAP 40 select NO_IOPORT_MAP
41 select RTC_LIB 41 select RTC_LIB
42 select THREAD_INFO_IN_TASK 42 select THREAD_INFO_IN_TASK
43 select HAVE_FUNCTION_TRACER
44 select HAVE_FUNCTION_GRAPH_TRACER
45 select HAVE_FTRACE_MCOUNT_RECORD
46 select HAVE_DYNAMIC_FTRACE
43 help 47 help
44 Andes(nds32) Linux support. 48 Andes(nds32) Linux support.
45 49
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 63f4f173e5f4..3509fac10491 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
5 5
6comma = , 6comma = ,
7 7
8ifdef CONFIG_FUNCTION_TRACER
9arch-y += -malways-save-lp -mno-relax
10endif
11
8KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog) 12KBUILD_CFLAGS += $(call cc-option, -mno-sched-prolog-epilog)
9KBUILD_CFLAGS += -mcmodel=large 13KBUILD_CFLAGS += -mcmodel=large
10 14
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
index 56c479058802..f5f9cf7e0544 100644
--- a/arch/nds32/include/asm/elf.h
+++ b/arch/nds32/include/asm/elf.h
@@ -121,9 +121,9 @@ struct elf32_hdr;
121 */ 121 */
122#define ELF_CLASS ELFCLASS32 122#define ELF_CLASS ELFCLASS32
123#ifdef __NDS32_EB__ 123#ifdef __NDS32_EB__
124#define ELF_DATA ELFDATA2MSB; 124#define ELF_DATA ELFDATA2MSB
125#else 125#else
126#define ELF_DATA ELFDATA2LSB; 126#define ELF_DATA ELFDATA2LSB
127#endif 127#endif
128#define ELF_ARCH EM_NDS32 128#define ELF_ARCH EM_NDS32
129#define USE_ELF_CORE_DUMP 129#define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644
index 000000000000..2f96cc96aa35
--- /dev/null
+++ b/arch/nds32/include/asm/ftrace.h
@@ -0,0 +1,46 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __ASM_NDS32_FTRACE_H
4#define __ASM_NDS32_FTRACE_H
5
6#ifdef CONFIG_FUNCTION_TRACER
7
8#define HAVE_FUNCTION_GRAPH_FP_TEST
9
10#define MCOUNT_ADDR ((unsigned long)(_mcount))
11/* mcount call is composed of three instructions:
12 * sethi + ori + jral
13 */
14#define MCOUNT_INSN_SIZE 12
15
16extern void _mcount(unsigned long parent_ip);
17
18#ifdef CONFIG_DYNAMIC_FTRACE
19
20#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
21
22#ifdef __NDS32_EL__
23#define INSN_NOP 0x09000040
24#define INSN_SIZE(insn) (((insn & 0x00000080) == 0) ? 4 : 2)
25#define IS_SETHI(insn) ((insn & 0x000000fe) == 0x00000046)
26#define ENDIAN_CONVERT(insn) be32_to_cpu(insn)
27#else /* __NDS32_EB__ */
28#define INSN_NOP 0x40000009
29#define INSN_SIZE(insn) (((insn & 0x80000000) == 0) ? 4 : 2)
30#define IS_SETHI(insn) ((insn & 0xfe000000) == 0x46000000)
31#define ENDIAN_CONVERT(insn) (insn)
32#endif
33
34extern void _ftrace_caller(unsigned long parent_ip);
35static inline unsigned long ftrace_call_adjust(unsigned long addr)
36{
37 return addr;
38}
39struct dyn_arch_ftrace {
40};
41
42#endif /* CONFIG_DYNAMIC_FTRACE */
43
44#endif /* CONFIG_FUNCTION_TRACER */
45
46#endif /* __ASM_NDS32_FTRACE_H */
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h
index 19b19394a936..68c38151c3e4 100644
--- a/arch/nds32/include/asm/nds32.h
+++ b/arch/nds32/include/asm/nds32.h
@@ -17,6 +17,7 @@
17#else 17#else
18#define FP_OFFSET (-2) 18#define FP_OFFSET (-2)
19#endif 19#endif
20#define LP_OFFSET (-1)
20 21
21extern void __init early_trap_init(void); 22extern void __init early_trap_init(void);
22static inline void GIE_ENABLE(void) 23static inline void GIE_ENABLE(void)
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 18a009f3804d..362a32d9bd16 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -38,7 +38,7 @@ struct exception_table_entry {
38extern int fixup_exception(struct pt_regs *regs); 38extern int fixup_exception(struct pt_regs *regs);
39 39
40#define KERNEL_DS ((mm_segment_t) { ~0UL }) 40#define KERNEL_DS ((mm_segment_t) { ~0UL })
41#define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) 41#define USER_DS ((mm_segment_t) {TASK_SIZE - 1})
42 42
43#define get_ds() (KERNEL_DS) 43#define get_ds() (KERNEL_DS)
44#define get_fs() (current_thread_info()->addr_limit) 44#define get_fs() (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
49 current_thread_info()->addr_limit = fs; 49 current_thread_info()->addr_limit = fs;
50} 50}
51 51
52#define segment_eq(a, b) ((a) == (b)) 52#define segment_eq(a, b) ((a) == (b))
53 53
54#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size)) 54#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
55 55
56#define access_ok(type, addr, size) \ 56#define access_ok(type, addr, size) \
57 __range_ok((unsigned long)addr, (unsigned long)size) 57 __range_ok((unsigned long)addr, (unsigned long)size)
58/* 58/*
59 * Single-value transfer routines. They automatically use the right 59 * Single-value transfer routines. They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
75 * versions are void (ie, don't return a value as such). 75 * versions are void (ie, don't return a value as such).
76 */ 76 */
77 77
78#define get_user(x,p) \ 78#define get_user __get_user \
79({ \ 79
80 long __e = -EFAULT; \ 80#define __get_user(x, ptr) \
81 if(likely(access_ok(VERIFY_READ, p, sizeof(*p)))) { \
82 __e = __get_user(x,p); \
83 } else \
84 x = 0; \
85 __e; \
86})
87#define __get_user(x,ptr) \
88({ \ 81({ \
89 long __gu_err = 0; \ 82 long __gu_err = 0; \
90 __get_user_err((x),(ptr),__gu_err); \ 83 __get_user_check((x), (ptr), __gu_err); \
91 __gu_err; \ 84 __gu_err; \
92}) 85})
93 86
94#define __get_user_error(x,ptr,err) \ 87#define __get_user_error(x, ptr, err) \
95({ \ 88({ \
96 __get_user_err((x),(ptr),err); \ 89 __get_user_check((x), (ptr), (err)); \
97 (void) 0; \ 90 (void)0; \
98}) 91})
99 92
100#define __get_user_err(x,ptr,err) \ 93#define __get_user_check(x, ptr, err) \
94({ \
95 const __typeof__(*(ptr)) __user *__p = (ptr); \
96 might_fault(); \
97 if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
98 __get_user_err((x), __p, (err)); \
99 } else { \
100 (x) = 0; (err) = -EFAULT; \
101 } \
102})
103
104#define __get_user_err(x, ptr, err) \
101do { \ 105do { \
102 unsigned long __gu_addr = (unsigned long)(ptr); \
103 unsigned long __gu_val; \ 106 unsigned long __gu_val; \
104 __chk_user_ptr(ptr); \ 107 __chk_user_ptr(ptr); \
105 switch (sizeof(*(ptr))) { \ 108 switch (sizeof(*(ptr))) { \
106 case 1: \ 109 case 1: \
107 __get_user_asm("lbi",__gu_val,__gu_addr,err); \ 110 __get_user_asm("lbi", __gu_val, (ptr), (err)); \
108 break; \ 111 break; \
109 case 2: \ 112 case 2: \
110 __get_user_asm("lhi",__gu_val,__gu_addr,err); \ 113 __get_user_asm("lhi", __gu_val, (ptr), (err)); \
111 break; \ 114 break; \
112 case 4: \ 115 case 4: \
113 __get_user_asm("lwi",__gu_val,__gu_addr,err); \ 116 __get_user_asm("lwi", __gu_val, (ptr), (err)); \
114 break; \ 117 break; \
115 case 8: \ 118 case 8: \
116 __get_user_asm_dword(__gu_val,__gu_addr,err); \ 119 __get_user_asm_dword(__gu_val, (ptr), (err)); \
117 break; \ 120 break; \
118 default: \ 121 default: \
119 BUILD_BUG(); \ 122 BUILD_BUG(); \
120 break; \ 123 break; \
121 } \ 124 } \
122 (x) = (__typeof__(*(ptr)))__gu_val; \ 125 (x) = (__force __typeof__(*(ptr)))__gu_val; \
123} while (0) 126} while (0)
124 127
125#define __get_user_asm(inst,x,addr,err) \ 128#define __get_user_asm(inst, x, addr, err) \
126 asm volatile( \ 129 __asm__ __volatile__ ( \
127 "1: "inst" %1,[%2]\n" \ 130 "1: "inst" %1,[%2]\n" \
128 "2:\n" \ 131 "2:\n" \
129 " .section .fixup,\"ax\"\n" \ 132 " .section .fixup,\"ax\"\n" \
130 " .align 2\n" \ 133 " .align 2\n" \
131 "3: move %0, %3\n" \ 134 "3: move %0, %3\n" \
132 " move %1, #0\n" \ 135 " move %1, #0\n" \
133 " b 2b\n" \ 136 " b 2b\n" \
134 " .previous\n" \ 137 " .previous\n" \
135 " .section __ex_table,\"a\"\n" \ 138 " .section __ex_table,\"a\"\n" \
136 " .align 3\n" \ 139 " .align 3\n" \
137 " .long 1b, 3b\n" \ 140 " .long 1b, 3b\n" \
138 " .previous" \ 141 " .previous" \
139 : "+r" (err), "=&r" (x) \ 142 : "+r" (err), "=&r" (x) \
140 : "r" (addr), "i" (-EFAULT) \ 143 : "r" (addr), "i" (-EFAULT) \
141 : "cc") 144 : "cc")
142 145
143#ifdef __NDS32_EB__ 146#ifdef __NDS32_EB__
144#define __gu_reg_oper0 "%H1" 147#define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do { \
149#endif 152#endif
150 153
151#define __get_user_asm_dword(x, addr, err) \ 154#define __get_user_asm_dword(x, addr, err) \
152 asm volatile( \ 155 __asm__ __volatile__ ( \
153 "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \ 156 "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n" \
154 "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \ 157 "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n" \
155 "3:\n" \ 158 "3:\n" \
156 " .section .fixup,\"ax\"\n" \ 159 " .section .fixup,\"ax\"\n" \
157 " .align 2\n" \ 160 " .align 2\n" \
158 "4: move %0, %3\n" \ 161 "4: move %0, %3\n" \
159 " b 3b\n" \ 162 " b 3b\n" \
160 " .previous\n" \ 163 " .previous\n" \
161 " .section __ex_table,\"a\"\n" \ 164 " .section __ex_table,\"a\"\n" \
162 " .align 3\n" \ 165 " .align 3\n" \
163 " .long 1b, 4b\n" \ 166 " .long 1b, 4b\n" \
164 " .long 2b, 4b\n" \ 167 " .long 2b, 4b\n" \
165 " .previous" \ 168 " .previous" \
166 : "+r"(err), "=&r"(x) \ 169 : "+r"(err), "=&r"(x) \
167 : "r"(addr), "i"(-EFAULT) \ 170 : "r"(addr), "i"(-EFAULT) \
168 : "cc") 171 : "cc")
169#define put_user(x,p) \ 172
170({ \ 173#define put_user __put_user \
171 long __e = -EFAULT; \ 174
172 if(likely(access_ok(VERIFY_WRITE, p, sizeof(*p)))) { \ 175#define __put_user(x, ptr) \
173 __e = __put_user(x,p); \
174 } \
175 __e; \
176})
177#define __put_user(x,ptr) \
178({ \ 176({ \
179 long __pu_err = 0; \ 177 long __pu_err = 0; \
180 __put_user_err((x),(ptr),__pu_err); \ 178 __put_user_err((x), (ptr), __pu_err); \
181 __pu_err; \ 179 __pu_err; \
182}) 180})
183 181
184#define __put_user_error(x,ptr,err) \ 182#define __put_user_error(x, ptr, err) \
183({ \
184 __put_user_err((x), (ptr), (err)); \
185 (void)0; \
186})
187
188#define __put_user_check(x, ptr, err) \
185({ \ 189({ \
186 __put_user_err((x),(ptr),err); \ 190 __typeof__(*(ptr)) __user *__p = (ptr); \
187 (void) 0; \ 191 might_fault(); \
192 if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
193 __put_user_err((x), __p, (err)); \
194 } else { \
195 (err) = -EFAULT; \
196 } \
188}) 197})
189 198
190#define __put_user_err(x,ptr,err) \ 199#define __put_user_err(x, ptr, err) \
191do { \ 200do { \
192 unsigned long __pu_addr = (unsigned long)(ptr); \
193 __typeof__(*(ptr)) __pu_val = (x); \ 201 __typeof__(*(ptr)) __pu_val = (x); \
194 __chk_user_ptr(ptr); \ 202 __chk_user_ptr(ptr); \
195 switch (sizeof(*(ptr))) { \ 203 switch (sizeof(*(ptr))) { \
196 case 1: \ 204 case 1: \
197 __put_user_asm("sbi",__pu_val,__pu_addr,err); \ 205 __put_user_asm("sbi", __pu_val, (ptr), (err)); \
198 break; \ 206 break; \
199 case 2: \ 207 case 2: \
200 __put_user_asm("shi",__pu_val,__pu_addr,err); \ 208 __put_user_asm("shi", __pu_val, (ptr), (err)); \
201 break; \ 209 break; \
202 case 4: \ 210 case 4: \
203 __put_user_asm("swi",__pu_val,__pu_addr,err); \ 211 __put_user_asm("swi", __pu_val, (ptr), (err)); \
204 break; \ 212 break; \
205 case 8: \ 213 case 8: \
206 __put_user_asm_dword(__pu_val,__pu_addr,err); \ 214 __put_user_asm_dword(__pu_val, (ptr), (err)); \
207 break; \ 215 break; \
208 default: \ 216 default: \
209 BUILD_BUG(); \ 217 BUILD_BUG(); \
@@ -211,22 +219,22 @@ do { \
211 } \ 219 } \
212} while (0) 220} while (0)
213 221
214#define __put_user_asm(inst,x,addr,err) \ 222#define __put_user_asm(inst, x, addr, err) \
215 asm volatile( \ 223 __asm__ __volatile__ ( \
216 "1: "inst" %1,[%2]\n" \ 224 "1: "inst" %1,[%2]\n" \
217 "2:\n" \ 225 "2:\n" \
218 " .section .fixup,\"ax\"\n" \ 226 " .section .fixup,\"ax\"\n" \
219 " .align 2\n" \ 227 " .align 2\n" \
220 "3: move %0, %3\n" \ 228 "3: move %0, %3\n" \
221 " b 2b\n" \ 229 " b 2b\n" \
222 " .previous\n" \ 230 " .previous\n" \
223 " .section __ex_table,\"a\"\n" \ 231 " .section __ex_table,\"a\"\n" \
224 " .align 3\n" \ 232 " .align 3\n" \
225 " .long 1b, 3b\n" \ 233 " .long 1b, 3b\n" \
226 " .previous" \ 234 " .previous" \
227 : "+r" (err) \ 235 : "+r" (err) \
228 : "r" (x), "r" (addr), "i" (-EFAULT) \ 236 : "r" (x), "r" (addr), "i" (-EFAULT) \
229 : "cc") 237 : "cc")
230 238
231#ifdef __NDS32_EB__ 239#ifdef __NDS32_EB__
232#define __pu_reg_oper0 "%H2" 240#define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do { \
237#endif 245#endif
238 246
239#define __put_user_asm_dword(x, addr, err) \ 247#define __put_user_asm_dword(x, addr, err) \
240 asm volatile( \ 248 __asm__ __volatile__ ( \
241 "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \ 249 "\n1:\tswi " __pu_reg_oper0 ",[%1]\n" \
242 "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \ 250 "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n" \
243 "3:\n" \ 251 "3:\n" \
244 " .section .fixup,\"ax\"\n" \ 252 " .section .fixup,\"ax\"\n" \
245 " .align 2\n" \ 253 " .align 2\n" \
246 "4: move %0, %3\n" \ 254 "4: move %0, %3\n" \
247 " b 3b\n" \ 255 " b 3b\n" \
248 " .previous\n" \ 256 " .previous\n" \
249 " .section __ex_table,\"a\"\n" \ 257 " .section __ex_table,\"a\"\n" \
250 " .align 3\n" \ 258 " .align 3\n" \
251 " .long 1b, 4b\n" \ 259 " .long 1b, 4b\n" \
252 " .long 2b, 4b\n" \ 260 " .long 2b, 4b\n" \
253 " .previous" \ 261 " .previous" \
254 : "+r"(err) \ 262 : "+r"(err) \
255 : "r"(addr), "r"(x), "i"(-EFAULT) \ 263 : "r"(addr), "r"(x), "i"(-EFAULT) \
256 : "cc") 264 : "cc")
265
257extern unsigned long __arch_clear_user(void __user * addr, unsigned long n); 266extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
258extern long strncpy_from_user(char *dest, const char __user * src, long count); 267extern long strncpy_from_user(char *dest, const char __user * src, long count);
259extern __must_check long strlen_user(const char __user * str); 268extern __must_check long strlen_user(const char __user * str);
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
index 42792743e8b9..27cded39fa66 100644
--- a/arch/nds32/kernel/Makefile
+++ b/arch/nds32/kernel/Makefile
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
21 21
22 22
23obj-y += vdso/ 23obj-y += vdso/
24
25obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
26
27ifdef CONFIG_FUNCTION_TRACER
28CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
29endif
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c
index 0c6d031a1c4a..0c5386e72098 100644
--- a/arch/nds32/kernel/atl2c.c
+++ b/arch/nds32/kernel/atl2c.c
@@ -9,7 +9,8 @@
9 9
10void __iomem *atl2c_base; 10void __iomem *atl2c_base;
11static const struct of_device_id atl2c_ids[] __initconst = { 11static const struct of_device_id atl2c_ids[] __initconst = {
12 {.compatible = "andestech,atl2c",} 12 {.compatible = "andestech,atl2c",},
13 {}
13}; 14};
14 15
15static int __init atl2c_of_init(void) 16static int __init atl2c_of_init(void)
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index b8ae4e9a6b93..21a144071566 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -118,7 +118,7 @@ common_exception_handler:
118 /* interrupt */ 118 /* interrupt */
1192: 1192:
120#ifdef CONFIG_TRACE_IRQFLAGS 120#ifdef CONFIG_TRACE_IRQFLAGS
121 jal trace_hardirqs_off 121 jal __trace_hardirqs_off
122#endif 122#endif
123 move $r0, $sp 123 move $r0, $sp
124 sethi $lp, hi20(ret_from_intr) 124 sethi $lp, hi20(ret_from_intr)
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
index 03e4f7788a18..f00af92f7e22 100644
--- a/arch/nds32/kernel/ex-exit.S
+++ b/arch/nds32/kernel/ex-exit.S
@@ -138,8 +138,8 @@ no_work_pending:
138#ifdef CONFIG_TRACE_IRQFLAGS 138#ifdef CONFIG_TRACE_IRQFLAGS
139 lwi $p0, [$sp+(#IPSW_OFFSET)] 139 lwi $p0, [$sp+(#IPSW_OFFSET)]
140 andi $p0, $p0, #0x1 140 andi $p0, $p0, #0x1
141 la $r10, trace_hardirqs_off 141 la $r10, __trace_hardirqs_off
142 la $r9, trace_hardirqs_on 142 la $r9, __trace_hardirqs_on
143 cmovz $r9, $p0, $r10 143 cmovz $r9, $p0, $r10
144 jral $r9 144 jral $r9
145#endif 145#endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644
index 000000000000..a0a9679ad5de
--- /dev/null
+++ b/arch/nds32/kernel/ftrace.c
@@ -0,0 +1,309 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/ftrace.h>
4#include <linux/uaccess.h>
5#include <asm/cacheflush.h>
6
7#ifndef CONFIG_DYNAMIC_FTRACE
8extern void (*ftrace_trace_function)(unsigned long, unsigned long,
9 struct ftrace_ops*, struct pt_regs*);
10extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
11extern void ftrace_graph_caller(void);
12
13noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
14 struct ftrace_ops *op, struct pt_regs *regs)
15{
16 __asm__ (""); /* avoid to optimize as pure function */
17}
18
19noinline void _mcount(unsigned long parent_ip)
20{
21 /* save all state by the compiler prologue */
22
23 unsigned long ip = (unsigned long)__builtin_return_address(0);
24
25 if (ftrace_trace_function != ftrace_stub)
26 ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
27 NULL, NULL);
28
29#ifdef CONFIG_FUNCTION_GRAPH_TRACER
30 if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
31 || ftrace_graph_entry != ftrace_graph_entry_stub)
32 ftrace_graph_caller();
33#endif
34
35 /* restore all state by the compiler epilogue */
36}
37EXPORT_SYMBOL(_mcount);
38
39#else /* CONFIG_DYNAMIC_FTRACE */
40
41noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
42 struct ftrace_ops *op, struct pt_regs *regs)
43{
44 __asm__ (""); /* avoid to optimize as pure function */
45}
46
47noinline void __naked _mcount(unsigned long parent_ip)
48{
49 __asm__ (""); /* avoid to optimize as pure function */
50}
51EXPORT_SYMBOL(_mcount);
52
53#define XSTR(s) STR(s)
54#define STR(s) #s
55void _ftrace_caller(unsigned long parent_ip)
56{
57 /* save all state needed by the compiler prologue */
58
59 /*
60 * prepare arguments for real tracing function
61 * first arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
62 * second arg : parent_ip
63 */
64 __asm__ __volatile__ (
65 "move $r1, %0 \n\t"
66 "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
67 :
68 : "r" (parent_ip), "r" (__builtin_return_address(0)));
69
70 /* a placeholder for the call to a real tracing function */
71 __asm__ __volatile__ (
72 "ftrace_call: \n\t"
73 "nop \n\t"
74 "nop \n\t"
75 "nop \n\t");
76
77#ifdef CONFIG_FUNCTION_GRAPH_TRACER
78 /* a placeholder for the call to ftrace_graph_caller */
79 __asm__ __volatile__ (
80 "ftrace_graph_call: \n\t"
81 "nop \n\t"
82 "nop \n\t"
83 "nop \n\t");
84#endif
85 /* restore all state needed by the compiler epilogue */
86}
87
88int __init ftrace_dyn_arch_init(void)
89{
90 return 0;
91}
92
93int ftrace_arch_code_modify_prepare(void)
94{
95 set_all_modules_text_rw();
96 return 0;
97}
98
99int ftrace_arch_code_modify_post_process(void)
100{
101 set_all_modules_text_ro();
102 return 0;
103}
104
105static unsigned long gen_sethi_insn(unsigned long addr)
106{
107 unsigned long opcode = 0x46000000;
108 unsigned long imm = addr >> 12;
109 unsigned long rt_num = 0xf << 20;
110
111 return ENDIAN_CONVERT(opcode | rt_num | imm);
112}
113
114static unsigned long gen_ori_insn(unsigned long addr)
115{
116 unsigned long opcode = 0x58000000;
117 unsigned long imm = addr & 0x0000fff;
118 unsigned long rt_num = 0xf << 20;
119 unsigned long ra_num = 0xf << 15;
120
121 return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
122}
123
124static unsigned long gen_jral_insn(unsigned long addr)
125{
126 unsigned long opcode = 0x4a000001;
127 unsigned long rt_num = 0x1e << 20;
128 unsigned long rb_num = 0xf << 10;
129
130 return ENDIAN_CONVERT(opcode | rt_num | rb_num);
131}
132
133static void ftrace_gen_call_insn(unsigned long *call_insns,
134 unsigned long addr)
135{
136 call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u */
137 call_insns[1] = gen_ori_insn(addr); /* ori $r15, $r15, imm15u */
138 call_insns[2] = gen_jral_insn(addr); /* jral $lp, $r15 */
139}
140
141static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
142 unsigned long *new_insn, bool validate)
143{
144 unsigned long orig_insn[3];
145
146 if (validate) {
147 if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
148 return -EFAULT;
149 if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
150 return -EINVAL;
151 }
152
153 if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
154 return -EPERM;
155
156 return 0;
157}
158
159static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
160 unsigned long *new_insn, bool validate)
161{
162 int ret;
163
164 ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
165 if (ret)
166 return ret;
167
168 flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
169
170 return ret;
171}
172
173int ftrace_update_ftrace_func(ftrace_func_t func)
174{
175 unsigned long pc = (unsigned long)&ftrace_call;
176 unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
177 unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
178
179 if (func != ftrace_stub)
180 ftrace_gen_call_insn(new_insn, (unsigned long)func);
181
182 return ftrace_modify_code(pc, old_insn, new_insn, false);
183}
184
185int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
186{
187 unsigned long pc = rec->ip;
188 unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
189 unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
190
191 ftrace_gen_call_insn(call_insn, addr);
192
193 return ftrace_modify_code(pc, nop_insn, call_insn, true);
194}
195
196int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
197 unsigned long addr)
198{
199 unsigned long pc = rec->ip;
200 unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
201 unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
202
203 ftrace_gen_call_insn(call_insn, addr);
204
205 return ftrace_modify_code(pc, call_insn, nop_insn, true);
206}
207#endif /* CONFIG_DYNAMIC_FTRACE */
208
209#ifdef CONFIG_FUNCTION_GRAPH_TRACER
210void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
211 unsigned long frame_pointer)
212{
213 unsigned long return_hooker = (unsigned long)&return_to_handler;
214 struct ftrace_graph_ent trace;
215 unsigned long old;
216 int err;
217
218 if (unlikely(atomic_read(&current->tracing_graph_pause)))
219 return;
220
221 old = *parent;
222
223 trace.func = self_addr;
224 trace.depth = current->curr_ret_stack + 1;
225
226 /* Only trace if the calling function expects to */
227 if (!ftrace_graph_entry(&trace))
228 return;
229
230 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
231 frame_pointer, NULL);
232
233 if (err == -EBUSY)
234 return;
235
236 *parent = return_hooker;
237}
238
/*
 * Trampoline target for graph tracing: recover the traced function's
 * context from the stack via GCC builtins and hand it to
 * prepare_ftrace_return().
 *
 * NOTE(review): the frame/return-address levels (2, 3 and 1) and the
 * "- 4" offset encode the exact stack layout produced by the mcount
 * entry path; they assume the parent return address sits one word below
 * frame level 2 — confirm against the mcount assembly before changing.
 * selfpc backs up by MCOUNT_INSN_SIZE to get the address of the
 * instrumented call site itself.
 */
noinline void ftrace_graph_caller(void)
{
	unsigned long *parent_ip =
		(unsigned long *)(__builtin_frame_address(2) - 4);

	unsigned long selfpc =
		(unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);

	unsigned long frame_pointer =
		(unsigned long)__builtin_frame_address(3);

	prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
}
252
extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
/*
 * Replaces the traced function's return address (installed by
 * prepare_ftrace_return()). Naked: no compiler prologue/epilogue, so
 * the hand-written asm must preserve everything itself. It saves the
 * return-value/argument registers, asks ftrace_return_to_handler() for
 * the original return address (passing $fp), puts that address in $lp,
 * then restores the registers; execution falls through to the restored
 * $lp via the popped state.
 */
void __naked return_to_handler(void)
{
	__asm__ __volatile__ (
		/* save state needed by the ABI */
		"smw.adm $r0,[$sp],$r1,#0x0 \n\t"

		/* get original return address */
		"move $r0, $fp \n\t"
		"bal ftrace_return_to_handler\n\t"
		"move $lp, $r0 \n\t"

		/* restore state needed by the ABI */
		"lmw.bim $r0,[$sp],$r1,#0x0 \n\t");
}
268
269#ifdef CONFIG_DYNAMIC_FTRACE
270extern unsigned long ftrace_graph_call;
271
272static int ftrace_modify_graph_caller(bool enable)
273{
274 unsigned long pc = (unsigned long)&ftrace_graph_call;
275 unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
276 unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
277
278 ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
279
280 if (enable)
281 return ftrace_modify_code(pc, nop_insn, call_insn, true);
282 else
283 return ftrace_modify_code(pc, call_insn, nop_insn, true);
284}
285
/* Core-ftrace hook: turn on the graph-caller patch site. */
int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}
290
/* Core-ftrace hook: turn off the graph-caller patch site. */
int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
295#endif /* CONFIG_DYNAMIC_FTRACE */
296
297#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
298
299
300#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Out-of-line wrapper for trace_hardirqs_off(); noinline gives low-level
 * (often assembly) callers a stable symbol to branch to.
 */
noinline void __trace_hardirqs_off(void)
{
	trace_hardirqs_off();
}
/*
 * Out-of-line wrapper for trace_hardirqs_on(); noinline gives low-level
 * (often assembly) callers a stable symbol to branch to.
 */
noinline void __trace_hardirqs_on(void)
{
	trace_hardirqs_on();
}
309#endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
index 4167283d8293..1e31829cbc2a 100644
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
40 40
41 tmp2 = tmp & loc_mask; 41 tmp2 = tmp & loc_mask;
42 if (partial_in_place) { 42 if (partial_in_place) {
43 tmp &= (!loc_mask); 43 tmp &= (~loc_mask);
44 tmp = 44 tmp =
45 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 45 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
46 } else { 46 } else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
70 70
71 tmp2 = tmp & loc_mask; 71 tmp2 = tmp & loc_mask;
72 if (partial_in_place) { 72 if (partial_in_place) {
73 tmp &= (!loc_mask); 73 tmp &= (~loc_mask);
74 tmp = 74 tmp =
75 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask); 75 tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
76 } else { 76 } else {
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
index 8b231e910ea6..d974c0c1c65f 100644
--- a/arch/nds32/kernel/stacktrace.c
+++ b/arch/nds32/kernel/stacktrace.c
@@ -4,6 +4,7 @@
4#include <linux/sched/debug.h> 4#include <linux/sched/debug.h>
5#include <linux/sched/task_stack.h> 5#include <linux/sched/task_stack.h>
6#include <linux/stacktrace.h> 6#include <linux/stacktrace.h>
7#include <linux/ftrace.h>
7 8
8void save_stack_trace(struct stack_trace *trace) 9void save_stack_trace(struct stack_trace *trace)
9{ 10{
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
16 unsigned long *fpn; 17 unsigned long *fpn;
17 int skip = trace->skip; 18 int skip = trace->skip;
18 int savesched; 19 int savesched;
20 int graph_idx = 0;
19 21
20 if (tsk == current) { 22 if (tsk == current) {
21 __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn)); 23 __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
29 && (fpn >= (unsigned long *)TASK_SIZE)) { 31 && (fpn >= (unsigned long *)TASK_SIZE)) {
30 unsigned long lpp, fpp; 32 unsigned long lpp, fpp;
31 33
32 lpp = fpn[-1]; 34 lpp = fpn[LP_OFFSET];
33 fpp = fpn[FP_OFFSET]; 35 fpp = fpn[FP_OFFSET];
34 if (!__kernel_text_address(lpp)) 36 if (!__kernel_text_address(lpp))
35 break; 37 break;
38 else
39 lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
36 40
37 if (savesched || !in_sched_functions(lpp)) { 41 if (savesched || !in_sched_functions(lpp)) {
38 if (skip) { 42 if (skip) {
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index a6205fd4db52..1496aab48998 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -8,6 +8,7 @@
8#include <linux/kdebug.h> 8#include <linux/kdebug.h>
9#include <linux/sched/task_stack.h> 9#include <linux/sched/task_stack.h>
10#include <linux/uaccess.h> 10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
11 12
12#include <asm/proc-fns.h> 13#include <asm/proc-fns.h>
13#include <asm/unistd.h> 14#include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
94 set_fs(fs); 95 set_fs(fs);
95} 96}
96 97
97#ifdef CONFIG_FUNCTION_GRAPH_TRACER
98#include <linux/ftrace.h>
99static void
100get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
101{
102 if (*addr == (unsigned long)return_to_handler) {
103 int index = tsk->curr_ret_stack;
104
105 if (tsk->ret_stack && index >= *graph) {
106 index -= *graph;
107 *addr = tsk->ret_stack[index].ret;
108 (*graph)++;
109 }
110 }
111}
112#else
113static inline void
114get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
115{
116}
117#endif
118
119#define LOOP_TIMES (100) 98#define LOOP_TIMES (100)
120static void __dump(struct task_struct *tsk, unsigned long *base_reg) 99static void __dump(struct task_struct *tsk, unsigned long *base_reg)
121{ 100{
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
126 while (!kstack_end(base_reg)) { 105 while (!kstack_end(base_reg)) {
127 ret_addr = *base_reg++; 106 ret_addr = *base_reg++;
128 if (__kernel_text_address(ret_addr)) { 107 if (__kernel_text_address(ret_addr)) {
129 get_real_ret_addr(&ret_addr, tsk, &graph); 108 ret_addr = ftrace_graph_ret_addr(
109 tsk, &graph, ret_addr, NULL);
130 print_ip_sym(ret_addr); 110 print_ip_sym(ret_addr);
131 } 111 }
132 if (--cnt < 0) 112 if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
137 !((unsigned long)base_reg & 0x3) && 117 !((unsigned long)base_reg & 0x3) &&
138 ((unsigned long)base_reg >= TASK_SIZE)) { 118 ((unsigned long)base_reg >= TASK_SIZE)) {
139 unsigned long next_fp; 119 unsigned long next_fp;
140#if !defined(NDS32_ABI_2) 120 ret_addr = base_reg[LP_OFFSET];
141 ret_addr = base_reg[0];
142 next_fp = base_reg[1];
143#else
144 ret_addr = base_reg[-1];
145 next_fp = base_reg[FP_OFFSET]; 121 next_fp = base_reg[FP_OFFSET];
146#endif
147 if (__kernel_text_address(ret_addr)) { 122 if (__kernel_text_address(ret_addr)) {
148 get_real_ret_addr(&ret_addr, tsk, &graph); 123
124 ret_addr = ftrace_graph_ret_addr(
125 tsk, &graph, ret_addr, NULL);
149 print_ip_sym(ret_addr); 126 print_ip_sym(ret_addr);
150 } 127 }
151 if (--cnt < 0) 128 if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
196 pr_emerg("CPU: %i\n", smp_processor_id()); 173 pr_emerg("CPU: %i\n", smp_processor_id());
197 show_regs(regs); 174 show_regs(regs);
198 pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n", 175 pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
199 tsk->comm, tsk->pid, task_thread_info(tsk) + 1); 176 tsk->comm, tsk->pid, end_of_stack(tsk));
200 177
201 if (!user_mode(regs) || in_interrupt()) { 178 if (!user_mode(regs) || in_interrupt()) {
202 dump_mem("Stack: ", regs->sp, 179 dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
203 THREAD_SIZE + (unsigned long)task_thread_info(tsk));
204 dump_instr(regs); 180 dump_instr(regs);
205 dump_stack(); 181 dump_stack();
206 } 182 }
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
index 288313b886ef..9e90f30a181d 100644
--- a/arch/nds32/kernel/vmlinux.lds.S
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
13ENTRY(_stext_lma) 13ENTRY(_stext_lma)
14jiffies = jiffies_64; 14jiffies = jiffies_64;
15 15
16#if defined(CONFIG_GCOV_KERNEL)
17#define NDS32_EXIT_KEEP(x) x
18#else
19#define NDS32_EXIT_KEEP(x)
20#endif
21
16SECTIONS 22SECTIONS
17{ 23{
18 _stext_lma = TEXTADDR - LOAD_OFFSET; 24 _stext_lma = TEXTADDR - LOAD_OFFSET;
19 . = TEXTADDR; 25 . = TEXTADDR;
20 __init_begin = .; 26 __init_begin = .;
21 HEAD_TEXT_SECTION 27 HEAD_TEXT_SECTION
28 .exit.text : {
29 NDS32_EXIT_KEEP(EXIT_TEXT)
30 }
22 INIT_TEXT_SECTION(PAGE_SIZE) 31 INIT_TEXT_SECTION(PAGE_SIZE)
23 INIT_DATA_SECTION(16) 32 INIT_DATA_SECTION(16)
33 .exit.data : {
34 NDS32_EXIT_KEEP(EXIT_DATA)
35 }
24 PERCPU_SECTION(L1_CACHE_BYTES) 36 PERCPU_SECTION(L1_CACHE_BYTES)
25 __init_end = .; 37 __init_end = .;
26 38
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index 7a49f0d28d14..f1da8a7b17ff 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -3,15 +3,6 @@
3config TRACE_IRQFLAGS_SUPPORT 3config TRACE_IRQFLAGS_SUPPORT
4 def_bool y 4 def_bool y
5 5
6config DEBUG_STACK_USAGE
7 bool "Enable stack utilization instrumentation"
8 depends on DEBUG_KERNEL
9 help
10 Enables the display of the minimum amount of free stack which each
11 task has ever had available in the sysrq-T and sysrq-P debug output.
12
13 This option will slow down process creation somewhat.
14
15config EARLY_PRINTK 6config EARLY_PRINTK
16 bool "Activate early kernel debugging" 7 bool "Activate early kernel debugging"
17 default y 8 default y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index db0b6eebbfa5..a80669209155 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -177,7 +177,6 @@ config PPC
177 select HAVE_ARCH_KGDB 177 select HAVE_ARCH_KGDB
178 select HAVE_ARCH_MMAP_RND_BITS 178 select HAVE_ARCH_MMAP_RND_BITS
179 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT 179 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
180 select HAVE_ARCH_PREL32_RELOCATIONS
181 select HAVE_ARCH_SECCOMP_FILTER 180 select HAVE_ARCH_SECCOMP_FILTER
182 select HAVE_ARCH_TRACEHOOK 181 select HAVE_ARCH_TRACEHOOK
183 select HAVE_CBPF_JIT if !PPC64 182 select HAVE_CBPF_JIT if !PPC64
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 13a688fc8cd0..2fdc865ca374 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
1051 return hash__vmemmap_remove_mapping(start, page_size); 1051 return hash__vmemmap_remove_mapping(start, page_size);
1052} 1052}
1053#endif 1053#endif
1054struct page *realmode_pfn_to_page(unsigned long pfn);
1055 1054
1056static inline pte_t pmd_pte(pmd_t pmd) 1055static inline pte_t pmd_pte(pmd_t pmd)
1057{ 1056{
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..3d4b88cb8599 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
220extern int __init tce_iommu_bus_notifier_init(void); 220extern int __init tce_iommu_bus_notifier_init(void);
221extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, 221extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
222 unsigned long *hpa, enum dma_data_direction *direction); 222 unsigned long *hpa, enum dma_data_direction *direction);
223extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
224 unsigned long *hpa, enum dma_data_direction *direction);
225#else 223#else
226static inline void iommu_register_group(struct iommu_table_group *table_group, 224static inline void iommu_register_group(struct iommu_table_group *table_group,
227 int pci_domain_number, 225 int pci_domain_number,
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b2f89b621b15..b694d6af1150 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
38 unsigned long ua, unsigned int pageshift, unsigned long *hpa); 38 unsigned long ua, unsigned int pageshift, unsigned long *hpa);
39extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, 39extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
40 unsigned long ua, unsigned int pageshift, unsigned long *hpa); 40 unsigned long ua, unsigned int pageshift, unsigned long *hpa);
41extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
41extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); 42extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
42extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); 43extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
43#endif 44#endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d..1fffbba8d6a5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
9 9
10extern unsigned int rtas_data; 10extern unsigned int rtas_data;
11extern unsigned long long memory_limit; 11extern unsigned long long memory_limit;
12extern bool init_mem_is_free;
12extern unsigned long klimit; 13extern unsigned long klimit;
13extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 14extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
14 15
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c092..2d8fc8c9da7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
1314 1314
1315#ifdef CONFIG_PPC_DENORMALISATION 1315#ifdef CONFIG_PPC_DENORMALISATION
1316 mfspr r10,SPRN_HSRR1 1316 mfspr r10,SPRN_HSRR1
1317 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
1318 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */ 1317 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
1319 addi r11,r11,-4 /* HSRR0 is next instruction */
1320 bne+ denorm_assist 1318 bne+ denorm_assist
1321#endif 1319#endif
1322 1320
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1382 */ 1380 */
1383 XVCPSGNDP32(32) 1381 XVCPSGNDP32(32)
1384denorm_done: 1382denorm_done:
1383 mfspr r11,SPRN_HSRR0
1384 subi r11,r11,4
1385 mtspr SPRN_HSRR0,r11 1385 mtspr SPRN_HSRR0,r11
1386 mtcrf 0x80,r9 1386 mtcrf 0x80,r9
1387 ld r9,PACA_EXGEN+EX_R9(r13) 1387 ld r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index af7a20dc6e09..19b4c628f3be 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
1013} 1013}
1014EXPORT_SYMBOL_GPL(iommu_tce_xchg); 1014EXPORT_SYMBOL_GPL(iommu_tce_xchg);
1015 1015
1016#ifdef CONFIG_PPC_BOOK3S_64
1017long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
1018 unsigned long *hpa, enum dma_data_direction *direction)
1019{
1020 long ret;
1021
1022 ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
1023
1024 if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1025 (*direction == DMA_BIDIRECTIONAL))) {
1026 struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
1027
1028 if (likely(pg)) {
1029 SetPageDirty(pg);
1030 } else {
1031 tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
1032 ret = -EFAULT;
1033 }
1034 }
1035
1036 return ret;
1037}
1038EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
1039#endif
1040
1041int iommu_take_ownership(struct iommu_table *tbl) 1016int iommu_take_ownership(struct iommu_table *tbl)
1042{ 1017{
1043 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; 1018 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe7..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
176 std r1, PACATMSCRATCH(r13) 176 std r1, PACATMSCRATCH(r13)
177 ld r1, PACAR1(r13) 177 ld r1, PACAR1(r13)
178 178
179 /* Store the PPR in r11 and reset to decent value */
180 std r11, GPR11(r1) /* Temporary stash */ 179 std r11, GPR11(r1) /* Temporary stash */
181 180
181 /*
182 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
183 * clobbered by an exception once we turn on MSR_RI below.
184 */
185 ld r11, PACATMSCRATCH(r13)
186 std r11, GPR1(r1)
187
188 /*
189 * Store r13 away so we can free up the scratch SPR for the SLB fault
190 * handler (needed once we start accessing the thread_struct).
191 */
192 GET_SCRATCH0(r11)
193 std r11, GPR13(r1)
194
182 /* Reset MSR RI so we can take SLB faults again */ 195 /* Reset MSR RI so we can take SLB faults again */
183 li r11, MSR_RI 196 li r11, MSR_RI
184 mtmsrd r11, 1 197 mtmsrd r11, 1
185 198
199 /* Store the PPR in r11 and reset to decent value */
186 mfspr r11, SPRN_PPR 200 mfspr r11, SPRN_PPR
187 HMT_MEDIUM 201 HMT_MEDIUM
188 202
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
207 SAVE_GPR(8, r7) /* user r8 */ 221 SAVE_GPR(8, r7) /* user r8 */
208 SAVE_GPR(9, r7) /* user r9 */ 222 SAVE_GPR(9, r7) /* user r9 */
209 SAVE_GPR(10, r7) /* user r10 */ 223 SAVE_GPR(10, r7) /* user r10 */
210 ld r3, PACATMSCRATCH(r13) /* user r1 */ 224 ld r3, GPR1(r1) /* user r1 */
211 ld r4, GPR7(r1) /* user r7 */ 225 ld r4, GPR7(r1) /* user r7 */
212 ld r5, GPR11(r1) /* user r11 */ 226 ld r5, GPR11(r1) /* user r11 */
213 ld r6, GPR12(r1) /* user r12 */ 227 ld r6, GPR12(r1) /* user r12 */
214 GET_SCRATCH0(8) /* user r13 */ 228 ld r8, GPR13(r1) /* user r13 */
215 std r3, GPR1(r7) 229 std r3, GPR1(r7)
216 std r4, GPR7(r7) 230 std r4, GPR7(r7)
217 std r5, GPR11(r7) 231 std r5, GPR11(r7)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3c0e8fb2b773..68e14afecac8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
358 unsigned long pp, key; 358 unsigned long pp, key;
359 unsigned long v, orig_v, gr; 359 unsigned long v, orig_v, gr;
360 __be64 *hptep; 360 __be64 *hptep;
361 int index; 361 long int index;
362 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR); 362 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
363 363
364 if (kvm_is_radix(vcpu->kvm)) 364 if (kvm_is_radix(vcpu->kvm))
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0af1c0aea1fe..933c574e1cf7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
525 unsigned long ea, unsigned long dsisr) 525 unsigned long ea, unsigned long dsisr)
526{ 526{
527 struct kvm *kvm = vcpu->kvm; 527 struct kvm *kvm = vcpu->kvm;
528 unsigned long mmu_seq, pte_size; 528 unsigned long mmu_seq;
529 unsigned long gpa, gfn, hva, pfn; 529 unsigned long gpa, gfn, hva;
530 struct kvm_memory_slot *memslot; 530 struct kvm_memory_slot *memslot;
531 struct page *page = NULL; 531 struct page *page = NULL;
532 long ret; 532 long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
623 */ 623 */
624 hva = gfn_to_hva_memslot(memslot, gfn); 624 hva = gfn_to_hva_memslot(memslot, gfn);
625 if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) { 625 if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
626 pfn = page_to_pfn(page);
627 upgrade_write = true; 626 upgrade_write = true;
628 } else { 627 } else {
628 unsigned long pfn;
629
629 /* Call KVM generic code to do the slow-path check */ 630 /* Call KVM generic code to do the slow-path check */
630 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, 631 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
631 writing, upgrade_p); 632 writing, upgrade_p);
@@ -639,63 +640,45 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
639 } 640 }
640 } 641 }
641 642
642 /* See if we can insert a 1GB or 2MB large PTE here */
643 level = 0;
644 if (page && PageCompound(page)) {
645 pte_size = PAGE_SIZE << compound_order(compound_head(page));
646 if (pte_size >= PUD_SIZE &&
647 (gpa & (PUD_SIZE - PAGE_SIZE)) ==
648 (hva & (PUD_SIZE - PAGE_SIZE))) {
649 level = 2;
650 pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
651 } else if (pte_size >= PMD_SIZE &&
652 (gpa & (PMD_SIZE - PAGE_SIZE)) ==
653 (hva & (PMD_SIZE - PAGE_SIZE))) {
654 level = 1;
655 pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
656 }
657 }
658
659 /* 643 /*
660 * Compute the PTE value that we need to insert. 644 * Read the PTE from the process' radix tree and use that
645 * so we get the shift and attribute bits.
661 */ 646 */
662 if (page) { 647 local_irq_disable();
663 pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE | 648 ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
664 _PAGE_ACCESSED; 649 pte = *ptep;
665 if (writing || upgrade_write) 650 local_irq_enable();
666 pgflags |= _PAGE_WRITE | _PAGE_DIRTY; 651
667 pte = pfn_pte(pfn, __pgprot(pgflags)); 652 /* Get pte level from shift/size */
653 if (shift == PUD_SHIFT &&
654 (gpa & (PUD_SIZE - PAGE_SIZE)) ==
655 (hva & (PUD_SIZE - PAGE_SIZE))) {
656 level = 2;
657 } else if (shift == PMD_SHIFT &&
658 (gpa & (PMD_SIZE - PAGE_SIZE)) ==
659 (hva & (PMD_SIZE - PAGE_SIZE))) {
660 level = 1;
668 } else { 661 } else {
669 /* 662 level = 0;
670 * Read the PTE from the process' radix tree and use that 663 if (shift > PAGE_SHIFT) {
671 * so we get the attribute bits. 664 /*
672 */ 665 * If the pte maps more than one page, bring over
673 local_irq_disable(); 666 * bits from the virtual address to get the real
674 ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); 667 * address of the specific single page we want.
675 pte = *ptep; 668 */
676 local_irq_enable(); 669 unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
677 if (shift == PUD_SHIFT && 670 pte = __pte(pte_val(pte) | (hva & rpnmask));
678 (gpa & (PUD_SIZE - PAGE_SIZE)) ==
679 (hva & (PUD_SIZE - PAGE_SIZE))) {
680 level = 2;
681 } else if (shift == PMD_SHIFT &&
682 (gpa & (PMD_SIZE - PAGE_SIZE)) ==
683 (hva & (PMD_SIZE - PAGE_SIZE))) {
684 level = 1;
685 } else if (shift && shift != PAGE_SHIFT) {
686 /* Adjust PFN */
687 unsigned long mask = (1ul << shift) - PAGE_SIZE;
688 pte = __pte(pte_val(pte) | (hva & mask));
689 }
690 pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
691 if (writing || upgrade_write) {
692 if (pte_val(pte) & _PAGE_WRITE)
693 pte = __pte(pte_val(pte) | _PAGE_DIRTY);
694 } else {
695 pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
696 } 671 }
697 } 672 }
698 673
674 pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
675 if (writing || upgrade_write) {
676 if (pte_val(pte) & _PAGE_WRITE)
677 pte = __pte(pte_val(pte) | _PAGE_DIRTY);
678 } else {
679 pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
680 }
681
699 /* Allocate space in the tree and write the PTE */ 682 /* Allocate space in the tree and write the PTE */
700 ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq); 683 ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
701 684
@@ -725,10 +708,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
725 gpa, shift); 708 gpa, shift);
726 kvmppc_radix_tlbie_page(kvm, gpa, shift); 709 kvmppc_radix_tlbie_page(kvm, gpa, shift);
727 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { 710 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
728 unsigned long npages = 1; 711 unsigned long psize = PAGE_SIZE;
729 if (shift) 712 if (shift)
730 npages = 1ul << (shift - PAGE_SHIFT); 713 psize = 1ul << shift;
731 kvmppc_update_dirty_map(memslot, gfn, npages); 714 kvmppc_update_dirty_map(memslot, gfn, psize);
732 } 715 }
733 } 716 }
734 return 0; 717 return 0;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..6821ead4b4eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
187EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); 187EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
188 188
189#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 189#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
190static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry) 190static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
191 unsigned long entry, unsigned long *hpa,
192 enum dma_data_direction *direction)
193{
194 long ret;
195
196 ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
197
198 if (!ret && ((*direction == DMA_FROM_DEVICE) ||
199 (*direction == DMA_BIDIRECTIONAL))) {
200 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
201 /*
202 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
203 * calling this so we still get here a valid UA.
204 */
205 if (pua && *pua)
206 mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
207 }
208
209 return ret;
210}
211
212static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
213 unsigned long entry)
191{ 214{
192 unsigned long hpa = 0; 215 unsigned long hpa = 0;
193 enum dma_data_direction dir = DMA_NONE; 216 enum dma_data_direction dir = DMA_NONE;
194 217
195 iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); 218 iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
196} 219}
197 220
198static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, 221static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
224 unsigned long hpa = 0; 247 unsigned long hpa = 0;
225 long ret; 248 long ret;
226 249
227 if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir)) 250 if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
228 /* 251 /*
229 * real mode xchg can fail if struct page crosses 252 * real mode xchg can fail if struct page crosses
230 * a page boundary 253 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
236 259
237 ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); 260 ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
238 if (ret) 261 if (ret)
239 iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); 262 iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
240 263
241 return ret; 264 return ret;
242} 265}
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
282 if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) 305 if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
283 return H_CLOSED; 306 return H_CLOSED;
284 307
285 ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); 308 ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
286 if (ret) { 309 if (ret) {
287 mm_iommu_mapped_dec(mem); 310 mm_iommu_mapped_dec(mem);
288 /* 311 /*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
371 return ret; 394 return ret;
372 395
373 WARN_ON_ONCE_RM(1); 396 WARN_ON_ONCE_RM(1);
374 kvmppc_rm_clear_tce(stit->tbl, entry); 397 kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
375 } 398 }
376 399
377 kvmppc_tce_put(stt, entry, tce); 400 kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
520 goto unlock_exit; 543 goto unlock_exit;
521 544
522 WARN_ON_ONCE_RM(1); 545 WARN_ON_ONCE_RM(1);
523 kvmppc_rm_clear_tce(stit->tbl, entry); 546 kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
524 } 547 }
525 548
526 kvmppc_tce_put(stt, entry + i, tce); 549 kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
571 return ret; 594 return ret;
572 595
573 WARN_ON_ONCE_RM(1); 596 WARN_ON_ONCE_RM(1);
574 kvmppc_rm_clear_tce(stit->tbl, entry); 597 kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
575 } 598 }
576 } 599 }
577 600
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13..d05c8af4ac51 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
443 addc r0, r8, r9 443 addc r0, r8, r9
444 ld r10, 0(r4) 444 ld r10, 0(r4)
445 ld r11, 8(r4) 445 ld r11, 8(r4)
446#ifdef CONFIG_CPU_LITTLE_ENDIAN
447 rotldi r5, r5, 8
448#endif
446 adde r0, r0, r10 449 adde r0, r0, r10
447 add r5, r5, r7 450 add r5, r5, r7
448 adde r0, r0, r11 451 adde r0, r0, r11
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5..6ae2777c220d 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
28{ 28{
29 int err; 29 int err;
30 30
31 /* Make sure we aren't patching a freed init section */
32 if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
33 pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
34 return 0;
35 }
36
31 __put_user_size(instr, patch_addr, 4, err); 37 __put_user_size(instr, patch_addr, 4, err);
32 if (err) 38 if (err)
33 return err; 39 return err;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 51ce091914f9..7a9886f98b0c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
308{ 308{
309} 309}
310 310
311/*
312 * We do not have access to the sparsemem vmemmap, so we fallback to
313 * walking the list of sparsemem blocks which we already maintain for
314 * the sake of crashdump. In the long run, we might want to maintain
315 * a tree if performance of that linear walk becomes a problem.
316 *
317 * realmode_pfn_to_page functions can fail due to:
318 * 1) As real sparsemem blocks do not lay in RAM continously (they
319 * are in virtual address space which is not available in the real mode),
320 * the requested page struct can be split between blocks so get_page/put_page
321 * may fail.
322 * 2) When huge pages are used, the get_page/put_page API will fail
323 * in real mode as the linked addresses in the page struct are virtual
324 * too.
325 */
326struct page *realmode_pfn_to_page(unsigned long pfn)
327{
328 struct vmemmap_backing *vmem_back;
329 struct page *page;
330 unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
331 unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
332
333 for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
334 if (pg_va < vmem_back->virt_addr)
335 continue;
336
337 /* After vmemmap_list entry free is possible, need check all */
338 if ((pg_va + sizeof(struct page)) <=
339 (vmem_back->virt_addr + page_size)) {
340 page = (struct page *) (vmem_back->phys + pg_va -
341 vmem_back->virt_addr);
342 return page;
343 }
344 }
345
346 /* Probably that page struct is split between real pages */
347 return NULL;
348}
349EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
350
351#else
352
353struct page *realmode_pfn_to_page(unsigned long pfn)
354{
355 struct page *page = pfn_to_page(pfn);
356 return page;
357}
358EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
359
360#endif /* CONFIG_SPARSEMEM_VMEMMAP */ 311#endif /* CONFIG_SPARSEMEM_VMEMMAP */
361 312
362#ifdef CONFIG_PPC_BOOK3S_64 313#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611..04ccb274a620 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
63#endif 63#endif
64 64
65unsigned long long memory_limit; 65unsigned long long memory_limit;
66bool init_mem_is_free;
66 67
67#ifdef CONFIG_HIGHMEM 68#ifdef CONFIG_HIGHMEM
68pte_t *kmap_pte; 69pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
396{ 397{
397 ppc_md.progress = ppc_printk_progress; 398 ppc_md.progress = ppc_printk_progress;
398 mark_initmem_nx(); 399 mark_initmem_nx();
400 init_mem_is_free = true;
399 free_initmem_default(POISON_FREE_INITMEM); 401 free_initmem_default(POISON_FREE_INITMEM);
400} 402}
401 403
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index c9ee9e23845f..56c2234cc6ae 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -18,11 +18,15 @@
18#include <linux/migrate.h> 18#include <linux/migrate.h>
19#include <linux/hugetlb.h> 19#include <linux/hugetlb.h>
20#include <linux/swap.h> 20#include <linux/swap.h>
21#include <linux/sizes.h>
21#include <asm/mmu_context.h> 22#include <asm/mmu_context.h>
22#include <asm/pte-walk.h> 23#include <asm/pte-walk.h>
23 24
24static DEFINE_MUTEX(mem_list_mutex); 25static DEFINE_MUTEX(mem_list_mutex);
25 26
27#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1
28#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1)
29
26struct mm_iommu_table_group_mem_t { 30struct mm_iommu_table_group_mem_t {
27 struct list_head next; 31 struct list_head next;
28 struct rcu_head rcu; 32 struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
263 if (!page) 267 if (!page)
264 continue; 268 continue;
265 269
270 if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
271 SetPageDirty(page);
272
266 put_page(page); 273 put_page(page);
267 mem->hpas[i] = 0; 274 mem->hpas[i] = 0;
268 } 275 }
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
360 367
361 return ret; 368 return ret;
362} 369}
363EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
364 370
365struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, 371struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
366 unsigned long ua, unsigned long entries) 372 unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
390 if (pageshift > mem->pageshift) 396 if (pageshift > mem->pageshift)
391 return -EFAULT; 397 return -EFAULT;
392 398
393 *hpa = *va | (ua & ~PAGE_MASK); 399 *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
394 400
395 return 0; 401 return 0;
396} 402}
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
413 if (!pa) 419 if (!pa)
414 return -EFAULT; 420 return -EFAULT;
415 421
416 *hpa = *pa | (ua & ~PAGE_MASK); 422 *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
417 423
418 return 0; 424 return 0;
419} 425}
420EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm); 426
427extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
428{
429 struct mm_iommu_table_group_mem_t *mem;
430 long entry;
431 void *va;
432 unsigned long *pa;
433
434 mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
435 if (!mem)
436 return;
437
438 entry = (ua - mem->ua) >> PAGE_SHIFT;
439 va = &mem->hpas[entry];
440
441 pa = (void *) vmalloc_to_phys(va);
442 if (!pa)
443 return;
444
445 *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
446}
421 447
422long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem) 448long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
423{ 449{
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a..59d07bd5374a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
1204 int new_nid; 1204 int new_nid;
1205 1205
1206 /* Use associativity from first thread for all siblings */ 1206 /* Use associativity from first thread for all siblings */
1207 vphn_get_associativity(cpu, associativity); 1207 if (vphn_get_associativity(cpu, associativity))
1208 return cpu_to_node(cpu);
1209
1208 new_nid = associativity_to_nid(associativity); 1210 new_nid = associativity_to_nid(associativity);
1209 if (new_nid < 0 || !node_possible(new_nid)) 1211 if (new_nid < 0 || !node_possible(new_nid))
1210 new_nid = first_online_node; 1212 new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;
1452 1454
1453static void reset_topology_timer(void) 1455static void reset_topology_timer(void)
1454{ 1456{
1455 mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ); 1457 if (vphn_enabled)
1458 mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
1456} 1459}
1457 1460
1458#ifdef CONFIG_SMP 1461#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435..b271b283c785 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
45 * Since any pkey can be used for data or execute, we will just treat 45 * Since any pkey can be used for data or execute, we will just treat
46 * all keys as equal and track them as one entity. 46 * all keys as equal and track them as one entity.
47 */ 47 */
48 pkeys_total = be32_to_cpu(vals[0]); 48 pkeys_total = vals[0];
49 pkeys_devtree_defined = true; 49 pkeys_devtree_defined = true;
50} 50}
51 51
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8d..fe9691040f54 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
276 level_shift = entries_shift + 3; 276 level_shift = entries_shift + 3;
277 level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT); 277 level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
278 278
279 if ((level_shift - 3) * levels + page_shift >= 60) 279 if ((level_shift - 3) * levels + page_shift >= 55)
280 return -EINVAL; 280 return -EINVAL;
281 281
282 /* Allocate TCE table */ 282 /* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..c9fecd120d18
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_RISCV_PROTOTYPES_H
3
4#include <linux/ftrace.h>
5#include <asm-generic/asm-prototypes.h>
6
7#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index c229509288ea..439dc7072e05 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -14,6 +14,10 @@
14#ifndef _ASM_RISCV_TLB_H 14#ifndef _ASM_RISCV_TLB_H
15#define _ASM_RISCV_TLB_H 15#define _ASM_RISCV_TLB_H
16 16
17struct mmu_gather;
18
19static void tlb_flush(struct mmu_gather *tlb);
20
17#include <asm-generic/tlb.h> 21#include <asm-generic/tlb.h>
18 22
19static inline void tlb_flush(struct mmu_gather *tlb) 23static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index db20dc630e7e..b2d26d9d8489 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
85#ifdef CONFIG_BLK_DEV_INITRD 85#ifdef CONFIG_BLK_DEV_INITRD
86static void __init setup_initrd(void) 86static void __init setup_initrd(void)
87{ 87{
88 extern char __initramfs_start[];
89 extern unsigned long __initramfs_size;
90 unsigned long size; 88 unsigned long size;
91 89
92 if (__initramfs_size > 0) {
93 initrd_start = (unsigned long)(&__initramfs_start);
94 initrd_end = initrd_start + __initramfs_size;
95 }
96
97 if (initrd_start >= initrd_end) { 90 if (initrd_start >= initrd_end) {
98 printk(KERN_INFO "initrd not found or empty"); 91 printk(KERN_INFO "initrd not found or empty");
99 goto disable; 92 goto disable;
@@ -193,7 +186,7 @@ static void __init setup_bootmem(void)
193 BUG_ON(mem_size == 0); 186 BUG_ON(mem_size == 0);
194 187
195 set_max_mapnr(PFN_DOWN(mem_size)); 188 set_max_mapnr(PFN_DOWN(mem_size));
196 max_low_pfn = pfn_base + PFN_DOWN(mem_size); 189 max_low_pfn = memblock_end_of_DRAM();
197 190
198#ifdef CONFIG_BLK_DEV_INITRD 191#ifdef CONFIG_BLK_DEV_INITRD
199 setup_initrd(); 192 setup_initrd();
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 568026ccf6e8..fb03a4482ad6 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
65SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, 65SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
66 uintptr_t, flags) 66 uintptr_t, flags)
67{ 67{
68#ifdef CONFIG_SMP
69 struct mm_struct *mm = current->mm;
70 bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
71#endif
72
73 /* Check the reserved flags. */ 68 /* Check the reserved flags. */
74 if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) 69 if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
75 return -EINVAL; 70 return -EINVAL;
76 71
77 /* 72 flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
78 * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
79 * which generates unused variable warnings all over this function.
80 */
81#ifdef CONFIG_SMP
82 flush_icache_mm(mm, local);
83#else
84 flush_icache_all();
85#endif
86 73
87 return 0; 74 return 0;
88} 75}
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 80b27294c1de..ab9a0ebecc19 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
208 walk->dst.virt.addr, walk->src.virt.addr, n); 208 walk->dst.virt.addr, walk->src.virt.addr, n);
209 if (k) 209 if (k)
210 ret = blkcipher_walk_done(desc, walk, nbytes - k); 210 ret = blkcipher_walk_done(desc, walk, nbytes - k);
211 if (n < k) { 211 if (k < n) {
212 if (__cbc_paes_set_key(ctx) != 0) 212 if (__cbc_paes_set_key(ctx) != 0)
213 return blkcipher_walk_done(desc, walk, -EIO); 213 return blkcipher_walk_done(desc, walk, -EIO);
214 memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE); 214 memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f31a15044c24..a8418e1379eb 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,7 +16,13 @@ typedef struct {
16 unsigned long asce; 16 unsigned long asce;
17 unsigned long asce_limit; 17 unsigned long asce_limit;
18 unsigned long vdso_base; 18 unsigned long vdso_base;
19 /* The mmu context allocates 4K page tables. */ 19 /*
20 * The following bitfields need a down_write on the mm
21 * semaphore when they are written to. As they are only
22 * written once, they can be read without a lock.
23 *
24 * The mmu context allocates 4K page tables.
25 */
20 unsigned int alloc_pgste:1; 26 unsigned int alloc_pgste:1;
21 /* The mmu context uses extended page tables. */ 27 /* The mmu context uses extended page tables. */
22 unsigned int has_pgste:1; 28 unsigned int has_pgste:1;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 91ad4a9425c0..ac5da6b0b862 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
481 break; 481 break;
482 case KVM_CAP_S390_HPAGE_1M: 482 case KVM_CAP_S390_HPAGE_1M:
483 r = 0; 483 r = 0;
484 if (hpage) 484 if (hpage && !kvm_is_ucontrol(kvm))
485 r = 1; 485 r = 1;
486 break; 486 break;
487 case KVM_CAP_S390_MEM_OP: 487 case KVM_CAP_S390_MEM_OP:
@@ -691,11 +691,13 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
691 mutex_lock(&kvm->lock); 691 mutex_lock(&kvm->lock);
692 if (kvm->created_vcpus) 692 if (kvm->created_vcpus)
693 r = -EBUSY; 693 r = -EBUSY;
694 else if (!hpage || kvm->arch.use_cmma) 694 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
695 r = -EINVAL; 695 r = -EINVAL;
696 else { 696 else {
697 r = 0; 697 r = 0;
698 down_write(&kvm->mm->mmap_sem);
698 kvm->mm->context.allow_gmap_hpage_1m = 1; 699 kvm->mm->context.allow_gmap_hpage_1m = 1;
700 up_write(&kvm->mm->mmap_sem);
699 /* 701 /*
700 * We might have to create fake 4k page 702 * We might have to create fake 4k page
701 * tables. To avoid that the hardware works on 703 * tables. To avoid that the hardware works on
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d68f10441a16..8679bd74d337 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -280,9 +280,11 @@ retry:
280 goto retry; 280 goto retry;
281 } 281 }
282 } 282 }
283 if (rc)
284 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
285 up_read(&current->mm->mmap_sem); 283 up_read(&current->mm->mmap_sem);
284 if (rc == -EFAULT)
285 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
286 if (rc < 0)
287 return rc;
286 vcpu->run->s.regs.gprs[reg1] &= ~0xff; 288 vcpu->run->s.regs.gprs[reg1] &= ~0xff;
287 vcpu->run->s.regs.gprs[reg1] |= key; 289 vcpu->run->s.regs.gprs[reg1] |= key;
288 return 0; 290 return 0;
@@ -324,9 +326,11 @@ retry:
324 goto retry; 326 goto retry;
325 } 327 }
326 } 328 }
327 if (rc < 0)
328 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
329 up_read(&current->mm->mmap_sem); 329 up_read(&current->mm->mmap_sem);
330 if (rc == -EFAULT)
331 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
332 if (rc < 0)
333 return rc;
330 kvm_s390_set_psw_cc(vcpu, rc); 334 kvm_s390_set_psw_cc(vcpu, rc);
331 return 0; 335 return 0;
332} 336}
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
390 FAULT_FLAG_WRITE, &unlocked); 394 FAULT_FLAG_WRITE, &unlocked);
391 rc = !rc ? -EAGAIN : rc; 395 rc = !rc ? -EAGAIN : rc;
392 } 396 }
397 up_read(&current->mm->mmap_sem);
393 if (rc == -EFAULT) 398 if (rc == -EFAULT)
394 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 399 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
395 400 if (rc < 0)
396 up_read(&current->mm->mmap_sem); 401 return rc;
397 if (rc >= 0) 402 start += PAGE_SIZE;
398 start += PAGE_SIZE;
399 } 403 }
400 404
401 if (m3 & (SSKE_MC | SSKE_MR)) { 405 if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
1002 FAULT_FLAG_WRITE, &unlocked); 1006 FAULT_FLAG_WRITE, &unlocked);
1003 rc = !rc ? -EAGAIN : rc; 1007 rc = !rc ? -EAGAIN : rc;
1004 } 1008 }
1009 up_read(&current->mm->mmap_sem);
1005 if (rc == -EFAULT) 1010 if (rc == -EFAULT)
1006 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1011 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1007 1012 if (rc == -EAGAIN)
1008 up_read(&current->mm->mmap_sem); 1013 continue;
1009 if (rc >= 0) 1014 if (rc < 0)
1010 start += PAGE_SIZE; 1015 return rc;
1011 } 1016 }
1017 start += PAGE_SIZE;
1012 } 1018 }
1013 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { 1019 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
1014 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { 1020 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 63844b95c22c..a2b28cd1e3fe 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
173 return set_validity_icpt(scb_s, 0x0039U); 173 return set_validity_icpt(scb_s, 0x0039U);
174 174
175 /* copy only the wrapping keys */ 175 /* copy only the wrapping keys */
176 if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56)) 176 if (read_guest_real(vcpu, crycb_addr + 72,
177 vsie_page->crycb.dea_wrapping_key_mask, 56))
177 return set_validity_icpt(scb_s, 0x0035U); 178 return set_validity_icpt(scb_s, 0x0035U);
178 179
179 scb_s->ecb3 |= ecb3_flags; 180 scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bb44990c8212..911c7ded35f1 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
708 vmaddr |= gaddr & ~PMD_MASK; 708 vmaddr |= gaddr & ~PMD_MASK;
709 /* Find vma in the parent mm */ 709 /* Find vma in the parent mm */
710 vma = find_vma(gmap->mm, vmaddr); 710 vma = find_vma(gmap->mm, vmaddr);
711 if (!vma)
712 continue;
711 /* 713 /*
712 * We do not discard pages that are backed by 714 * We do not discard pages that are backed by
713 * hugetlbfs, so we don't have to refault them. 715 * hugetlbfs, so we don't have to refault them.
714 */ 716 */
715 if (vma && is_vm_hugetlb_page(vma)) 717 if (is_vm_hugetlb_page(vma))
716 continue; 718 continue;
717 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); 719 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
718 zap_page_range(vma, vmaddr, size); 720 zap_page_range(vma, vmaddr, size);
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 3641a294ed54..e4abe9b8f97a 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -9,6 +9,7 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/of_device.h> 10#include <linux/of_device.h>
11#include <linux/of_platform.h> 11#include <linux/of_platform.h>
12#include <linux/dma-mapping.h>
12#include <asm/leon.h> 13#include <asm/leon.h>
13#include <asm/leon_amba.h> 14#include <asm/leon_amba.h>
14 15
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
381 else 382 else
382 dev_set_name(&op->dev, "%08x", dp->phandle); 383 dev_set_name(&op->dev, "%08x", dp->phandle);
383 384
385 op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
386 op->dev.dma_mask = &op->dev.coherent_dma_mask;
387
384 if (of_device_register(op)) { 388 if (of_device_register(op)) {
385 printk("%s: Could not register of device.\n", 389 printk("%s: Could not register of device.\n",
386 dp->full_name); 390 dp->full_name);
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 44e4d4435bed..6df6086968c6 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -2,6 +2,7 @@
2#include <linux/string.h> 2#include <linux/string.h>
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/of.h> 4#include <linux/of.h>
5#include <linux/dma-mapping.h>
5#include <linux/init.h> 6#include <linux/init.h>
6#include <linux/export.h> 7#include <linux/export.h>
7#include <linux/mod_devicetable.h> 8#include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
675 dev_set_name(&op->dev, "root"); 676 dev_set_name(&op->dev, "root");
676 else 677 else
677 dev_set_name(&op->dev, "%08x", dp->phandle); 678 dev_set_name(&op->dev, "%08x", dp->phandle);
679 op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
680 op->dev.dma_mask = &op->dev.coherent_dma_mask;
678 681
679 if (of_device_register(op)) { 682 if (of_device_register(op)) {
680 printk("%s: Could not register of device.\n", 683 printk("%s: Could not register of device.\n",
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c5ff296bc5d1..1a0be022f91d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2843,7 +2843,7 @@ config X86_SYSFB
2843 This option, if enabled, marks VGA/VBE/EFI framebuffers as generic 2843 This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
2844 framebuffers so the new generic system-framebuffer drivers can be 2844 framebuffers so the new generic system-framebuffer drivers can be
2845 used on x86. If the framebuffer is not compatible with the generic 2845 used on x86. If the framebuffer is not compatible with the generic
2846 modes, it is adverticed as fallback platform framebuffer so legacy 2846 modes, it is advertised as fallback platform framebuffer so legacy
2847 drivers like efifb, vesafb and uvesafb can pick it up. 2847 drivers like efifb, vesafb and uvesafb can pick it up.
2848 If this option is not selected, all system framebuffers are always 2848 If this option is not selected, all system framebuffers are always
2849 marked as fallback platform framebuffers as usual. 2849 marked as fallback platform framebuffers as usual.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 94859241bc3e..8f6e7eb8ae9f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
175 endif 175 endif
176endif 176endif
177 177
178ifndef CC_HAVE_ASM_GOTO
179 $(error Compiler lacks asm-goto support.)
180endif
181
182#
183# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
184# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). There's no way
185# to test for this bug at compile-time because the test case needs to execute,
186# which is a no-go for cross compilers. So check the GCC version instead.
187#
188ifdef CONFIG_JUMP_LABEL
189 ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
190 ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
191 endif
192endif
193
194ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) 178ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
195 # This compiler flag is not supported by Clang: 179 # This compiler flag is not supported by Clang:
196 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) 180 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -312,6 +296,13 @@ PHONY += vdso_install
312vdso_install: 296vdso_install:
313 $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@ 297 $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
314 298
299archprepare: checkbin
300checkbin:
301ifndef CC_HAVE_ASM_GOTO
302 @echo Compiler lacks asm-goto support.
303 @exit 1
304endif
305
315archclean: 306archclean:
316 $(Q)rm -rf $(objtree)/arch/i386 307 $(Q)rm -rf $(objtree)/arch/i386
317 $(Q)rm -rf $(objtree)/arch/x86_64 308 $(Q)rm -rf $(objtree)/arch/x86_64
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..a480356e0ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
25 push %ebx 25 push %ebx
26 push %ecx 26 push %ecx
27 push %edx 27 push %edx
28 push %edi
29
30 /*
31 * RIP-relative addressing is needed to access the encryption bit
32 * variable. Since we are running in 32-bit mode we need this call/pop
33 * sequence to get the proper relative addressing.
34 */
35 call 1f
361: popl %edi
37 subl $1b, %edi
38
39 movl enc_bit(%edi), %eax
40 cmpl $0, %eax
41 jge .Lsev_exit
42 28
43 /* Check if running under a hypervisor */ 29 /* Check if running under a hypervisor */
44 movl $1, %eax 30 movl $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
69 55
70 movl %ebx, %eax 56 movl %ebx, %eax
71 andl $0x3f, %eax /* Return the encryption bit location */ 57 andl $0x3f, %eax /* Return the encryption bit location */
72 movl %eax, enc_bit(%edi)
73 jmp .Lsev_exit 58 jmp .Lsev_exit
74 59
75.Lno_sev: 60.Lno_sev:
76 xor %eax, %eax 61 xor %eax, %eax
77 movl %eax, enc_bit(%edi)
78 62
79.Lsev_exit: 63.Lsev_exit:
80 pop %edi
81 pop %edx 64 pop %edx
82 pop %ecx 65 pop %ecx
83 pop %ebx 66 pop %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
113ENDPROC(set_sev_encryption_mask) 96ENDPROC(set_sev_encryption_mask)
114 97
115 .data 98 .data
116enc_bit:
117 .int 0xffffffff
118 99
119#ifdef CONFIG_AMD_MEM_ENCRYPT 100#ifdef CONFIG_AMD_MEM_ENCRYPT
120 .balign 8 101 .balign 8
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index acd11b3bf639..2a356b948720 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
379{ 379{
380 if (!boot_cpu_has(X86_FEATURE_XMM2) || 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) || 381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 382 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
384 return -ENODEV; 383 return -ENODEV;
385 384
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 2071c3d1ae07..dbe8bb980da1 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
379{ 379{
380 if (!boot_cpu_has(X86_FEATURE_XMM2) || 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) || 381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 382 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
384 return -ENODEV; 383 return -ENODEV;
385 384
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index b5f2a8fd5a71..8bebda2de92f 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
379{ 379{
380 if (!boot_cpu_has(X86_FEATURE_XMM2) || 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) || 381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 382 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
384 return -ENODEV; 383 return -ENODEV;
385 384
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 9bd139569b41..cb2deb61c5d9 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -223,34 +223,34 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
223 pcmpeqd TWOONE(%rip), \TMP2 223 pcmpeqd TWOONE(%rip), \TMP2
224 pand POLY(%rip), \TMP2 224 pand POLY(%rip), \TMP2
225 pxor \TMP2, \TMP3 225 pxor \TMP2, \TMP3
226 movdqa \TMP3, HashKey(%arg2) 226 movdqu \TMP3, HashKey(%arg2)
227 227
228 movdqa \TMP3, \TMP5 228 movdqa \TMP3, \TMP5
229 pshufd $78, \TMP3, \TMP1 229 pshufd $78, \TMP3, \TMP1
230 pxor \TMP3, \TMP1 230 pxor \TMP3, \TMP1
231 movdqa \TMP1, HashKey_k(%arg2) 231 movdqu \TMP1, HashKey_k(%arg2)
232 232
233 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 233 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
234# TMP5 = HashKey^2<<1 (mod poly) 234# TMP5 = HashKey^2<<1 (mod poly)
235 movdqa \TMP5, HashKey_2(%arg2) 235 movdqu \TMP5, HashKey_2(%arg2)
236# HashKey_2 = HashKey^2<<1 (mod poly) 236# HashKey_2 = HashKey^2<<1 (mod poly)
237 pshufd $78, \TMP5, \TMP1 237 pshufd $78, \TMP5, \TMP1
238 pxor \TMP5, \TMP1 238 pxor \TMP5, \TMP1
239 movdqa \TMP1, HashKey_2_k(%arg2) 239 movdqu \TMP1, HashKey_2_k(%arg2)
240 240
241 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 241 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
242# TMP5 = HashKey^3<<1 (mod poly) 242# TMP5 = HashKey^3<<1 (mod poly)
243 movdqa \TMP5, HashKey_3(%arg2) 243 movdqu \TMP5, HashKey_3(%arg2)
244 pshufd $78, \TMP5, \TMP1 244 pshufd $78, \TMP5, \TMP1
245 pxor \TMP5, \TMP1 245 pxor \TMP5, \TMP1
246 movdqa \TMP1, HashKey_3_k(%arg2) 246 movdqu \TMP1, HashKey_3_k(%arg2)
247 247
248 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7 248 GHASH_MUL \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
249# TMP5 = HashKey^3<<1 (mod poly) 249# TMP5 = HashKey^3<<1 (mod poly)
250 movdqa \TMP5, HashKey_4(%arg2) 250 movdqu \TMP5, HashKey_4(%arg2)
251 pshufd $78, \TMP5, \TMP1 251 pshufd $78, \TMP5, \TMP1
252 pxor \TMP5, \TMP1 252 pxor \TMP5, \TMP1
253 movdqa \TMP1, HashKey_4_k(%arg2) 253 movdqu \TMP1, HashKey_4_k(%arg2)
254.endm 254.endm
255 255
256# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding. 256# GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff
271 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv 271 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
272 272
273 PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, 273 PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
274 movdqa HashKey(%arg2), %xmm13 274 movdqu HashKey(%arg2), %xmm13
275 275
276 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ 276 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
277 %xmm4, %xmm5, %xmm6 277 %xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
997 pshufd $78, \XMM5, \TMP6 997 pshufd $78, \XMM5, \TMP6
998 pxor \XMM5, \TMP6 998 pxor \XMM5, \TMP6
999 paddd ONE(%rip), \XMM0 # INCR CNT 999 paddd ONE(%rip), \XMM0 # INCR CNT
1000 movdqa HashKey_4(%arg2), \TMP5 1000 movdqu HashKey_4(%arg2), \TMP5
1001 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 1001 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1002 movdqa \XMM0, \XMM1 1002 movdqa \XMM0, \XMM1
1003 paddd ONE(%rip), \XMM0 # INCR CNT 1003 paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1016 pxor (%arg1), \XMM2 1016 pxor (%arg1), \XMM2
1017 pxor (%arg1), \XMM3 1017 pxor (%arg1), \XMM3
1018 pxor (%arg1), \XMM4 1018 pxor (%arg1), \XMM4
1019 movdqa HashKey_4_k(%arg2), \TMP5 1019 movdqu HashKey_4_k(%arg2), \TMP5
1020 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) 1020 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
1021 movaps 0x10(%arg1), \TMP1 1021 movaps 0x10(%arg1), \TMP1
1022 AESENC \TMP1, \XMM1 # Round 1 1022 AESENC \TMP1, \XMM1 # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1031 movdqa \XMM6, \TMP1 1031 movdqa \XMM6, \TMP1
1032 pshufd $78, \XMM6, \TMP2 1032 pshufd $78, \XMM6, \TMP2
1033 pxor \XMM6, \TMP2 1033 pxor \XMM6, \TMP2
1034 movdqa HashKey_3(%arg2), \TMP5 1034 movdqu HashKey_3(%arg2), \TMP5
1035 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 1035 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1036 movaps 0x30(%arg1), \TMP3 1036 movaps 0x30(%arg1), \TMP3
1037 AESENC \TMP3, \XMM1 # Round 3 1037 AESENC \TMP3, \XMM1 # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1044 AESENC \TMP3, \XMM2 1044 AESENC \TMP3, \XMM2
1045 AESENC \TMP3, \XMM3 1045 AESENC \TMP3, \XMM3
1046 AESENC \TMP3, \XMM4 1046 AESENC \TMP3, \XMM4
1047 movdqa HashKey_3_k(%arg2), \TMP5 1047 movdqu HashKey_3_k(%arg2), \TMP5
1048 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1048 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1049 movaps 0x50(%arg1), \TMP3 1049 movaps 0x50(%arg1), \TMP3
1050 AESENC \TMP3, \XMM1 # Round 5 1050 AESENC \TMP3, \XMM1 # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1058 movdqa \XMM7, \TMP1 1058 movdqa \XMM7, \TMP1
1059 pshufd $78, \XMM7, \TMP2 1059 pshufd $78, \XMM7, \TMP2
1060 pxor \XMM7, \TMP2 1060 pxor \XMM7, \TMP2
1061 movdqa HashKey_2(%arg2), \TMP5 1061 movdqu HashKey_2(%arg2), \TMP5
1062 1062
1063 # Multiply TMP5 * HashKey using karatsuba 1063 # Multiply TMP5 * HashKey using karatsuba
1064 1064
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1074 AESENC \TMP3, \XMM2 1074 AESENC \TMP3, \XMM2
1075 AESENC \TMP3, \XMM3 1075 AESENC \TMP3, \XMM3
1076 AESENC \TMP3, \XMM4 1076 AESENC \TMP3, \XMM4
1077 movdqa HashKey_2_k(%arg2), \TMP5 1077 movdqu HashKey_2_k(%arg2), \TMP5
1078 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1078 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1079 movaps 0x80(%arg1), \TMP3 1079 movaps 0x80(%arg1), \TMP3
1080 AESENC \TMP3, \XMM1 # Round 8 1080 AESENC \TMP3, \XMM1 # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1092 movdqa \XMM8, \TMP1 1092 movdqa \XMM8, \TMP1
1093 pshufd $78, \XMM8, \TMP2 1093 pshufd $78, \XMM8, \TMP2
1094 pxor \XMM8, \TMP2 1094 pxor \XMM8, \TMP2
1095 movdqa HashKey(%arg2), \TMP5 1095 movdqu HashKey(%arg2), \TMP5
1096 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1096 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1097 movaps 0x90(%arg1), \TMP3 1097 movaps 0x90(%arg1), \TMP3
1098 AESENC \TMP3, \XMM1 # Round 9 1098 AESENC \TMP3, \XMM1 # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
1121 AESENCLAST \TMP3, \XMM2 1121 AESENCLAST \TMP3, \XMM2
1122 AESENCLAST \TMP3, \XMM3 1122 AESENCLAST \TMP3, \XMM3
1123 AESENCLAST \TMP3, \XMM4 1123 AESENCLAST \TMP3, \XMM4
1124 movdqa HashKey_k(%arg2), \TMP5 1124 movdqu HashKey_k(%arg2), \TMP5
1125 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1125 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1126 movdqu (%arg4,%r11,1), \TMP3 1126 movdqu (%arg4,%r11,1), \TMP3
1127 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK 1127 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1205 pshufd $78, \XMM5, \TMP6 1205 pshufd $78, \XMM5, \TMP6
1206 pxor \XMM5, \TMP6 1206 pxor \XMM5, \TMP6
1207 paddd ONE(%rip), \XMM0 # INCR CNT 1207 paddd ONE(%rip), \XMM0 # INCR CNT
1208 movdqa HashKey_4(%arg2), \TMP5 1208 movdqu HashKey_4(%arg2), \TMP5
1209 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1 1209 PCLMULQDQ 0x11, \TMP5, \TMP4 # TMP4 = a1*b1
1210 movdqa \XMM0, \XMM1 1210 movdqa \XMM0, \XMM1
1211 paddd ONE(%rip), \XMM0 # INCR CNT 1211 paddd ONE(%rip), \XMM0 # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1224 pxor (%arg1), \XMM2 1224 pxor (%arg1), \XMM2
1225 pxor (%arg1), \XMM3 1225 pxor (%arg1), \XMM3
1226 pxor (%arg1), \XMM4 1226 pxor (%arg1), \XMM4
1227 movdqa HashKey_4_k(%arg2), \TMP5 1227 movdqu HashKey_4_k(%arg2), \TMP5
1228 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0) 1228 PCLMULQDQ 0x00, \TMP5, \TMP6 # TMP6 = (a1+a0)*(b1+b0)
1229 movaps 0x10(%arg1), \TMP1 1229 movaps 0x10(%arg1), \TMP1
1230 AESENC \TMP1, \XMM1 # Round 1 1230 AESENC \TMP1, \XMM1 # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1239 movdqa \XMM6, \TMP1 1239 movdqa \XMM6, \TMP1
1240 pshufd $78, \XMM6, \TMP2 1240 pshufd $78, \XMM6, \TMP2
1241 pxor \XMM6, \TMP2 1241 pxor \XMM6, \TMP2
1242 movdqa HashKey_3(%arg2), \TMP5 1242 movdqu HashKey_3(%arg2), \TMP5
1243 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1 1243 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1 * b1
1244 movaps 0x30(%arg1), \TMP3 1244 movaps 0x30(%arg1), \TMP3
1245 AESENC \TMP3, \XMM1 # Round 3 1245 AESENC \TMP3, \XMM1 # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1252 AESENC \TMP3, \XMM2 1252 AESENC \TMP3, \XMM2
1253 AESENC \TMP3, \XMM3 1253 AESENC \TMP3, \XMM3
1254 AESENC \TMP3, \XMM4 1254 AESENC \TMP3, \XMM4
1255 movdqa HashKey_3_k(%arg2), \TMP5 1255 movdqu HashKey_3_k(%arg2), \TMP5
1256 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1256 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1257 movaps 0x50(%arg1), \TMP3 1257 movaps 0x50(%arg1), \TMP3
1258 AESENC \TMP3, \XMM1 # Round 5 1258 AESENC \TMP3, \XMM1 # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1266 movdqa \XMM7, \TMP1 1266 movdqa \XMM7, \TMP1
1267 pshufd $78, \XMM7, \TMP2 1267 pshufd $78, \XMM7, \TMP2
1268 pxor \XMM7, \TMP2 1268 pxor \XMM7, \TMP2
1269 movdqa HashKey_2(%arg2), \TMP5 1269 movdqu HashKey_2(%arg2), \TMP5
1270 1270
1271 # Multiply TMP5 * HashKey using karatsuba 1271 # Multiply TMP5 * HashKey using karatsuba
1272 1272
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1282 AESENC \TMP3, \XMM2 1282 AESENC \TMP3, \XMM2
1283 AESENC \TMP3, \XMM3 1283 AESENC \TMP3, \XMM3
1284 AESENC \TMP3, \XMM4 1284 AESENC \TMP3, \XMM4
1285 movdqa HashKey_2_k(%arg2), \TMP5 1285 movdqu HashKey_2_k(%arg2), \TMP5
1286 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1286 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1287 movaps 0x80(%arg1), \TMP3 1287 movaps 0x80(%arg1), \TMP3
1288 AESENC \TMP3, \XMM1 # Round 8 1288 AESENC \TMP3, \XMM1 # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
1300 movdqa \XMM8, \TMP1 1300 movdqa \XMM8, \TMP1
1301 pshufd $78, \XMM8, \TMP2 1301 pshufd $78, \XMM8, \TMP2
1302 pxor \XMM8, \TMP2 1302 pxor \XMM8, \TMP2
1303 movdqa HashKey(%arg2), \TMP5 1303 movdqu HashKey(%arg2), \TMP5
1304 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1304 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1305 movaps 0x90(%arg1), \TMP3 1305 movaps 0x90(%arg1), \TMP3
1306 AESENC \TMP3, \XMM1 # Round 9 1306 AESENC \TMP3, \XMM1 # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
1329 AESENCLAST \TMP3, \XMM2 1329 AESENCLAST \TMP3, \XMM2
1330 AESENCLAST \TMP3, \XMM3 1330 AESENCLAST \TMP3, \XMM3
1331 AESENCLAST \TMP3, \XMM4 1331 AESENCLAST \TMP3, \XMM4
1332 movdqa HashKey_k(%arg2), \TMP5 1332 movdqu HashKey_k(%arg2), \TMP5
1333 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1333 PCLMULQDQ 0x00, \TMP5, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1334 movdqu (%arg4,%r11,1), \TMP3 1334 movdqu (%arg4,%r11,1), \TMP3
1335 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK 1335 pxor \TMP3, \XMM1 # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1405 movdqa \XMM1, \TMP6 1405 movdqa \XMM1, \TMP6
1406 pshufd $78, \XMM1, \TMP2 1406 pshufd $78, \XMM1, \TMP2
1407 pxor \XMM1, \TMP2 1407 pxor \XMM1, \TMP2
1408 movdqa HashKey_4(%arg2), \TMP5 1408 movdqu HashKey_4(%arg2), \TMP5
1409 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1 1409 PCLMULQDQ 0x11, \TMP5, \TMP6 # TMP6 = a1*b1
1410 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0 1410 PCLMULQDQ 0x00, \TMP5, \XMM1 # XMM1 = a0*b0
1411 movdqa HashKey_4_k(%arg2), \TMP4 1411 movdqu HashKey_4_k(%arg2), \TMP4
1412 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1412 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1413 movdqa \XMM1, \XMMDst 1413 movdqa \XMM1, \XMMDst
1414 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1 1414 movdqa \TMP2, \XMM1 # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1418 movdqa \XMM2, \TMP1 1418 movdqa \XMM2, \TMP1
1419 pshufd $78, \XMM2, \TMP2 1419 pshufd $78, \XMM2, \TMP2
1420 pxor \XMM2, \TMP2 1420 pxor \XMM2, \TMP2
1421 movdqa HashKey_3(%arg2), \TMP5 1421 movdqu HashKey_3(%arg2), \TMP5
1422 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1422 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1423 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0 1423 PCLMULQDQ 0x00, \TMP5, \XMM2 # XMM2 = a0*b0
1424 movdqa HashKey_3_k(%arg2), \TMP4 1424 movdqu HashKey_3_k(%arg2), \TMP4
1425 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1425 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1426 pxor \TMP1, \TMP6 1426 pxor \TMP1, \TMP6
1427 pxor \XMM2, \XMMDst 1427 pxor \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1433 movdqa \XMM3, \TMP1 1433 movdqa \XMM3, \TMP1
1434 pshufd $78, \XMM3, \TMP2 1434 pshufd $78, \XMM3, \TMP2
1435 pxor \XMM3, \TMP2 1435 pxor \XMM3, \TMP2
1436 movdqa HashKey_2(%arg2), \TMP5 1436 movdqu HashKey_2(%arg2), \TMP5
1437 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1437 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1438 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0 1438 PCLMULQDQ 0x00, \TMP5, \XMM3 # XMM3 = a0*b0
1439 movdqa HashKey_2_k(%arg2), \TMP4 1439 movdqu HashKey_2_k(%arg2), \TMP4
1440 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1440 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1441 pxor \TMP1, \TMP6 1441 pxor \TMP1, \TMP6
1442 pxor \XMM3, \XMMDst 1442 pxor \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
1446 movdqa \XMM4, \TMP1 1446 movdqa \XMM4, \TMP1
1447 pshufd $78, \XMM4, \TMP2 1447 pshufd $78, \XMM4, \TMP2
1448 pxor \XMM4, \TMP2 1448 pxor \XMM4, \TMP2
1449 movdqa HashKey(%arg2), \TMP5 1449 movdqu HashKey(%arg2), \TMP5
1450 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1 1450 PCLMULQDQ 0x11, \TMP5, \TMP1 # TMP1 = a1*b1
1451 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0 1451 PCLMULQDQ 0x00, \TMP5, \XMM4 # XMM4 = a0*b0
1452 movdqa HashKey_k(%arg2), \TMP4 1452 movdqu HashKey_k(%arg2), \TMP4
1453 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0) 1453 PCLMULQDQ 0x00, \TMP4, \TMP2 # TMP2 = (a1+a0)*(b1+b0)
1454 pxor \TMP1, \TMP6 1454 pxor \TMP1, \TMP6
1455 pxor \XMM4, \XMMDst 1455 pxor \XMM4, \XMMDst
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 95cf857d2cbb..f40244eaf14d 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
40static int __init crypto_morus1280_sse2_module_init(void) 40static int __init crypto_morus1280_sse2_module_init(void)
41{ 41{
42 if (!boot_cpu_has(X86_FEATURE_XMM2) || 42 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
43 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 43 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
45 return -ENODEV; 44 return -ENODEV;
46 45
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 615fb7bc9a32..9afaf8f8565a 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
40static int __init crypto_morus640_sse2_module_init(void) 40static int __init crypto_morus640_sse2_module_init(void)
41{ 41{
42 if (!boot_cpu_has(X86_FEATURE_XMM2) || 42 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
43 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) 43 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
45 return -ENODEV; 44 return -ENODEV;
46 45
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 5f4829f10129..dfb2f7c0d019 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
2465 2465
2466 perf_callchain_store(entry, regs->ip); 2466 perf_callchain_store(entry, regs->ip);
2467 2467
2468 if (!current->mm) 2468 if (!nmi_uaccess_okay())
2469 return; 2469 return;
2470 2470
2471 if (perf_callchain_user32(regs, entry)) 2471 if (perf_callchain_user32(regs, entry))
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index f3e006bed9a7..c88ed39582a1 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
1272 1272
1273 x86_pmu.lbr_sel_mask = LBR_SEL_MASK; 1273 x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
1274 x86_pmu.lbr_sel_map = snb_lbr_sel_map; 1274 x86_pmu.lbr_sel_map = snb_lbr_sel_map;
1275
1276 /* Knights Landing does have MISPREDICT bit */
1277 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
1278 x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
1275} 1279}
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5b0f613428c2..2c43e3055948 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
95 */ 95 */
96static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector) 96static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
97{ 97{
98 struct ipi_arg_ex **arg; 98 struct hv_send_ipi_ex **arg;
99 struct ipi_arg_ex *ipi_arg; 99 struct hv_send_ipi_ex *ipi_arg;
100 unsigned long flags; 100 unsigned long flags;
101 int nr_bank = 0; 101 int nr_bank = 0;
102 int ret = 1; 102 int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
105 return false; 105 return false;
106 106
107 local_irq_save(flags); 107 local_irq_save(flags);
108 arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg); 108 arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
109 109
110 ipi_arg = *arg; 110 ipi_arg = *arg;
111 if (unlikely(!ipi_arg)) 111 if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ ipi_mask_ex_done:
135static bool __send_ipi_mask(const struct cpumask *mask, int vector) 135static bool __send_ipi_mask(const struct cpumask *mask, int vector)
136{ 136{
137 int cur_cpu, vcpu; 137 int cur_cpu, vcpu;
138 struct ipi_arg_non_ex ipi_arg; 138 struct hv_send_ipi ipi_arg;
139 int ret = 1; 139 int ret = 1;
140 140
141 trace_hyperv_send_ipi_mask(mask, vector); 141 trace_hyperv_send_ipi_mask(mask, vector);
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b143717b92b3..ce84388e540c 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
80 * true if the result is zero, or false for all 80 * true if the result is zero, or false for all
81 * other cases. 81 * other cases.
82 */ 82 */
83#define arch_atomic_sub_and_test arch_atomic_sub_and_test
84static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) 83static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
85{ 84{
86 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); 85 GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
87} 86}
87#define arch_atomic_sub_and_test arch_atomic_sub_and_test
88 88
89/** 89/**
90 * arch_atomic_inc - increment atomic variable 90 * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
92 * 92 *
93 * Atomically increments @v by 1. 93 * Atomically increments @v by 1.
94 */ 94 */
95#define arch_atomic_inc arch_atomic_inc
96static __always_inline void arch_atomic_inc(atomic_t *v) 95static __always_inline void arch_atomic_inc(atomic_t *v)
97{ 96{
98 asm volatile(LOCK_PREFIX "incl %0" 97 asm volatile(LOCK_PREFIX "incl %0"
99 : "+m" (v->counter)); 98 : "+m" (v->counter));
100} 99}
100#define arch_atomic_inc arch_atomic_inc
101 101
102/** 102/**
103 * arch_atomic_dec - decrement atomic variable 103 * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
105 * 105 *
106 * Atomically decrements @v by 1. 106 * Atomically decrements @v by 1.
107 */ 107 */
108#define arch_atomic_dec arch_atomic_dec
109static __always_inline void arch_atomic_dec(atomic_t *v) 108static __always_inline void arch_atomic_dec(atomic_t *v)
110{ 109{
111 asm volatile(LOCK_PREFIX "decl %0" 110 asm volatile(LOCK_PREFIX "decl %0"
112 : "+m" (v->counter)); 111 : "+m" (v->counter));
113} 112}
113#define arch_atomic_dec arch_atomic_dec
114 114
115/** 115/**
116 * arch_atomic_dec_and_test - decrement and test 116 * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
120 * returns true if the result is 0, or false for all other 120 * returns true if the result is 0, or false for all other
121 * cases. 121 * cases.
122 */ 122 */
123#define arch_atomic_dec_and_test arch_atomic_dec_and_test
124static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) 123static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
125{ 124{
126 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); 125 GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
127} 126}
127#define arch_atomic_dec_and_test arch_atomic_dec_and_test
128 128
129/** 129/**
130 * arch_atomic_inc_and_test - increment and test 130 * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
134 * and returns true if the result is zero, or false for all 134 * and returns true if the result is zero, or false for all
135 * other cases. 135 * other cases.
136 */ 136 */
137#define arch_atomic_inc_and_test arch_atomic_inc_and_test
138static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) 137static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
139{ 138{
140 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); 139 GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
141} 140}
141#define arch_atomic_inc_and_test arch_atomic_inc_and_test
142 142
143/** 143/**
144 * arch_atomic_add_negative - add and test if negative 144 * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
149 * if the result is negative, or false when 149 * if the result is negative, or false when
150 * result is greater than or equal to zero. 150 * result is greater than or equal to zero.
151 */ 151 */
152#define arch_atomic_add_negative arch_atomic_add_negative
153static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) 152static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
154{ 153{
155 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); 154 GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
156} 155}
156#define arch_atomic_add_negative arch_atomic_add_negative
157 157
158/** 158/**
159 * arch_atomic_add_return - add integer and return 159 * arch_atomic_add_return - add integer and return
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index ef959f02d070..6a5b0ec460da 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
205 * 205 *
206 * Atomically increments @v by 1. 206 * Atomically increments @v by 1.
207 */ 207 */
208#define arch_atomic64_inc arch_atomic64_inc
209static inline void arch_atomic64_inc(atomic64_t *v) 208static inline void arch_atomic64_inc(atomic64_t *v)
210{ 209{
211 __alternative_atomic64(inc, inc_return, /* no output */, 210 __alternative_atomic64(inc, inc_return, /* no output */,
212 "S" (v) : "memory", "eax", "ecx", "edx"); 211 "S" (v) : "memory", "eax", "ecx", "edx");
213} 212}
213#define arch_atomic64_inc arch_atomic64_inc
214 214
215/** 215/**
216 * arch_atomic64_dec - decrement atomic64 variable 216 * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
218 * 218 *
219 * Atomically decrements @v by 1. 219 * Atomically decrements @v by 1.
220 */ 220 */
221#define arch_atomic64_dec arch_atomic64_dec
222static inline void arch_atomic64_dec(atomic64_t *v) 221static inline void arch_atomic64_dec(atomic64_t *v)
223{ 222{
224 __alternative_atomic64(dec, dec_return, /* no output */, 223 __alternative_atomic64(dec, dec_return, /* no output */,
225 "S" (v) : "memory", "eax", "ecx", "edx"); 224 "S" (v) : "memory", "eax", "ecx", "edx");
226} 225}
226#define arch_atomic64_dec arch_atomic64_dec
227 227
228/** 228/**
229 * arch_atomic64_add_unless - add unless the number is a given value 229 * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
245 return (int)a; 245 return (int)a;
246} 246}
247 247
248#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
249static inline int arch_atomic64_inc_not_zero(atomic64_t *v) 248static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
250{ 249{
251 int r; 250 int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
253 "S" (v) : "ecx", "edx", "memory"); 252 "S" (v) : "ecx", "edx", "memory");
254 return r; 253 return r;
255} 254}
255#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
256 256
257#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
258static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) 257static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
259{ 258{
260 long long r; 259 long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
262 "S" (v) : "ecx", "memory"); 261 "S" (v) : "ecx", "memory");
263 return r; 262 return r;
264} 263}
264#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
265 265
266#undef alternative_atomic64 266#undef alternative_atomic64
267#undef __alternative_atomic64 267#undef __alternative_atomic64
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 4343d9b4f30e..5f851d92eecd 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
71 * true if the result is zero, or false for all 71 * true if the result is zero, or false for all
72 * other cases. 72 * other cases.
73 */ 73 */
74#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
75static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) 74static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
76{ 75{
77 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); 76 GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
78} 77}
78#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
79 79
80/** 80/**
81 * arch_atomic64_inc - increment atomic64 variable 81 * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
83 * 83 *
84 * Atomically increments @v by 1. 84 * Atomically increments @v by 1.
85 */ 85 */
86#define arch_atomic64_inc arch_atomic64_inc
87static __always_inline void arch_atomic64_inc(atomic64_t *v) 86static __always_inline void arch_atomic64_inc(atomic64_t *v)
88{ 87{
89 asm volatile(LOCK_PREFIX "incq %0" 88 asm volatile(LOCK_PREFIX "incq %0"
90 : "=m" (v->counter) 89 : "=m" (v->counter)
91 : "m" (v->counter)); 90 : "m" (v->counter));
92} 91}
92#define arch_atomic64_inc arch_atomic64_inc
93 93
94/** 94/**
95 * arch_atomic64_dec - decrement atomic64 variable 95 * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
97 * 97 *
98 * Atomically decrements @v by 1. 98 * Atomically decrements @v by 1.
99 */ 99 */
100#define arch_atomic64_dec arch_atomic64_dec
101static __always_inline void arch_atomic64_dec(atomic64_t *v) 100static __always_inline void arch_atomic64_dec(atomic64_t *v)
102{ 101{
103 asm volatile(LOCK_PREFIX "decq %0" 102 asm volatile(LOCK_PREFIX "decq %0"
104 : "=m" (v->counter) 103 : "=m" (v->counter)
105 : "m" (v->counter)); 104 : "m" (v->counter));
106} 105}
106#define arch_atomic64_dec arch_atomic64_dec
107 107
108/** 108/**
109 * arch_atomic64_dec_and_test - decrement and test 109 * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
113 * returns true if the result is 0, or false for all other 113 * returns true if the result is 0, or false for all other
114 * cases. 114 * cases.
115 */ 115 */
116#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
117static inline bool arch_atomic64_dec_and_test(atomic64_t *v) 116static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
118{ 117{
119 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); 118 GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
120} 119}
120#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
121 121
122/** 122/**
123 * arch_atomic64_inc_and_test - increment and test 123 * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
127 * and returns true if the result is zero, or false for all 127 * and returns true if the result is zero, or false for all
128 * other cases. 128 * other cases.
129 */ 129 */
130#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
131static inline bool arch_atomic64_inc_and_test(atomic64_t *v) 130static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
132{ 131{
133 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); 132 GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
134} 133}
134#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
135 135
136/** 136/**
137 * arch_atomic64_add_negative - add and test if negative 137 * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
142 * if the result is negative, or false when 142 * if the result is negative, or false when
143 * result is greater than or equal to zero. 143 * result is greater than or equal to zero.
144 */ 144 */
145#define arch_atomic64_add_negative arch_atomic64_add_negative
146static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) 145static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
147{ 146{
148 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); 147 GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
149} 148}
149#define arch_atomic64_add_negative arch_atomic64_add_negative
150 150
151/** 151/**
152 * arch_atomic64_add_return - add and return 152 * arch_atomic64_add_return - add and return
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..6390bd8c141b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
14#ifndef _ASM_X86_FIXMAP_H 14#ifndef _ASM_X86_FIXMAP_H
15#define _ASM_X86_FIXMAP_H 15#define _ASM_X86_FIXMAP_H
16 16
17/*
18 * Exposed to assembly code for setting up initial page tables. Cannot be
19 * calculated in assembly code (fixmap entries are an enum), but is sanity
20 * checked in the actual fixmap C code to make sure that the fixmap is
21 * covered fully.
22 */
23#define FIXMAP_PMD_NUM 2
24/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
25#define FIXMAP_PMD_TOP 507
26
17#ifndef __ASSEMBLY__ 27#ifndef __ASSEMBLY__
18#include <linux/kernel.h> 28#include <linux/kernel.h>
19#include <asm/acpi.h> 29#include <asm/acpi.h>
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..00e01d215f74 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
726#define HV_STIMER_AUTOENABLE (1ULL << 3) 726#define HV_STIMER_AUTOENABLE (1ULL << 3)
727#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F) 727#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
728 728
729struct ipi_arg_non_ex {
730 u32 vector;
731 u32 reserved;
732 u64 cpu_mask;
733};
734
735struct hv_vpset { 729struct hv_vpset {
736 u64 format; 730 u64 format;
737 u64 valid_bank_mask; 731 u64 valid_bank_mask;
738 u64 bank_contents[]; 732 u64 bank_contents[];
739}; 733};
740 734
741struct ipi_arg_ex { 735/* HvCallSendSyntheticClusterIpi hypercall */
736struct hv_send_ipi {
737 u32 vector;
738 u32 reserved;
739 u64 cpu_mask;
740};
741
742/* HvCallSendSyntheticClusterIpiEx hypercall */
743struct hv_send_ipi_ex {
742 u32 vector; 744 u32 vector;
743 u32 reserved; 745 u32 reserved;
744 struct hv_vpset vp_set; 746 struct hv_vpset vp_set;
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c14f2a74b2be..15450a675031 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
33 return flags; 33 return flags;
34} 34}
35 35
36static inline void native_restore_fl(unsigned long flags) 36extern inline void native_restore_fl(unsigned long flags);
37extern inline void native_restore_fl(unsigned long flags)
37{ 38{
38 asm volatile("push %0 ; popf" 39 asm volatile("push %0 ; popf"
39 : /* no output */ 40 : /* no output */
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 395c9631e000..75f1e35e7c15 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -22,10 +22,20 @@ enum die_val {
22 DIE_NMIUNKNOWN, 22 DIE_NMIUNKNOWN,
23}; 23};
24 24
25enum show_regs_mode {
26 SHOW_REGS_SHORT,
27 /*
28 * For when userspace crashed, but we don't think it's our fault, and
29 * therefore don't print kernel registers.
30 */
31 SHOW_REGS_USER,
32 SHOW_REGS_ALL
33};
34
25extern void die(const char *, struct pt_regs *,long); 35extern void die(const char *, struct pt_regs *,long);
26extern int __must_check __die(const char *, struct pt_regs *, long); 36extern int __must_check __die(const char *, struct pt_regs *, long);
27extern void show_stack_regs(struct pt_regs *regs); 37extern void show_stack_regs(struct pt_regs *regs);
28extern void __show_regs(struct pt_regs *regs, int all); 38extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
29extern void show_iret_regs(struct pt_regs *regs); 39extern void show_iret_regs(struct pt_regs *regs);
30extern unsigned long oops_begin(void); 40extern unsigned long oops_begin(void);
31extern void oops_end(unsigned long, struct pt_regs *, int signr); 41extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00ddb0c9e612..09b2e3e2cf1b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -869,6 +869,8 @@ struct kvm_arch {
869 869
870 bool x2apic_format; 870 bool x2apic_format;
871 bool x2apic_broadcast_quirk_disabled; 871 bool x2apic_broadcast_quirk_disabled;
872
873 bool guest_can_read_msr_platform_info;
872}; 874};
873 875
874struct kvm_vm_stat { 876struct kvm_vm_stat {
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops {
1022 void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); 1024 void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1023 void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); 1025 void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
1024 void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); 1026 void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1027 bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
1025 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); 1028 void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1026 void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); 1029 void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1027 void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); 1030 void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops {
1055 bool (*umip_emulated)(void); 1058 bool (*umip_emulated)(void);
1056 1059
1057 int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr); 1060 int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
1061 void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
1058 1062
1059 void (*sched_in)(struct kvm_vcpu *kvm, int cpu); 1063 void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
1060 1064
@@ -1237,19 +1241,12 @@ enum emulation_result {
1237#define EMULTYPE_NO_DECODE (1 << 0) 1241#define EMULTYPE_NO_DECODE (1 << 0)
1238#define EMULTYPE_TRAP_UD (1 << 1) 1242#define EMULTYPE_TRAP_UD (1 << 1)
1239#define EMULTYPE_SKIP (1 << 2) 1243#define EMULTYPE_SKIP (1 << 2)
1240#define EMULTYPE_RETRY (1 << 3) 1244#define EMULTYPE_ALLOW_RETRY (1 << 3)
1241#define EMULTYPE_NO_REEXECUTE (1 << 4) 1245#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
1242#define EMULTYPE_NO_UD_ON_FAIL (1 << 5) 1246#define EMULTYPE_VMWARE (1 << 5)
1243#define EMULTYPE_VMWARE (1 << 6) 1247int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
1244int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, 1248int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
1245 int emulation_type, void *insn, int insn_len); 1249 void *insn, int insn_len);
1246
1247static inline int emulate_instruction(struct kvm_vcpu *vcpu,
1248 int emulation_type)
1249{
1250 return x86_emulate_instruction(vcpu, 0,
1251 emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
1252}
1253 1250
1254void kvm_enable_efer_bits(u64); 1251void kvm_enable_efer_bits(u64);
1255bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer); 1252bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1447,6 @@ asmlinkage void kvm_spurious_fault(void);
1450 ____kvm_handle_fault_on_reboot(insn, "") 1447 ____kvm_handle_fault_on_reboot(insn, "")
1451 1448
1452#define KVM_ARCH_WANT_MMU_NOTIFIER 1449#define KVM_ARCH_WANT_MMU_NOTIFIER
1453int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
1454int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); 1450int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
1455int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); 1451int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
1456int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 1452int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1459,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
1463void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu); 1459void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
1464 1460
1465int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, 1461int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
1466 unsigned long ipi_bitmap_high, int min, 1462 unsigned long ipi_bitmap_high, u32 min,
1467 unsigned long icr, int op_64_bit); 1463 unsigned long icr, int op_64_bit);
1468 1464
1469u64 kvm_get_arch_capabilities(void); 1465u64 kvm_get_arch_capabilities(void);
@@ -1490,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1490 1486
1491int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu); 1487int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
1492int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); 1488int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1489void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
1493 1490
1494int kvm_is_in_guest(void); 1491int kvm_is_in_guest(void);
1495 1492
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c0643831706e..616f8e637bc3 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
48 48
49/* Architecture __weak replacement functions */ 49/* Architecture __weak replacement functions */
50void __init mem_encrypt_init(void); 50void __init mem_encrypt_init(void);
51void __init mem_encrypt_free_decrypted_mem(void);
51 52
52bool sme_active(void); 53bool sme_active(void);
53bool sev_active(void); 54bool sev_active(void);
54 55
56#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
57
55#else /* !CONFIG_AMD_MEM_ENCRYPT */ 58#else /* !CONFIG_AMD_MEM_ENCRYPT */
56 59
57#define sme_me_mask 0ULL 60#define sme_me_mask 0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
77static inline int __init 80static inline int __init
78early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } 81early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
79 82
83#define __bss_decrypted
84
80#endif /* CONFIG_AMD_MEM_ENCRYPT */ 85#endif /* CONFIG_AMD_MEM_ENCRYPT */
81 86
82/* 87/*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
88#define __sme_pa(x) (__pa(x) | sme_me_mask) 93#define __sme_pa(x) (__pa(x) | sme_me_mask)
89#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask) 94#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask)
90 95
96extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
97
91#endif /* __ASSEMBLY__ */ 98#endif /* __ASSEMBLY__ */
92 99
93#endif /* __X86_MEM_ENCRYPT_H__ */ 100#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 24c6cf5f16b7..60d0f9015317 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
19 19
20static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) 20static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
21{ 21{
22#ifdef CONFIG_PAGE_TABLE_ISOLATION
23 pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
24#endif
25 *pmdp = pmd; 22 *pmdp = pmd;
26} 23}
27 24
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
61#ifdef CONFIG_SMP 58#ifdef CONFIG_SMP
62static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) 59static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
63{ 60{
64#ifdef CONFIG_PAGE_TABLE_ISOLATION
65 pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
66#endif
67 return __pmd(xchg((pmdval_t *)xp, 0)); 61 return __pmd(xchg((pmdval_t *)xp, 0));
68} 62}
69#else 63#else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
73#ifdef CONFIG_SMP 67#ifdef CONFIG_SMP
74static inline pud_t native_pudp_get_and_clear(pud_t *xp) 68static inline pud_t native_pudp_get_and_clear(pud_t *xp)
75{ 69{
76#ifdef CONFIG_PAGE_TABLE_ISOLATION
77 pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
78#endif
79 return __pud(xchg((pudval_t *)xp, 0)); 70 return __pud(xchg((pudval_t *)xp, 0));
80} 71}
81#else 72#else
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index a564084c6141..f8b1ad2c3828 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -2,6 +2,8 @@
2#ifndef _ASM_X86_PGTABLE_3LEVEL_H 2#ifndef _ASM_X86_PGTABLE_3LEVEL_H
3#define _ASM_X86_PGTABLE_3LEVEL_H 3#define _ASM_X86_PGTABLE_3LEVEL_H
4 4
5#include <asm/atomic64_32.h>
6
5/* 7/*
6 * Intel Physical Address Extension (PAE) Mode - three-level page 8 * Intel Physical Address Extension (PAE) Mode - three-level page
7 * tables on PPro+ CPUs. 9 * tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
150{ 152{
151 pte_t res; 153 pte_t res;
152 154
153 /* xchg acts as a barrier before the setting of the high bits */ 155 res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
154 res.pte_low = xchg(&ptep->pte_low, 0);
155 res.pte_high = ptep->pte_high;
156 ptep->pte_high = 0;
157 156
158 return res; 157 return res;
159} 158}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e4ffa565a69f..690c0307afed 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1195 return xchg(pmdp, pmd); 1195 return xchg(pmdp, pmd);
1196 } else { 1196 } else {
1197 pmd_t old = *pmdp; 1197 pmd_t old = *pmdp;
1198 *pmdp = pmd; 1198 WRITE_ONCE(*pmdp, pmd);
1199 return old; 1199 return old;
1200 } 1200 }
1201} 1201}
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f773d5e6c8cc..9c85b54bf03c 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,6 +14,7 @@
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/threads.h> 16#include <linux/threads.h>
17#include <asm/fixmap.h>
17 18
18extern p4d_t level4_kernel_pgt[512]; 19extern p4d_t level4_kernel_pgt[512];
19extern p4d_t level4_ident_pgt[512]; 20extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
22extern pmd_t level2_kernel_pgt[512]; 23extern pmd_t level2_kernel_pgt[512];
23extern pmd_t level2_fixmap_pgt[512]; 24extern pmd_t level2_fixmap_pgt[512];
24extern pmd_t level2_ident_pgt[512]; 25extern pmd_t level2_ident_pgt[512];
25extern pte_t level1_fixmap_pgt[512]; 26extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
26extern pgd_t init_top_pgt[]; 27extern pgd_t init_top_pgt[];
27 28
28#define swapper_pg_dir init_top_pgt 29#define swapper_pg_dir init_top_pgt
@@ -55,15 +56,15 @@ struct mm_struct;
55void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte); 56void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
56void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte); 57void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
57 58
58static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, 59static inline void native_set_pte(pte_t *ptep, pte_t pte)
59 pte_t *ptep)
60{ 60{
61 *ptep = native_make_pte(0); 61 WRITE_ONCE(*ptep, pte);
62} 62}
63 63
64static inline void native_set_pte(pte_t *ptep, pte_t pte) 64static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
65 pte_t *ptep)
65{ 66{
66 *ptep = pte; 67 native_set_pte(ptep, native_make_pte(0));
67} 68}
68 69
69static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) 70static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +74,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
73 74
74static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) 75static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
75{ 76{
76 *pmdp = pmd; 77 WRITE_ONCE(*pmdp, pmd);
77} 78}
78 79
79static inline void native_pmd_clear(pmd_t *pmd) 80static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +110,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
109 110
110static inline void native_set_pud(pud_t *pudp, pud_t pud) 111static inline void native_set_pud(pud_t *pudp, pud_t pud)
111{ 112{
112 *pudp = pud; 113 WRITE_ONCE(*pudp, pud);
113} 114}
114 115
115static inline void native_pud_clear(pud_t *pud) 116static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +138,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
137 pgd_t pgd; 138 pgd_t pgd;
138 139
139 if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { 140 if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
140 *p4dp = p4d; 141 WRITE_ONCE(*p4dp, p4d);
141 return; 142 return;
142 } 143 }
143 144
144 pgd = native_make_pgd(native_p4d_val(p4d)); 145 pgd = native_make_pgd(native_p4d_val(p4d));
145 pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd); 146 pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
146 *p4dp = native_make_p4d(native_pgd_val(pgd)); 147 WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
147} 148}
148 149
149static inline void native_p4d_clear(p4d_t *p4d) 150static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +154,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
153 154
154static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) 155static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
155{ 156{
156 *pgdp = pti_set_user_pgtbl(pgdp, pgd); 157 WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
157} 158}
158 159
159static inline void native_pgd_clear(pgd_t *pgd) 160static inline void native_pgd_clear(pgd_t *pgd)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c24297268ebc..d53c54b842da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
132 /* Index into per_cpu list: */ 132 /* Index into per_cpu list: */
133 u16 cpu_index; 133 u16 cpu_index;
134 u32 microcode; 134 u32 microcode;
135 /* Address space bits used by the cache internally */
136 u8 x86_cache_bits;
135 unsigned initialized : 1; 137 unsigned initialized : 1;
136} __randomize_layout; 138} __randomize_layout;
137 139
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
183 185
184static inline unsigned long long l1tf_pfn_limit(void) 186static inline unsigned long long l1tf_pfn_limit(void)
185{ 187{
186 return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT); 188 return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
187} 189}
188 190
189extern void early_cpu_init(void); 191extern void early_cpu_init(void);
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 5f9012ff52ed..33d3c88a7225 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
39 39
40#define __ARCH_HAS_SA_RESTORER 40#define __ARCH_HAS_SA_RESTORER
41 41
42#include <asm/asm.h>
42#include <uapi/asm/sigcontext.h> 43#include <uapi/asm/sigcontext.h>
43 44
44#ifdef __i386__ 45#ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
86 87
87static inline int __gen_sigismember(sigset_t *set, int _sig) 88static inline int __gen_sigismember(sigset_t *set, int _sig)
88{ 89{
89 unsigned char ret; 90 bool ret;
90 asm("btl %2,%1\n\tsetc %0" 91 asm("btl %2,%1" CC_SET(c)
91 : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); 92 : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
92 return ret; 93 return ret;
93} 94}
94 95
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index b6dc698f992a..f335aad404a4 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
111 return (unsigned long)frame; 111 return (unsigned long)frame;
112} 112}
113 113
114void show_opcodes(u8 *rip, const char *loglvl); 114void show_opcodes(struct pt_regs *regs, const char *loglvl);
115void show_ip(struct pt_regs *regs, const char *loglvl); 115void show_ip(struct pt_regs *regs, const char *loglvl);
116#endif /* _ASM_X86_STACKTRACE_H */ 116#endif /* _ASM_X86_STACKTRACE_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 29c9da6c62fc..58ce5288878e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -175,8 +175,16 @@ struct tlb_state {
175 * are on. This means that it may not match current->active_mm, 175 * are on. This means that it may not match current->active_mm,
176 * which will contain the previous user mm when we're in lazy TLB 176 * which will contain the previous user mm when we're in lazy TLB
177 * mode even if we've already switched back to swapper_pg_dir. 177 * mode even if we've already switched back to swapper_pg_dir.
178 *
179 * During switch_mm_irqs_off(), loaded_mm will be set to
180 * LOADED_MM_SWITCHING during the brief interrupts-off window
181 * when CR3 and loaded_mm would otherwise be inconsistent. This
182 * is for nmi_uaccess_okay()'s benefit.
178 */ 183 */
179 struct mm_struct *loaded_mm; 184 struct mm_struct *loaded_mm;
185
186#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
187
180 u16 loaded_mm_asid; 188 u16 loaded_mm_asid;
181 u16 next_asid; 189 u16 next_asid;
182 /* last user mm's ctx id */ 190 /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
246}; 254};
247DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); 255DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
248 256
257/*
258 * Blindly accessing user memory from NMI context can be dangerous
259 * if we're in the middle of switching the current user task or
260 * switching the loaded mm. It can also be dangerous if we
261 * interrupted some kernel code that was temporarily using a
262 * different mm.
263 */
264static inline bool nmi_uaccess_okay(void)
265{
266 struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
267 struct mm_struct *current_mm = current->mm;
268
269 VM_WARN_ON_ONCE(!loaded_mm);
270
271 /*
272 * The condition we want to check is
273 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
274 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
275 * is supposed to be reasonably fast.
276 *
277 * Instead, we check the almost equivalent but somewhat conservative
278 * condition below, and we rely on the fact that switch_mm_irqs_off()
279 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
280 */
281 if (loaded_mm != current_mm)
282 return false;
283
284 VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
285
286 return true;
287}
288
249/* Initialize cr4 shadow for this CPU. */ 289/* Initialize cr4 shadow for this CPU. */
250static inline void cr4_init_shadow(void) 290static inline void cr4_init_shadow(void)
251{ 291{
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index fb856c9f0449..53748541c487 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
93 * 93 *
94 * If RDPID is available, use it. 94 * If RDPID is available, use it.
95 */ 95 */
96 alternative_io ("lsl %[p],%[seg]", 96 alternative_io ("lsl %[seg],%[p]",
97 ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ 97 ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
98 X86_FEATURE_RDPID, 98 X86_FEATURE_RDPID,
99 [p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); 99 [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 86299efa804a..fd23d5778ea1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -377,6 +377,7 @@ struct kvm_sync_regs {
377 377
378#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) 378#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
379#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) 379#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
380#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
380 381
381#define KVM_STATE_NESTED_GUEST_MODE 0x00000001 382#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
382#define KVM_STATE_NESTED_RUN_PENDING 0x00000002 383#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 014f214da581..b9d5e7c9ef43 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
684 * It means the size must be writable atomically and the address must be aligned 684 * It means the size must be writable atomically and the address must be aligned
685 * in a way that permits an atomic write. It also makes sure we fit on a single 685 * in a way that permits an atomic write. It also makes sure we fit on a single
686 * page. 686 * page.
687 *
688 * Note: Must be called under text_mutex.
689 */ 687 */
690void *text_poke(void *addr, const void *opcode, size_t len) 688void *text_poke(void *addr, const void *opcode, size_t len)
691{ 689{
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
700 */ 698 */
701 BUG_ON(!after_bootmem); 699 BUG_ON(!after_bootmem);
702 700
701 lockdep_assert_held(&text_mutex);
702
703 if (!core_kernel_text((unsigned long)addr)) { 703 if (!core_kernel_text((unsigned long)addr)) {
704 pages[0] = vmalloc_to_page(addr); 704 pages[0] = vmalloc_to_page(addr);
705 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 705 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
782 * - replace the first byte (int3) by the first byte of 782 * - replace the first byte (int3) by the first byte of
783 * replacing opcode 783 * replacing opcode
784 * - sync cores 784 * - sync cores
785 *
786 * Note: must be called under text_mutex.
787 */ 785 */
788void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) 786void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
789{ 787{
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
792 bp_int3_handler = handler; 790 bp_int3_handler = handler;
793 bp_int3_addr = (u8 *)addr + sizeof(int3); 791 bp_int3_addr = (u8 *)addr + sizeof(int3);
794 bp_patching_in_progress = true; 792 bp_patching_in_progress = true;
793
794 lockdep_assert_held(&text_mutex);
795
795 /* 796 /*
796 * Corresponding read barrier in int3 notifier for making sure the 797 * Corresponding read barrier in int3 notifier for making sure the
797 * in_progress and handler are correctly ordered wrt. patching. 798 * in_progress and handler are correctly ordered wrt. patching.
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 9f148e3d45b4..7654febd5102 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
413 if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { 413 if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
414 /* Something in the core code broke! Survive gracefully */ 414 /* Something in the core code broke! Survive gracefully */
415 pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); 415 pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
416 return EINVAL; 416 return -EINVAL;
417 } 417 }
418 418
419 ret = assign_managed_vector(irqd, vector_searchmask); 419 ret = assign_managed_vector(irqd, vector_searchmask);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index ec00d1ff5098..f7151cd03cb0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
1640 return 0; 1640 return 0;
1641} 1641}
1642 1642
1643#ifdef CONFIG_PROC_FS
1643static int proc_apm_show(struct seq_file *m, void *v) 1644static int proc_apm_show(struct seq_file *m, void *v)
1644{ 1645{
1645 unsigned short bx; 1646 unsigned short bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
1719 units); 1720 units);
1720 return 0; 1721 return 0;
1721} 1722}
1723#endif
1722 1724
1723static int apm(void *unused) 1725static int apm(void *unused)
1724{ 1726{
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
668enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; 668enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
669EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); 669EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
670 670
671/*
672 * These CPUs all support 44bits physical address space internally in the
673 * cache but CPUID can report a smaller number of physical address bits.
674 *
675 * The L1TF mitigation uses the top most address bit for the inversion of
676 * non present PTEs. When the installed memory reaches into the top most
677 * address bit due to memory holes, which has been observed on machines
678 * which report 36bits physical address bits and have 32G RAM installed,
679 * then the mitigation range check in l1tf_select_mitigation() triggers.
680 * This is a false positive because the mitigation is still possible due to
681 * the fact that the cache uses 44bit internally. Use the cache bits
682 * instead of the reported physical bits and adjust them on the affected
683 * machines to 44bit if the reported bits are less than 44.
684 */
685static void override_cache_bits(struct cpuinfo_x86 *c)
686{
687 if (c->x86 != 6)
688 return;
689
690 switch (c->x86_model) {
691 case INTEL_FAM6_NEHALEM:
692 case INTEL_FAM6_WESTMERE:
693 case INTEL_FAM6_SANDYBRIDGE:
694 case INTEL_FAM6_IVYBRIDGE:
695 case INTEL_FAM6_HASWELL_CORE:
696 case INTEL_FAM6_HASWELL_ULT:
697 case INTEL_FAM6_HASWELL_GT3E:
698 case INTEL_FAM6_BROADWELL_CORE:
699 case INTEL_FAM6_BROADWELL_GT3E:
700 case INTEL_FAM6_SKYLAKE_MOBILE:
701 case INTEL_FAM6_SKYLAKE_DESKTOP:
702 case INTEL_FAM6_KABYLAKE_MOBILE:
703 case INTEL_FAM6_KABYLAKE_DESKTOP:
704 if (c->x86_cache_bits < 44)
705 c->x86_cache_bits = 44;
706 break;
707 }
708}
709
671static void __init l1tf_select_mitigation(void) 710static void __init l1tf_select_mitigation(void)
672{ 711{
673 u64 half_pa; 712 u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
675 if (!boot_cpu_has_bug(X86_BUG_L1TF)) 714 if (!boot_cpu_has_bug(X86_BUG_L1TF))
676 return; 715 return;
677 716
717 override_cache_bits(&boot_cpu_data);
718
678 switch (l1tf_mitigation) { 719 switch (l1tf_mitigation) {
679 case L1TF_MITIGATION_OFF: 720 case L1TF_MITIGATION_OFF:
680 case L1TF_MITIGATION_FLUSH_NOWARN: 721 case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
694 return; 735 return;
695#endif 736#endif
696 737
697 /*
698 * This is extremely unlikely to happen because almost all
699 * systems have far more MAX_PA/2 than RAM can be fit into
700 * DIMM slots.
701 */
702 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; 738 half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
703 if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { 739 if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
704 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); 740 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84dee5ab745a..44c4ef3d989b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
919 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) 919 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
920 c->x86_phys_bits = 36; 920 c->x86_phys_bits = 36;
921#endif 921#endif
922 c->x86_cache_bits = c->x86_phys_bits;
922} 923}
923 924
924static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) 925static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 401e8c133108..fc3c07fe7df5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
150 if (cpu_has(c, X86_FEATURE_HYPERVISOR)) 150 if (cpu_has(c, X86_FEATURE_HYPERVISOR))
151 return false; 151 return false;
152 152
153 if (c->x86 != 6)
154 return false;
155
153 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { 156 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
154 if (c->x86_model == spectre_bad_microcodes[i].model && 157 if (c->x86_model == spectre_bad_microcodes[i].model &&
155 c->x86_stepping == spectre_bad_microcodes[i].stepping) 158 c->x86_stepping == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 4e588f36228f..285eb3ec4200 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
382 e <= QOS_L3_MBM_LOCAL_EVENT_ID); 382 e <= QOS_L3_MBM_LOCAL_EVENT_ID);
383} 383}
384 384
385struct rdt_parse_data {
386 struct rdtgroup *rdtgrp;
387 char *buf;
388};
389
385/** 390/**
386 * struct rdt_resource - attributes of an RDT resource 391 * struct rdt_resource - attributes of an RDT resource
387 * @rid: The index of the resource 392 * @rid: The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
423 struct rdt_cache cache; 428 struct rdt_cache cache;
424 struct rdt_membw membw; 429 struct rdt_membw membw;
425 const char *format_str; 430 const char *format_str;
426 int (*parse_ctrlval) (void *data, struct rdt_resource *r, 431 int (*parse_ctrlval)(struct rdt_parse_data *data,
427 struct rdt_domain *d); 432 struct rdt_resource *r,
433 struct rdt_domain *d);
428 struct list_head evt_list; 434 struct list_head evt_list;
429 int num_rmid; 435 int num_rmid;
430 unsigned int mon_scale; 436 unsigned int mon_scale;
431 unsigned long fflags; 437 unsigned long fflags;
432}; 438};
433 439
434int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d); 440int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
435int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d); 441 struct rdt_domain *d);
442int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
443 struct rdt_domain *d);
436 444
437extern struct mutex rdtgroup_mutex; 445extern struct mutex rdtgroup_mutex;
438 446
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
536void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); 544void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
537struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); 545struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
538int update_domains(struct rdt_resource *r, int closid); 546int update_domains(struct rdt_resource *r, int closid);
547int closids_supported(void);
539void closid_free(int closid); 548void closid_free(int closid);
540int alloc_rmid(void); 549int alloc_rmid(void);
541void free_rmid(u32 rmid); 550void free_rmid(u32 rmid);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index af358ca05160..0f53049719cd 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
64 return true; 64 return true;
65} 65}
66 66
67int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d) 67int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
68 struct rdt_domain *d)
68{ 69{
69 unsigned long data; 70 unsigned long bw_val;
70 char *buf = _buf;
71 71
72 if (d->have_new_ctrl) { 72 if (d->have_new_ctrl) {
73 rdt_last_cmd_printf("duplicate domain %d\n", d->id); 73 rdt_last_cmd_printf("duplicate domain %d\n", d->id);
74 return -EINVAL; 74 return -EINVAL;
75 } 75 }
76 76
77 if (!bw_validate(buf, &data, r)) 77 if (!bw_validate(data->buf, &bw_val, r))
78 return -EINVAL; 78 return -EINVAL;
79 d->new_ctrl = data; 79 d->new_ctrl = bw_val;
80 d->have_new_ctrl = true; 80 d->have_new_ctrl = true;
81 81
82 return 0; 82 return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
123 return true; 123 return true;
124} 124}
125 125
126struct rdt_cbm_parse_data {
127 struct rdtgroup *rdtgrp;
128 char *buf;
129};
130
131/* 126/*
132 * Read one cache bit mask (hex). Check that it is valid for the current 127 * Read one cache bit mask (hex). Check that it is valid for the current
133 * resource type. 128 * resource type.
134 */ 129 */
135int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d) 130int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
131 struct rdt_domain *d)
136{ 132{
137 struct rdt_cbm_parse_data *data = _data;
138 struct rdtgroup *rdtgrp = data->rdtgrp; 133 struct rdtgroup *rdtgrp = data->rdtgrp;
139 u32 cbm_val; 134 u32 cbm_val;
140 135
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
195static int parse_line(char *line, struct rdt_resource *r, 190static int parse_line(char *line, struct rdt_resource *r,
196 struct rdtgroup *rdtgrp) 191 struct rdtgroup *rdtgrp)
197{ 192{
198 struct rdt_cbm_parse_data data; 193 struct rdt_parse_data data;
199 char *dom = NULL, *id; 194 char *dom = NULL, *id;
200 struct rdt_domain *d; 195 struct rdt_domain *d;
201 unsigned long dom_id; 196 unsigned long dom_id;
202 197
198 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
199 r->rid == RDT_RESOURCE_MBA) {
200 rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
201 return -EINVAL;
202 }
203
203next: 204next:
204 if (!line || line[0] == '\0') 205 if (!line || line[0] == '\0')
205 return 0; 206 return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b799c00bef09..1b8e86a5d5e1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
97 * limited as the number of resources grows. 97 * limited as the number of resources grows.
98 */ 98 */
99static int closid_free_map; 99static int closid_free_map;
100static int closid_free_map_len;
101
102int closids_supported(void)
103{
104 return closid_free_map_len;
105}
100 106
101static void closid_init(void) 107static void closid_init(void)
102{ 108{
@@ -111,6 +117,7 @@ static void closid_init(void)
111 117
112 /* CLOSID 0 is always reserved for the default group */ 118 /* CLOSID 0 is always reserved for the default group */
113 closid_free_map &= ~1; 119 closid_free_map &= ~1;
120 closid_free_map_len = rdt_min_closid;
114} 121}
115 122
116static int closid_alloc(void) 123static int closid_alloc(void)
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
802 sw_shareable = 0; 809 sw_shareable = 0;
803 exclusive = 0; 810 exclusive = 0;
804 seq_printf(seq, "%d=", dom->id); 811 seq_printf(seq, "%d=", dom->id);
805 for (i = 0; i < r->num_closid; i++, ctrl++) { 812 for (i = 0; i < closids_supported(); i++, ctrl++) {
806 if (!closid_allocated(i)) 813 if (!closid_allocated(i))
807 continue; 814 continue;
808 mode = rdtgroup_mode_by_closid(i); 815 mode = rdtgroup_mode_by_closid(i);
@@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
989 996
990 /* Check for overlap with other resource groups */ 997 /* Check for overlap with other resource groups */
991 ctrl = d->ctrl_val; 998 ctrl = d->ctrl_val;
992 for (i = 0; i < r->num_closid; i++, ctrl++) { 999 for (i = 0; i < closids_supported(); i++, ctrl++) {
993 ctrl_b = (unsigned long *)ctrl; 1000 ctrl_b = (unsigned long *)ctrl;
994 mode = rdtgroup_mode_by_closid(i); 1001 mode = rdtgroup_mode_by_closid(i);
995 if (closid_allocated(i) && i != closid && 1002 if (closid_allocated(i) && i != closid &&
@@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
1024{ 1031{
1025 int closid = rdtgrp->closid; 1032 int closid = rdtgrp->closid;
1026 struct rdt_resource *r; 1033 struct rdt_resource *r;
1034 bool has_cache = false;
1027 struct rdt_domain *d; 1035 struct rdt_domain *d;
1028 1036
1029 for_each_alloc_enabled_rdt_resource(r) { 1037 for_each_alloc_enabled_rdt_resource(r) {
1038 if (r->rid == RDT_RESOURCE_MBA)
1039 continue;
1040 has_cache = true;
1030 list_for_each_entry(d, &r->domains, list) { 1041 list_for_each_entry(d, &r->domains, list) {
1031 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], 1042 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
1032 rdtgrp->closid, false)) 1043 rdtgrp->closid, false)) {
1044 rdt_last_cmd_puts("schemata overlaps\n");
1033 return false; 1045 return false;
1046 }
1034 } 1047 }
1035 } 1048 }
1036 1049
1050 if (!has_cache) {
1051 rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
1052 return false;
1053 }
1054
1037 return true; 1055 return true;
1038} 1056}
1039 1057
@@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
1085 rdtgrp->mode = RDT_MODE_SHAREABLE; 1103 rdtgrp->mode = RDT_MODE_SHAREABLE;
1086 } else if (!strcmp(buf, "exclusive")) { 1104 } else if (!strcmp(buf, "exclusive")) {
1087 if (!rdtgroup_mode_test_exclusive(rdtgrp)) { 1105 if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
1088 rdt_last_cmd_printf("schemata overlaps\n");
1089 ret = -EINVAL; 1106 ret = -EINVAL;
1090 goto out; 1107 goto out;
1091 } 1108 }
@@ -1155,8 +1172,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1155 struct rdt_resource *r; 1172 struct rdt_resource *r;
1156 struct rdt_domain *d; 1173 struct rdt_domain *d;
1157 unsigned int size; 1174 unsigned int size;
1158 bool sep = false; 1175 bool sep;
1159 u32 cbm; 1176 u32 ctrl;
1160 1177
1161 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1178 rdtgrp = rdtgroup_kn_lock_live(of->kn);
1162 if (!rdtgrp) { 1179 if (!rdtgrp) {
@@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1174 } 1191 }
1175 1192
1176 for_each_alloc_enabled_rdt_resource(r) { 1193 for_each_alloc_enabled_rdt_resource(r) {
1194 sep = false;
1177 seq_printf(s, "%*s:", max_name_width, r->name); 1195 seq_printf(s, "%*s:", max_name_width, r->name);
1178 list_for_each_entry(d, &r->domains, list) { 1196 list_for_each_entry(d, &r->domains, list) {
1179 if (sep) 1197 if (sep)
@@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
1181 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1199 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
1182 size = 0; 1200 size = 0;
1183 } else { 1201 } else {
1184 cbm = d->ctrl_val[rdtgrp->closid]; 1202 ctrl = (!is_mba_sc(r) ?
1185 size = rdtgroup_cbm_to_size(r, d, cbm); 1203 d->ctrl_val[rdtgrp->closid] :
1204 d->mbps_val[rdtgrp->closid]);
1205 if (r->rid == RDT_RESOURCE_MBA)
1206 size = ctrl;
1207 else
1208 size = rdtgroup_cbm_to_size(r, d, ctrl);
1186 } 1209 }
1187 seq_printf(s, "%d=%u", d->id, size); 1210 seq_printf(s, "%d=%u", d->id, size);
1188 sep = true; 1211 sep = true;
@@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2336 u32 *ctrl; 2359 u32 *ctrl;
2337 2360
2338 for_each_alloc_enabled_rdt_resource(r) { 2361 for_each_alloc_enabled_rdt_resource(r) {
2362 /*
2363 * Only initialize default allocations for CBM cache
2364 * resources
2365 */
2366 if (r->rid == RDT_RESOURCE_MBA)
2367 continue;
2339 list_for_each_entry(d, &r->domains, list) { 2368 list_for_each_entry(d, &r->domains, list) {
2340 d->have_new_ctrl = false; 2369 d->have_new_ctrl = false;
2341 d->new_ctrl = r->cache.shareable_bits; 2370 d->new_ctrl = r->cache.shareable_bits;
2342 used_b = r->cache.shareable_bits; 2371 used_b = r->cache.shareable_bits;
2343 ctrl = d->ctrl_val; 2372 ctrl = d->ctrl_val;
2344 for (i = 0; i < r->num_closid; i++, ctrl++) { 2373 for (i = 0; i < closids_supported(); i++, ctrl++) {
2345 if (closid_allocated(i) && i != closid) { 2374 if (closid_allocated(i) && i != closid) {
2346 mode = rdtgroup_mode_by_closid(i); 2375 mode = rdtgroup_mode_by_closid(i);
2347 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2376 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2373 } 2402 }
2374 2403
2375 for_each_alloc_enabled_rdt_resource(r) { 2404 for_each_alloc_enabled_rdt_resource(r) {
2405 /*
2406 * Only initialize default allocations for CBM cache
2407 * resources
2408 */
2409 if (r->rid == RDT_RESOURCE_MBA)
2410 continue;
2376 ret = update_domains(r, rdtgrp->closid); 2411 ret = update_domains(r, rdtgrp->closid);
2377 if (ret < 0) { 2412 if (ret < 0) {
2378 rdt_last_cmd_puts("failed to initialize allocations\n"); 2413 rdt_last_cmd_puts("failed to initialize allocations\n");
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 0624957aa068..07b5fc00b188 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
504 struct microcode_amd *mc_amd; 504 struct microcode_amd *mc_amd;
505 struct ucode_cpu_info *uci; 505 struct ucode_cpu_info *uci;
506 struct ucode_patch *p; 506 struct ucode_patch *p;
507 enum ucode_state ret;
507 u32 rev, dummy; 508 u32 rev, dummy;
508 509
509 BUG_ON(raw_smp_processor_id() != cpu); 510 BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
521 522
522 /* need to apply patch? */ 523 /* need to apply patch? */
523 if (rev >= mc_amd->hdr.patch_id) { 524 if (rev >= mc_amd->hdr.patch_id) {
524 c->microcode = rev; 525 ret = UCODE_OK;
525 uci->cpu_sig.rev = rev; 526 goto out;
526 return UCODE_OK;
527 } 527 }
528 528
529 if (__apply_microcode_amd(mc_amd)) { 529 if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
531 cpu, mc_amd->hdr.patch_id); 531 cpu, mc_amd->hdr.patch_id);
532 return UCODE_ERROR; 532 return UCODE_ERROR;
533 } 533 }
534 pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
535 mc_amd->hdr.patch_id);
536 534
537 uci->cpu_sig.rev = mc_amd->hdr.patch_id; 535 rev = mc_amd->hdr.patch_id;
538 c->microcode = mc_amd->hdr.patch_id; 536 ret = UCODE_UPDATED;
537
538 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
539 539
540 return UCODE_UPDATED; 540out:
541 uci->cpu_sig.rev = rev;
542 c->microcode = rev;
543
544 /* Update boot_cpu_data's revision too, if we're on the BSP: */
545 if (c->cpu_index == boot_cpu_data.cpu_index)
546 boot_cpu_data.microcode = rev;
547
548 return ret;
541} 549}
542 550
543static int install_equiv_cpu_table(const u8 *buf) 551static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
795 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 795 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
796 struct cpuinfo_x86 *c = &cpu_data(cpu); 796 struct cpuinfo_x86 *c = &cpu_data(cpu);
797 struct microcode_intel *mc; 797 struct microcode_intel *mc;
798 enum ucode_state ret;
798 static int prev_rev; 799 static int prev_rev;
799 u32 rev; 800 u32 rev;
800 801
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
817 */ 818 */
818 rev = intel_get_microcode_revision(); 819 rev = intel_get_microcode_revision();
819 if (rev >= mc->hdr.rev) { 820 if (rev >= mc->hdr.rev) {
820 uci->cpu_sig.rev = rev; 821 ret = UCODE_OK;
821 c->microcode = rev; 822 goto out;
822 return UCODE_OK;
823 } 823 }
824 824
825 /* 825 /*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
848 prev_rev = rev; 848 prev_rev = rev;
849 } 849 }
850 850
851 ret = UCODE_UPDATED;
852
853out:
851 uci->cpu_sig.rev = rev; 854 uci->cpu_sig.rev = rev;
852 c->microcode = rev; 855 c->microcode = rev;
856
857 /* Update boot_cpu_data's revision too, if we're on the BSP: */
858 if (c->cpu_index == boot_cpu_data.cpu_index)
859 boot_cpu_data.microcode = rev;
853 860
854 return UCODE_UPDATED; 861 return ret;
855} 862}
856 863
857static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, 864static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9c8652974f8e..2b5886401e5f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -17,6 +17,7 @@
17#include <linux/bug.h> 17#include <linux/bug.h>
18#include <linux/nmi.h> 18#include <linux/nmi.h>
19#include <linux/sysfs.h> 19#include <linux/sysfs.h>
20#include <linux/kasan.h>
20 21
21#include <asm/cpu_entry_area.h> 22#include <asm/cpu_entry_area.h>
22#include <asm/stacktrace.h> 23#include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
89 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random 90 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
90 * guesstimate in attempt to achieve all of the above. 91 * guesstimate in attempt to achieve all of the above.
91 */ 92 */
92void show_opcodes(u8 *rip, const char *loglvl) 93void show_opcodes(struct pt_regs *regs, const char *loglvl)
93{ 94{
94#define PROLOGUE_SIZE 42 95#define PROLOGUE_SIZE 42
95#define EPILOGUE_SIZE 21 96#define EPILOGUE_SIZE 21
96#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE) 97#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
97 u8 opcodes[OPCODE_BUFSIZE]; 98 u8 opcodes[OPCODE_BUFSIZE];
99 unsigned long prologue = regs->ip - PROLOGUE_SIZE;
100 bool bad_ip;
98 101
99 if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) { 102 /*
103 * Make sure userspace isn't trying to trick us into dumping kernel
104 * memory by pointing the userspace instruction pointer at it.
105 */
106 bad_ip = user_mode(regs) &&
107 __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
108
109 if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
110 OPCODE_BUFSIZE)) {
100 printk("%sCode: Bad RIP value.\n", loglvl); 111 printk("%sCode: Bad RIP value.\n", loglvl);
101 } else { 112 } else {
102 printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %" 113 printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
112#else 123#else
113 printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip); 124 printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
114#endif 125#endif
115 show_opcodes((u8 *)regs->ip, loglvl); 126 show_opcodes(regs, loglvl);
116} 127}
117 128
118void show_iret_regs(struct pt_regs *regs) 129void show_iret_regs(struct pt_regs *regs)
@@ -135,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
135 * they can be printed in the right context. 146 * they can be printed in the right context.
136 */ 147 */
137 if (!partial && on_stack(info, regs, sizeof(*regs))) { 148 if (!partial && on_stack(info, regs, sizeof(*regs))) {
138 __show_regs(regs, 0); 149 __show_regs(regs, SHOW_REGS_SHORT);
139 150
140 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, 151 } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
141 IRET_FRAME_SIZE)) { 152 IRET_FRAME_SIZE)) {
@@ -333,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
333 oops_exit(); 344 oops_exit();
334 345
335 /* Executive summary in case the oops scrolled away */ 346 /* Executive summary in case the oops scrolled away */
336 __show_regs(&exec_summary_regs, true); 347 __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
337 348
338 if (!signr) 349 if (!signr)
339 return; 350 return;
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
346 * We're not going to return, but we might be on an IST stack or 357 * We're not going to return, but we might be on an IST stack or
347 * have very little stack space left. Rewind the stack and kill 358 * have very little stack space left. Rewind the stack and kill
348 * the task. 359 * the task.
360 * Before we rewind the stack, we have to tell KASAN that we're going to
361 * reuse the task stack and that existing poisons are invalid.
349 */ 362 */
363 kasan_unpoison_task_stack(current);
350 rewind_stack_do_exit(signr); 364 rewind_stack_do_exit(signr);
351} 365}
352NOKPROBE_SYMBOL(oops_end); 366NOKPROBE_SYMBOL(oops_end);
@@ -393,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
393 407
394void show_regs(struct pt_regs *regs) 408void show_regs(struct pt_regs *regs)
395{ 409{
396 bool all = true;
397
398 show_regs_print_info(KERN_DEFAULT); 410 show_regs_print_info(KERN_DEFAULT);
399 411
400 if (IS_ENABLED(CONFIG_X86_32)) 412 __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
401 all = !user_mode(regs);
402
403 __show_regs(regs, all);
404 413
405 /* 414 /*
406 * When in-kernel, we also print out the stack at the time of the fault.. 415 * When in-kernel, we also print out the stack at the time of the fault..
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index f260e452e4f8..e8c8c5d78dbd 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -7,11 +7,17 @@
7#include <linux/eisa.h> 7#include <linux/eisa.h>
8#include <linux/io.h> 8#include <linux/io.h>
9 9
10#include <xen/xen.h>
11
10static __init int eisa_bus_probe(void) 12static __init int eisa_bus_probe(void)
11{ 13{
12 void __iomem *p = ioremap(0x0FFFD9, 4); 14 void __iomem *p;
15
16 if (xen_pv_domain() && !xen_initial_domain())
17 return 0;
13 18
14 if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) 19 p = ioremap(0x0FFFD9, 4);
20 if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
15 EISA_bus = 1; 21 EISA_bus = 1;
16 iounmap(p); 22 iounmap(p);
17 return 0; 23 return 0;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8047379e575a..ddee1f0870c4 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -35,6 +35,7 @@
35#include <asm/bootparam_utils.h> 35#include <asm/bootparam_utils.h>
36#include <asm/microcode.h> 36#include <asm/microcode.h>
37#include <asm/kasan.h> 37#include <asm/kasan.h>
38#include <asm/fixmap.h>
38 39
39/* 40/*
40 * Manage page tables very early on. 41 * Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
112unsigned long __head __startup_64(unsigned long physaddr, 113unsigned long __head __startup_64(unsigned long physaddr,
113 struct boot_params *bp) 114 struct boot_params *bp)
114{ 115{
116 unsigned long vaddr, vaddr_end;
115 unsigned long load_delta, *p; 117 unsigned long load_delta, *p;
116 unsigned long pgtable_flags; 118 unsigned long pgtable_flags;
117 pgdval_t *pgd; 119 pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
165 pud[511] += load_delta; 167 pud[511] += load_delta;
166 168
167 pmd = fixup_pointer(level2_fixmap_pgt, physaddr); 169 pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
168 pmd[506] += load_delta; 170 for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
171 pmd[i] += load_delta;
169 172
170 /* 173 /*
171 * Set up the identity mapping for the switchover. These 174 * Set up the identity mapping for the switchover. These
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
235 sme_encrypt_kernel(bp); 238 sme_encrypt_kernel(bp);
236 239
237 /* 240 /*
241 * Clear the memory encryption mask from the .bss..decrypted section.
242 * The bss section will be memset to zero later in the initialization so
243 * there is no need to zero it after changing the memory encryption
244 * attribute.
245 */
246 if (mem_encrypt_active()) {
247 vaddr = (unsigned long)__start_bss_decrypted;
248 vaddr_end = (unsigned long)__end_bss_decrypted;
249 for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
250 i = pmd_index(vaddr);
251 pmd[i] -= sme_get_me_mask();
252 }
253 }
254
255 /*
238 * Return the SME encryption mask (if SME is active) to be used as a 256 * Return the SME encryption mask (if SME is active) to be used as a
239 * modifier for the initial pgdir entry programmed into CR3. 257 * modifier for the initial pgdir entry programmed into CR3.
240 */ 258 */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 15ebc2fc166e..a3618cf04cf6 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -24,6 +24,7 @@
24#include "../entry/calling.h" 24#include "../entry/calling.h"
25#include <asm/export.h> 25#include <asm/export.h>
26#include <asm/nospec-branch.h> 26#include <asm/nospec-branch.h>
27#include <asm/fixmap.h>
27 28
28#ifdef CONFIG_PARAVIRT 29#ifdef CONFIG_PARAVIRT
29#include <asm/asm-offsets.h> 30#include <asm/asm-offsets.h>
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
445 KERNEL_IMAGE_SIZE/PMD_SIZE) 446 KERNEL_IMAGE_SIZE/PMD_SIZE)
446 447
447NEXT_PAGE(level2_fixmap_pgt) 448NEXT_PAGE(level2_fixmap_pgt)
448 .fill 506,8,0 449 .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
449 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC 450 pgtno = 0
450 /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ 451 .rept (FIXMAP_PMD_NUM)
451 .fill 5,8,0 452 .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
453 + _PAGE_TABLE_NOENC;
454 pgtno = pgtno + 1
455 .endr
456 /* 6 MB reserved space + a 2MB hole */
457 .fill 4,8,0
452 458
453NEXT_PAGE(level1_fixmap_pgt) 459NEXT_PAGE(level1_fixmap_pgt)
460 .rept (FIXMAP_PMD_NUM)
454 .fill 512,8,0 461 .fill 512,8,0
462 .endr
455 463
456#undef PMDS 464#undef PMDS
457 465
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..013fe3d21dbb 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -28,6 +28,7 @@
28#include <linux/sched/clock.h> 28#include <linux/sched/clock.h>
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/set_memory.h>
31 32
32#include <asm/hypervisor.h> 33#include <asm/hypervisor.h>
33#include <asm/mem_encrypt.h> 34#include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
61 (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info)) 62 (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
62 63
63static struct pvclock_vsyscall_time_info 64static struct pvclock_vsyscall_time_info
64 hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE); 65 hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
65static struct pvclock_wall_clock wall_clock; 66static struct pvclock_wall_clock wall_clock __bss_decrypted;
66static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); 67static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
68static struct pvclock_vsyscall_time_info *hvclock_mem;
67 69
68static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) 70static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
69{ 71{
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
236 native_machine_shutdown(); 238 native_machine_shutdown();
237} 239}
238 240
241static void __init kvmclock_init_mem(void)
242{
243 unsigned long ncpus;
244 unsigned int order;
245 struct page *p;
246 int r;
247
248 if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
249 return;
250
251 ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
252 order = get_order(ncpus * sizeof(*hvclock_mem));
253
254 p = alloc_pages(GFP_KERNEL, order);
255 if (!p) {
256 pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
257 return;
258 }
259
260 hvclock_mem = page_address(p);
261
262 /*
263 * hvclock is shared between the guest and the hypervisor, must
264 * be mapped decrypted.
265 */
266 if (sev_active()) {
267 r = set_memory_decrypted((unsigned long) hvclock_mem,
268 1UL << order);
269 if (r) {
270 __free_pages(p, order);
271 hvclock_mem = NULL;
272 pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
273 return;
274 }
275 }
276
277 memset(hvclock_mem, 0, PAGE_SIZE << order);
278}
279
239static int __init kvm_setup_vsyscall_timeinfo(void) 280static int __init kvm_setup_vsyscall_timeinfo(void)
240{ 281{
241#ifdef CONFIG_X86_64 282#ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
250 291
251 kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK; 292 kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
252#endif 293#endif
294
295 kvmclock_init_mem();
296
253 return 0; 297 return 0;
254} 298}
255early_initcall(kvm_setup_vsyscall_timeinfo); 299early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
269 /* Use the static page for the first CPUs, allocate otherwise */ 313 /* Use the static page for the first CPUs, allocate otherwise */
270 if (cpu < HVC_BOOT_ARRAY_SIZE) 314 if (cpu < HVC_BOOT_ARRAY_SIZE)
271 p = &hv_clock_boot[cpu]; 315 p = &hv_clock_boot[cpu];
316 else if (hvclock_mem)
317 p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
272 else 318 else
273 p = kzalloc(sizeof(*p), GFP_KERNEL); 319 return -ENOMEM;
274 320
275 per_cpu(hv_clock_per_cpu, cpu) = p; 321 per_cpu(hv_clock_per_cpu, cpu) = p;
276 return p ? 0 : -ENOMEM; 322 return p ? 0 : -ENOMEM;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index afdb303285f8..8dc69d82567e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf,
91 91
92 if (len < 5) { 92 if (len < 5) {
93#ifdef CONFIG_RETPOLINE 93#ifdef CONFIG_RETPOLINE
94 WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr); 94 WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
95#endif 95#endif
96 return len; /* call too long for patch site */ 96 return len; /* call too long for patch site */
97 } 97 }
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
111 111
112 if (len < 5) { 112 if (len < 5) {
113#ifdef CONFIG_RETPOLINE 113#ifdef CONFIG_RETPOLINE
114 WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr); 114 WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
115#endif 115#endif
116 return len; /* call too long for patch site */ 116 return len; /* call too long for patch site */
117 } 117 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 2924fd447e61..5046a3c9dec2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,7 +59,7 @@
59#include <asm/intel_rdt_sched.h> 59#include <asm/intel_rdt_sched.h>
60#include <asm/proto.h> 60#include <asm/proto.h>
61 61
62void __show_regs(struct pt_regs *regs, int all) 62void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
63{ 63{
64 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 64 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
65 unsigned long d0, d1, d2, d3, d6, d7; 65 unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
85 printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n", 85 printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
86 (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags); 86 (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
87 87
88 if (!all) 88 if (mode != SHOW_REGS_ALL)
89 return; 89 return;
90 90
91 cr0 = read_cr0(); 91 cr0 = read_cr0();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a451bc374b9b..ea5ea850348d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,7 @@
62__visible DEFINE_PER_CPU(unsigned long, rsp_scratch); 62__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
63 63
64/* Prints also some state that isn't saved in the pt_regs */ 64/* Prints also some state that isn't saved in the pt_regs */
65void __show_regs(struct pt_regs *regs, int all) 65void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
66{ 66{
67 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; 67 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
68 unsigned long d0, d1, d2, d3, d6, d7; 68 unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
87 printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", 87 printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
88 regs->r13, regs->r14, regs->r15); 88 regs->r13, regs->r14, regs->r15);
89 89
90 if (!all) 90 if (mode == SHOW_REGS_SHORT)
91 return; 91 return;
92 92
93 if (mode == SHOW_REGS_USER) {
94 rdmsrl(MSR_FS_BASE, fs);
95 rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
96 printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
97 fs, shadowgs);
98 return;
99 }
100
93 asm("movl %%ds,%0" : "=r" (ds)); 101 asm("movl %%ds,%0" : "=r" (ds));
94 asm("movl %%cs,%0" : "=r" (cs)); 102 asm("movl %%cs,%0" : "=r" (cs));
95 asm("movl %%es,%0" : "=r" (es)); 103 asm("movl %%es,%0" : "=r" (es));
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 12cbe2b88c0f..738bf42b0218 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
111 /* 111 /*
112 * Currently CPU0 is only hotpluggable on Intel platforms. Other 112 * Currently CPU0 is only hotpluggable on Intel platforms. Other
113 * vendors can add hotplug support later. 113 * vendors can add hotplug support later.
114 * Xen PV guests don't support CPU0 hotplug at all.
114 */ 115 */
115 if (c->x86_vendor != X86_VENDOR_INTEL) 116 if (c->x86_vendor != X86_VENDOR_INTEL ||
117 boot_cpu_has(X86_FEATURE_XENPV))
116 cpu0_hotpluggable = 0; 118 cpu0_hotpluggable = 0;
117 119
118 /* 120 /*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1463468ba9a0..6490f618e096 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1415,7 +1415,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
1415 1415
1416static unsigned long __init get_loops_per_jiffy(void) 1416static unsigned long __init get_loops_per_jiffy(void)
1417{ 1417{
1418 unsigned long lpj = tsc_khz * KHZ; 1418 u64 lpj = (u64)tsc_khz * KHZ;
1419 1419
1420 do_div(lpj, HZ); 1420 do_div(lpj, HZ);
1421 return lpj; 1421 return lpj;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 8bde0a419f86..5dd3317d761f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
65#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); 65#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
66#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); 66#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
67 67
68/*
69 * This section contains data which will be mapped as decrypted. Memory
70 * encryption operates on a page basis. Make this section PMD-aligned
71 * to avoid splitting the pages while mapping the section early.
72 *
73 * Note: We use a separate section so that only this section gets
74 * decrypted to avoid exposing more than we wish.
75 */
76#define BSS_DECRYPTED \
77 . = ALIGN(PMD_SIZE); \
78 __start_bss_decrypted = .; \
79 *(.bss..decrypted); \
80 . = ALIGN(PAGE_SIZE); \
81 __start_bss_decrypted_unused = .; \
82 . = ALIGN(PMD_SIZE); \
83 __end_bss_decrypted = .; \
84
68#else 85#else
69 86
70#define X86_ALIGN_RODATA_BEGIN 87#define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
74 91
75#define ALIGN_ENTRY_TEXT_BEGIN 92#define ALIGN_ENTRY_TEXT_BEGIN
76#define ALIGN_ENTRY_TEXT_END 93#define ALIGN_ENTRY_TEXT_END
94#define BSS_DECRYPTED
77 95
78#endif 96#endif
79 97
@@ -355,6 +373,7 @@ SECTIONS
355 __bss_start = .; 373 __bss_start = .;
356 *(.bss..page_aligned) 374 *(.bss..page_aligned)
357 *(.bss) 375 *(.bss)
376 BSS_DECRYPTED
358 . = ALIGN(PAGE_SIZE); 377 . = ALIGN(PAGE_SIZE);
359 __bss_stop = .; 378 __bss_stop = .;
360 } 379 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0cefba28c864..fbb0e6df121b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
548} 548}
549 549
550int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, 550int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
551 unsigned long ipi_bitmap_high, int min, 551 unsigned long ipi_bitmap_high, u32 min,
552 unsigned long icr, int op_64_bit) 552 unsigned long icr, int op_64_bit)
553{ 553{
554 int i; 554 int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
571 rcu_read_lock(); 571 rcu_read_lock();
572 map = rcu_dereference(kvm->arch.apic_map); 572 map = rcu_dereference(kvm->arch.apic_map);
573 573
574 if (min > map->max_apic_id)
575 goto out;
574 /* Bits above cluster_size are masked in the caller. */ 576 /* Bits above cluster_size are masked in the caller. */
575 for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) { 577 for_each_set_bit(i, &ipi_bitmap_low,
576 vcpu = map->phys_map[min + i]->vcpu; 578 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
577 count += kvm_apic_set_irq(vcpu, &irq, NULL); 579 if (map->phys_map[min + i]) {
580 vcpu = map->phys_map[min + i]->vcpu;
581 count += kvm_apic_set_irq(vcpu, &irq, NULL);
582 }
578 } 583 }
579 584
580 min += cluster_size; 585 min += cluster_size;
581 for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) { 586
582 vcpu = map->phys_map[min + i]->vcpu; 587 if (min > map->max_apic_id)
583 count += kvm_apic_set_irq(vcpu, &irq, NULL); 588 goto out;
589
590 for_each_set_bit(i, &ipi_bitmap_high,
591 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
592 if (map->phys_map[min + i]) {
593 vcpu = map->phys_map[min + i]->vcpu;
594 count += kvm_apic_set_irq(vcpu, &irq, NULL);
595 }
584 } 596 }
585 597
598out:
586 rcu_read_unlock(); 599 rcu_read_unlock();
587 return count; 600 return count;
588} 601}
@@ -1331,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
1331 1344
1332static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) 1345static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1333{ 1346{
1334 return kvm_apic_hw_enabled(apic) && 1347 return addr >= apic->base_address &&
1335 addr >= apic->base_address && 1348 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1336 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1337} 1349}
1338 1350
1339static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, 1351static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1345,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1345 if (!apic_mmio_in_range(apic, address)) 1357 if (!apic_mmio_in_range(apic, address))
1346 return -EOPNOTSUPP; 1358 return -EOPNOTSUPP;
1347 1359
1360 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1361 if (!kvm_check_has_quirk(vcpu->kvm,
1362 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1363 return -EOPNOTSUPP;
1364
1365 memset(data, 0xff, len);
1366 return 0;
1367 }
1368
1348 kvm_lapic_reg_read(apic, offset, len, data); 1369 kvm_lapic_reg_read(apic, offset, len, data);
1349 1370
1350 return 0; 1371 return 0;
@@ -1904,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1904 if (!apic_mmio_in_range(apic, address)) 1925 if (!apic_mmio_in_range(apic, address))
1905 return -EOPNOTSUPP; 1926 return -EOPNOTSUPP;
1906 1927
1928 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1929 if (!kvm_check_has_quirk(vcpu->kvm,
1930 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1931 return -EOPNOTSUPP;
1932
1933 return 0;
1934 }
1935
1907 /* 1936 /*
1908 * APIC register must be aligned on 128-bits boundary. 1937 * APIC register must be aligned on 128-bits boundary.
1909 * 32/64/128 bits registers must be accessed thru 32 bits. 1938 * 32/64/128 bits registers must be accessed thru 32 bits.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a282321329b5..d7e9bce6ff61 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
899{ 899{
900 /* 900 /*
901 * Make sure the write to vcpu->mode is not reordered in front of 901 * Make sure the write to vcpu->mode is not reordered in front of
902 * reads to sptes. If it does, kvm_commit_zap_page() can see us 902 * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
903 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table. 903 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
904 */ 904 */
905 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); 905 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1853 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); 1853 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1854} 1854}
1855 1855
1856int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1857{
1858 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
1859}
1860
1861int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) 1856int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
1862{ 1857{
1863 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); 1858 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5212,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
5217int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, 5212int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
5218 void *insn, int insn_len) 5213 void *insn, int insn_len)
5219{ 5214{
5220 int r, emulation_type = EMULTYPE_RETRY; 5215 int r, emulation_type = 0;
5221 enum emulation_result er; 5216 enum emulation_result er;
5222 bool direct = vcpu->arch.mmu.direct_map; 5217 bool direct = vcpu->arch.mmu.direct_map;
5223 5218
@@ -5230,10 +5225,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
5230 r = RET_PF_INVALID; 5225 r = RET_PF_INVALID;
5231 if (unlikely(error_code & PFERR_RSVD_MASK)) { 5226 if (unlikely(error_code & PFERR_RSVD_MASK)) {
5232 r = handle_mmio_page_fault(vcpu, cr2, direct); 5227 r = handle_mmio_page_fault(vcpu, cr2, direct);
5233 if (r == RET_PF_EMULATE) { 5228 if (r == RET_PF_EMULATE)
5234 emulation_type = 0;
5235 goto emulate; 5229 goto emulate;
5236 }
5237 } 5230 }
5238 5231
5239 if (r == RET_PF_INVALID) { 5232 if (r == RET_PF_INVALID) {
@@ -5260,8 +5253,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
5260 return 1; 5253 return 1;
5261 } 5254 }
5262 5255
5263 if (mmio_info_in_cache(vcpu, cr2, direct)) 5256 /*
5264 emulation_type = 0; 5257 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5258 * optimistically try to just unprotect the page and let the processor
5259 * re-execute the instruction that caused the page fault. Do not allow
5260 * retrying MMIO emulation, as it's not only pointless but could also
5261 * cause us to enter an infinite loop because the processor will keep
5262 * faulting on the non-existent MMIO address. Retrying an instruction
5263 * from a nested guest is also pointless and dangerous as we are only
5264 * explicitly shadowing L1's page tables, i.e. unprotecting something
5265 * for L1 isn't going to magically fix whatever issue cause L2 to fail.
5266 */
5267 if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
5268 emulation_type = EMULTYPE_ALLOW_RETRY;
5265emulate: 5269emulate:
5266 /* 5270 /*
5267 * On AMD platforms, under certain conditions insn_len may be zero on #NPF. 5271 * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
@@ -5413,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
5413{ 5417{
5414 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); 5418 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
5415 5419
5416 kvm_init_mmu(vcpu, true); 5420 /*
5421 * kvm_mmu_setup() is called only on vCPU initialization.
5422 * Therefore, no need to reset mmu roots as they are not yet
5423 * initialized.
5424 */
5425 kvm_init_mmu(vcpu, false);
5417} 5426}
5418 5427
5419static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm, 5428static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6276140044d0..d96092b35936 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
776 } 776 }
777 777
778 if (!svm->next_rip) { 778 if (!svm->next_rip) {
779 if (emulate_instruction(vcpu, EMULTYPE_SKIP) != 779 if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
780 EMULATE_DONE) 780 EMULATE_DONE)
781 printk(KERN_DEBUG "%s: NOP\n", __func__); 781 printk(KERN_DEBUG "%s: NOP\n", __func__);
782 return; 782 return;
@@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void)
1226 min_sev_asid = cpuid_edx(0x8000001F); 1226 min_sev_asid = cpuid_edx(0x8000001F);
1227 1227
1228 /* Initialize SEV ASID bitmap */ 1228 /* Initialize SEV ASID bitmap */
1229 sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid), 1229 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1230 sizeof(unsigned long), GFP_KERNEL);
1231 if (!sev_asid_bitmap) 1230 if (!sev_asid_bitmap)
1232 return 1; 1231 return 1;
1233 1232
@@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void)
1405 int cpu; 1404 int cpu;
1406 1405
1407 if (svm_sev_enabled()) 1406 if (svm_sev_enabled())
1408 kfree(sev_asid_bitmap); 1407 bitmap_free(sev_asid_bitmap);
1409 1408
1410 for_each_possible_cpu(cpu) 1409 for_each_possible_cpu(cpu)
1411 svm_cpu_uninit(cpu); 1410 svm_cpu_uninit(cpu);
@@ -2715,7 +2714,7 @@ static int gp_interception(struct vcpu_svm *svm)
2715 2714
2716 WARN_ON_ONCE(!enable_vmware_backdoor); 2715 WARN_ON_ONCE(!enable_vmware_backdoor);
2717 2716
2718 er = emulate_instruction(vcpu, 2717 er = kvm_emulate_instruction(vcpu,
2719 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); 2718 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
2720 if (er == EMULATE_USER_EXIT) 2719 if (er == EMULATE_USER_EXIT)
2721 return 0; 2720 return 0;
@@ -2819,7 +2818,7 @@ static int io_interception(struct vcpu_svm *svm)
2819 string = (io_info & SVM_IOIO_STR_MASK) != 0; 2818 string = (io_info & SVM_IOIO_STR_MASK) != 0;
2820 in = (io_info & SVM_IOIO_TYPE_MASK) != 0; 2819 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2821 if (string) 2820 if (string)
2822 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 2821 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
2823 2822
2824 port = io_info >> 16; 2823 port = io_info >> 16;
2825 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; 2824 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3860,7 @@ static int iret_interception(struct vcpu_svm *svm)
3861static int invlpg_interception(struct vcpu_svm *svm) 3860static int invlpg_interception(struct vcpu_svm *svm)
3862{ 3861{
3863 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) 3862 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3864 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3863 return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3865 3864
3866 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); 3865 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3867 return kvm_skip_emulated_instruction(&svm->vcpu); 3866 return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3868,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
3869 3868
3870static int emulate_on_interception(struct vcpu_svm *svm) 3869static int emulate_on_interception(struct vcpu_svm *svm)
3871{ 3870{
3872 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; 3871 return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3873} 3872}
3874 3873
3875static int rsm_interception(struct vcpu_svm *svm) 3874static int rsm_interception(struct vcpu_svm *svm)
3876{ 3875{
3877 return x86_emulate_instruction(&svm->vcpu, 0, 0, 3876 return kvm_emulate_instruction_from_buffer(&svm->vcpu,
3878 rsm_ins_bytes, 2) == EMULATE_DONE; 3877 rsm_ins_bytes, 2) == EMULATE_DONE;
3879} 3878}
3880 3879
3881static int rdpmc_interception(struct vcpu_svm *svm) 3880static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4699,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4700 ret = avic_unaccel_trap_write(svm); 4699 ret = avic_unaccel_trap_write(svm);
4701 } else { 4700 } else {
4702 /* Handling Fault */ 4701 /* Handling Fault */
4703 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE); 4702 ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4704 } 4703 }
4705 4704
4706 return ret; 4705 return ret;
@@ -6747,7 +6746,7 @@ e_free:
6747static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) 6746static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6748{ 6747{
6749 unsigned long vaddr, vaddr_end, next_vaddr; 6748 unsigned long vaddr, vaddr_end, next_vaddr;
6750 unsigned long dst_vaddr, dst_vaddr_end; 6749 unsigned long dst_vaddr;
6751 struct page **src_p, **dst_p; 6750 struct page **src_p, **dst_p;
6752 struct kvm_sev_dbg debug; 6751 struct kvm_sev_dbg debug;
6753 unsigned long n; 6752 unsigned long n;
@@ -6763,7 +6762,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6763 size = debug.len; 6762 size = debug.len;
6764 vaddr_end = vaddr + size; 6763 vaddr_end = vaddr + size;
6765 dst_vaddr = debug.dst_uaddr; 6764 dst_vaddr = debug.dst_uaddr;
6766 dst_vaddr_end = dst_vaddr + size;
6767 6765
6768 for (; vaddr < vaddr_end; vaddr = next_vaddr) { 6766 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6769 int len, s_off, d_off; 6767 int len, s_off, d_off;
@@ -7150,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7150 .check_intercept = svm_check_intercept, 7148 .check_intercept = svm_check_intercept,
7151 .handle_external_intr = svm_handle_external_intr, 7149 .handle_external_intr = svm_handle_external_intr,
7152 7150
7151 .request_immediate_exit = __kvm_request_immediate_exit,
7152
7153 .sched_in = svm_sched_in, 7153 .sched_in = svm_sched_in,
7154 7154
7155 .pmu_ops = &amd_pmu_ops, 7155 .pmu_ops = &amd_pmu_ops,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d26f3c4985b..06412ba46aa3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -397,6 +397,7 @@ struct loaded_vmcs {
397 int cpu; 397 int cpu;
398 bool launched; 398 bool launched;
399 bool nmi_known_unmasked; 399 bool nmi_known_unmasked;
400 bool hv_timer_armed;
400 /* Support for vnmi-less CPUs */ 401 /* Support for vnmi-less CPUs */
401 int soft_vnmi_blocked; 402 int soft_vnmi_blocked;
402 ktime_t entry_time; 403 ktime_t entry_time;
@@ -1019,6 +1020,8 @@ struct vcpu_vmx {
1019 int ple_window; 1020 int ple_window;
1020 bool ple_window_dirty; 1021 bool ple_window_dirty;
1021 1022
1023 bool req_immediate_exit;
1024
1022 /* Support for PML */ 1025 /* Support for PML */
1023#define PML_ENTITY_NUM 512 1026#define PML_ENTITY_NUM 512
1024 struct page *pml_pg; 1027 struct page *pml_pg;
@@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
2864 u16 fs_sel, gs_sel; 2867 u16 fs_sel, gs_sel;
2865 int i; 2868 int i;
2866 2869
2870 vmx->req_immediate_exit = false;
2871
2867 if (vmx->loaded_cpu_state) 2872 if (vmx->loaded_cpu_state)
2868 return; 2873 return;
2869 2874
@@ -5393,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
5393 * To use VMXON (and later other VMX instructions), a guest 5398 * To use VMXON (and later other VMX instructions), a guest
5394 * must first be able to turn on cr4.VMXE (see handle_vmon()). 5399 * must first be able to turn on cr4.VMXE (see handle_vmon()).
5395 * So basically the check on whether to allow nested VMX 5400 * So basically the check on whether to allow nested VMX
5396 * is here. 5401 * is here. We operate under the default treatment of SMM,
5402 * so VMX cannot be enabled under SMM.
5397 */ 5403 */
5398 if (!nested_vmx_allowed(vcpu)) 5404 if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
5399 return 1; 5405 return 1;
5400 } 5406 }
5401 5407
@@ -6183,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
6183 nested_mark_vmcs12_pages_dirty(vcpu); 6189 nested_mark_vmcs12_pages_dirty(vcpu);
6184} 6190}
6185 6191
6192static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6193{
6194 struct vcpu_vmx *vmx = to_vmx(vcpu);
6195 void *vapic_page;
6196 u32 vppr;
6197 int rvi;
6198
6199 if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
6200 !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
6201 WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
6202 return false;
6203
6204 rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
6205
6206 vapic_page = kmap(vmx->nested.virtual_apic_page);
6207 vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
6208 kunmap(vmx->nested.virtual_apic_page);
6209
6210 return ((rvi & 0xf0) > (vppr & 0xf0));
6211}
6212
6186static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, 6213static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
6187 bool nested) 6214 bool nested)
6188{ 6215{
@@ -6983,7 +7010,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
6983 * Cause the #SS fault with 0 error code in VM86 mode. 7010 * Cause the #SS fault with 0 error code in VM86 mode.
6984 */ 7011 */
6985 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) { 7012 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
6986 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) { 7013 if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
6987 if (vcpu->arch.halt_request) { 7014 if (vcpu->arch.halt_request) {
6988 vcpu->arch.halt_request = 0; 7015 vcpu->arch.halt_request = 0;
6989 return kvm_vcpu_halt(vcpu); 7016 return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7081,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
7054 7081
7055 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { 7082 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
7056 WARN_ON_ONCE(!enable_vmware_backdoor); 7083 WARN_ON_ONCE(!enable_vmware_backdoor);
7057 er = emulate_instruction(vcpu, 7084 er = kvm_emulate_instruction(vcpu,
7058 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL); 7085 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
7059 if (er == EMULATE_USER_EXIT) 7086 if (er == EMULATE_USER_EXIT)
7060 return 0; 7087 return 0;
@@ -7157,7 +7184,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
7157 ++vcpu->stat.io_exits; 7184 ++vcpu->stat.io_exits;
7158 7185
7159 if (string) 7186 if (string)
7160 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7187 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7161 7188
7162 port = exit_qualification >> 16; 7189 port = exit_qualification >> 16;
7163 size = (exit_qualification & 7) + 1; 7190 size = (exit_qualification & 7) + 1;
@@ -7231,7 +7258,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
7231static int handle_desc(struct kvm_vcpu *vcpu) 7258static int handle_desc(struct kvm_vcpu *vcpu)
7232{ 7259{
7233 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); 7260 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
7234 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7261 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7235} 7262}
7236 7263
7237static int handle_cr(struct kvm_vcpu *vcpu) 7264static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7507,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
7480 7507
7481static int handle_invd(struct kvm_vcpu *vcpu) 7508static int handle_invd(struct kvm_vcpu *vcpu)
7482{ 7509{
7483 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7510 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7484} 7511}
7485 7512
7486static int handle_invlpg(struct kvm_vcpu *vcpu) 7513static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7574,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
7547 return kvm_skip_emulated_instruction(vcpu); 7574 return kvm_skip_emulated_instruction(vcpu);
7548 } 7575 }
7549 } 7576 }
7550 return emulate_instruction(vcpu, 0) == EMULATE_DONE; 7577 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
7551} 7578}
7552 7579
7553static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu) 7580static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7731,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
7704 if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) 7731 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
7705 return kvm_skip_emulated_instruction(vcpu); 7732 return kvm_skip_emulated_instruction(vcpu);
7706 else 7733 else
7707 return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP, 7734 return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
7708 NULL, 0) == EMULATE_DONE; 7735 EMULATE_DONE;
7709 } 7736 }
7710 7737
7711 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0); 7738 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7775,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
7748 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 7775 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
7749 return 1; 7776 return 1;
7750 7777
7751 err = emulate_instruction(vcpu, 0); 7778 err = kvm_emulate_instruction(vcpu, 0);
7752 7779
7753 if (err == EMULATE_USER_EXIT) { 7780 if (err == EMULATE_USER_EXIT) {
7754 ++vcpu->stat.mmio_exits; 7781 ++vcpu->stat.mmio_exits;
@@ -7966,6 +7993,9 @@ static __init int hardware_setup(void)
7966 kvm_x86_ops->enable_log_dirty_pt_masked = NULL; 7993 kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
7967 } 7994 }
7968 7995
7996 if (!cpu_has_vmx_preemption_timer())
7997 kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
7998
7969 if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) { 7999 if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
7970 u64 vmx_msr; 8000 u64 vmx_msr;
7971 8001
@@ -9208,7 +9238,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
9208 9238
9209static int handle_preemption_timer(struct kvm_vcpu *vcpu) 9239static int handle_preemption_timer(struct kvm_vcpu *vcpu)
9210{ 9240{
9211 kvm_lapic_expired_hv_timer(vcpu); 9241 if (!to_vmx(vcpu)->req_immediate_exit)
9242 kvm_lapic_expired_hv_timer(vcpu);
9212 return 1; 9243 return 1;
9213} 9244}
9214 9245
@@ -10595,24 +10626,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
10595 msrs[i].host, false); 10626 msrs[i].host, false);
10596} 10627}
10597 10628
10598static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) 10629static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
10630{
10631 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
10632 if (!vmx->loaded_vmcs->hv_timer_armed)
10633 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
10634 PIN_BASED_VMX_PREEMPTION_TIMER);
10635 vmx->loaded_vmcs->hv_timer_armed = true;
10636}
10637
10638static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
10599{ 10639{
10600 struct vcpu_vmx *vmx = to_vmx(vcpu); 10640 struct vcpu_vmx *vmx = to_vmx(vcpu);
10601 u64 tscl; 10641 u64 tscl;
10602 u32 delta_tsc; 10642 u32 delta_tsc;
10603 10643
10604 if (vmx->hv_deadline_tsc == -1) 10644 if (vmx->req_immediate_exit) {
10645 vmx_arm_hv_timer(vmx, 0);
10605 return; 10646 return;
10647 }
10606 10648
10607 tscl = rdtsc(); 10649 if (vmx->hv_deadline_tsc != -1) {
10608 if (vmx->hv_deadline_tsc > tscl) 10650 tscl = rdtsc();
10609 /* sure to be 32 bit only because checked on set_hv_timer */ 10651 if (vmx->hv_deadline_tsc > tscl)
10610 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> 10652 /* set_hv_timer ensures the delta fits in 32-bits */
10611 cpu_preemption_timer_multi); 10653 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
10612 else 10654 cpu_preemption_timer_multi);
10613 delta_tsc = 0; 10655 else
10656 delta_tsc = 0;
10657
10658 vmx_arm_hv_timer(vmx, delta_tsc);
10659 return;
10660 }
10614 10661
10615 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc); 10662 if (vmx->loaded_vmcs->hv_timer_armed)
10663 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
10664 PIN_BASED_VMX_PREEMPTION_TIMER);
10665 vmx->loaded_vmcs->hv_timer_armed = false;
10616} 10666}
10617 10667
10618static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) 10668static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
10672 10722
10673 atomic_switch_perf_msrs(vmx); 10723 atomic_switch_perf_msrs(vmx);
10674 10724
10675 vmx_arm_hv_timer(vcpu); 10725 vmx_update_hv_timer(vcpu);
10676 10726
10677 /* 10727 /*
10678 * If this vCPU has touched SPEC_CTRL, restore the guest's value if 10728 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -11427,16 +11477,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
11427 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; 11477 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
11428 struct vcpu_vmx *vmx = to_vmx(vcpu); 11478 struct vcpu_vmx *vmx = to_vmx(vcpu);
11429 11479
11430 if (vcpu->arch.virtual_tsc_khz == 0) 11480 /*
11431 return; 11481 * A timer value of zero is architecturally guaranteed to cause
11432 11482 * a VMExit prior to executing any instructions in the guest.
11433 /* Make sure short timeouts reliably trigger an immediate vmexit. 11483 */
11434 * hrtimer_start does not guarantee this. */ 11484 if (preemption_timeout == 0) {
11435 if (preemption_timeout <= 1) {
11436 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); 11485 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
11437 return; 11486 return;
11438 } 11487 }
11439 11488
11489 if (vcpu->arch.virtual_tsc_khz == 0)
11490 return;
11491
11440 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; 11492 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
11441 preemption_timeout *= 1000000; 11493 preemption_timeout *= 1000000;
11442 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); 11494 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
11646 * bits 15:8 should be zero in posted_intr_nv, 11698 * bits 15:8 should be zero in posted_intr_nv,
11647 * the descriptor address has been already checked 11699 * the descriptor address has been already checked
11648 * in nested_get_vmcs12_pages. 11700 * in nested_get_vmcs12_pages.
11701 *
11702 * bits 5:0 of posted_intr_desc_addr should be zero.
11649 */ 11703 */
11650 if (nested_cpu_has_posted_intr(vmcs12) && 11704 if (nested_cpu_has_posted_intr(vmcs12) &&
11651 (!nested_cpu_has_vid(vmcs12) || 11705 (!nested_cpu_has_vid(vmcs12) ||
11652 !nested_exit_intr_ack_set(vcpu) || 11706 !nested_exit_intr_ack_set(vcpu) ||
11653 vmcs12->posted_intr_nv & 0xff00)) 11707 (vmcs12->posted_intr_nv & 0xff00) ||
11708 (vmcs12->posted_intr_desc_addr & 0x3f) ||
11709 (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
11654 return -EINVAL; 11710 return -EINVAL;
11655 11711
11656 /* tpr shadow is needed by all apicv features. */ 11712 /* tpr shadow is needed by all apicv features. */
@@ -12076,11 +12132,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12076 12132
12077 exec_control = vmcs12->pin_based_vm_exec_control; 12133 exec_control = vmcs12->pin_based_vm_exec_control;
12078 12134
12079 /* Preemption timer setting is only taken from vmcs01. */ 12135 /* Preemption timer setting is computed directly in vmx_vcpu_run. */
12080 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
12081 exec_control |= vmcs_config.pin_based_exec_ctrl; 12136 exec_control |= vmcs_config.pin_based_exec_ctrl;
12082 if (vmx->hv_deadline_tsc == -1) 12137 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
12083 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; 12138 vmx->loaded_vmcs->hv_timer_armed = false;
12084 12139
12085 /* Posted interrupts setting is only taken from vmcs12. */ 12140 /* Posted interrupts setting is only taken from vmcs12. */
12086 if (nested_cpu_has_posted_intr(vmcs12)) { 12141 if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12318,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
12318 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) 12373 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
12319 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 12374 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12320 12375
12376 if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
12377 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12378
12321 if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12)) 12379 if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
12322 return VMXERR_ENTRY_INVALID_CONTROL_FIELD; 12380 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
12323 12381
@@ -12537,8 +12595,11 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
12537 struct vmcs12 *vmcs12 = get_vmcs12(vcpu); 12595 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
12538 bool from_vmentry = !!exit_qual; 12596 bool from_vmentry = !!exit_qual;
12539 u32 dummy_exit_qual; 12597 u32 dummy_exit_qual;
12598 u32 vmcs01_cpu_exec_ctrl;
12540 int r = 0; 12599 int r = 0;
12541 12600
12601 vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
12602
12542 enter_guest_mode(vcpu); 12603 enter_guest_mode(vcpu);
12543 12604
12544 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) 12605 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -12575,6 +12636,25 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
12575 } 12636 }
12576 12637
12577 /* 12638 /*
12639 * If L1 had a pending IRQ/NMI until it executed
12640 * VMLAUNCH/VMRESUME which wasn't delivered because it was
12641 * disallowed (e.g. interrupts disabled), L0 needs to
12642 * evaluate if this pending event should cause an exit from L2
12643 * to L1 or delivered directly to L2 (e.g. In case L1 don't
12644 * intercept EXTERNAL_INTERRUPT).
12645 *
12646 * Usually this would be handled by L0 requesting a
12647 * IRQ/NMI window by setting VMCS accordingly. However,
12648 * this setting was done on VMCS01 and now VMCS02 is active
12649 * instead. Thus, we force L0 to perform pending event
12650 * evaluation by requesting a KVM_REQ_EVENT.
12651 */
12652 if (vmcs01_cpu_exec_ctrl &
12653 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
12654 kvm_make_request(KVM_REQ_EVENT, vcpu);
12655 }
12656
12657 /*
12578 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point 12658 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
12579 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet 12659 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
12580 * returned as far as L1 is concerned. It will only return (and set 12660 * returned as far as L1 is concerned. It will only return (and set
@@ -12841,6 +12921,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
12841 return 0; 12921 return 0;
12842} 12922}
12843 12923
12924static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
12925{
12926 to_vmx(vcpu)->req_immediate_exit = true;
12927}
12928
12844static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) 12929static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
12845{ 12930{
12846 ktime_t remaining = 12931 ktime_t remaining =
@@ -13231,12 +13316,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
13231 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); 13316 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
13232 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); 13317 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
13233 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); 13318 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
13234 if (vmx->hv_deadline_tsc == -1) 13319
13235 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
13236 PIN_BASED_VMX_PREEMPTION_TIMER);
13237 else
13238 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
13239 PIN_BASED_VMX_PREEMPTION_TIMER);
13240 if (kvm_has_tsc_control) 13320 if (kvm_has_tsc_control)
13241 decache_tsc_multiplier(vmx); 13321 decache_tsc_multiplier(vmx);
13242 13322
@@ -13440,18 +13520,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
13440 return -ERANGE; 13520 return -ERANGE;
13441 13521
13442 vmx->hv_deadline_tsc = tscl + delta_tsc; 13522 vmx->hv_deadline_tsc = tscl + delta_tsc;
13443 vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
13444 PIN_BASED_VMX_PREEMPTION_TIMER);
13445
13446 return delta_tsc == 0; 13523 return delta_tsc == 0;
13447} 13524}
13448 13525
13449static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu) 13526static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
13450{ 13527{
13451 struct vcpu_vmx *vmx = to_vmx(vcpu); 13528 to_vmx(vcpu)->hv_deadline_tsc = -1;
13452 vmx->hv_deadline_tsc = -1;
13453 vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
13454 PIN_BASED_VMX_PREEMPTION_TIMER);
13455} 13529}
13456#endif 13530#endif
13457 13531
@@ -13932,6 +14006,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
13932 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) 14006 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
13933 return -EINVAL; 14007 return -EINVAL;
13934 14008
14009 /*
14010 * SMM temporarily disables VMX, so we cannot be in guest mode,
14011 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
14012 * must be zero.
14013 */
14014 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
14015 return -EINVAL;
14016
13935 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && 14017 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
13936 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) 14018 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
13937 return -EINVAL; 14019 return -EINVAL;
@@ -13988,9 +14070,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
13988 check_vmentry_postreqs(vcpu, vmcs12, &exit_qual)) 14070 check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
13989 return -EINVAL; 14071 return -EINVAL;
13990 14072
13991 if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
13992 vmx->nested.nested_run_pending = 1;
13993
13994 vmx->nested.dirty_vmcs12 = true; 14073 vmx->nested.dirty_vmcs12 = true;
13995 ret = enter_vmx_non_root_mode(vcpu, NULL); 14074 ret = enter_vmx_non_root_mode(vcpu, NULL);
13996 if (ret) 14075 if (ret)
@@ -14078,6 +14157,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
14078 .apicv_post_state_restore = vmx_apicv_post_state_restore, 14157 .apicv_post_state_restore = vmx_apicv_post_state_restore,
14079 .hwapic_irr_update = vmx_hwapic_irr_update, 14158 .hwapic_irr_update = vmx_hwapic_irr_update,
14080 .hwapic_isr_update = vmx_hwapic_isr_update, 14159 .hwapic_isr_update = vmx_hwapic_isr_update,
14160 .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
14081 .sync_pir_to_irr = vmx_sync_pir_to_irr, 14161 .sync_pir_to_irr = vmx_sync_pir_to_irr,
14082 .deliver_posted_interrupt = vmx_deliver_posted_interrupt, 14162 .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
14083 14163
@@ -14111,6 +14191,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
14111 .umip_emulated = vmx_umip_emulated, 14191 .umip_emulated = vmx_umip_emulated,
14112 14192
14113 .check_nested_events = vmx_check_nested_events, 14193 .check_nested_events = vmx_check_nested_events,
14194 .request_immediate_exit = vmx_request_immediate_exit,
14114 14195
14115 .sched_in = vmx_sched_in, 14196 .sched_in = vmx_sched_in,
14116 14197
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 506bd2b4b8bb..edbf00ec56b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
628 gfn_t gfn; 628 gfn_t gfn;
629 int r; 629 int r;
630 630
631 if (is_long_mode(vcpu) || !is_pae(vcpu)) 631 if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
632 return false; 632 return false;
633 633
634 if (!test_bit(VCPU_EXREG_PDPTR, 634 if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2537 break; 2537 break;
2538 case MSR_PLATFORM_INFO: 2538 case MSR_PLATFORM_INFO:
2539 if (!msr_info->host_initiated || 2539 if (!msr_info->host_initiated ||
2540 data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
2541 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) && 2540 (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
2542 cpuid_fault_enabled(vcpu))) 2541 cpuid_fault_enabled(vcpu)))
2543 return 1; 2542 return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2780 msr_info->data = vcpu->arch.osvw.status; 2779 msr_info->data = vcpu->arch.osvw.status;
2781 break; 2780 break;
2782 case MSR_PLATFORM_INFO: 2781 case MSR_PLATFORM_INFO:
2782 if (!msr_info->host_initiated &&
2783 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
2784 return 1;
2783 msr_info->data = vcpu->arch.msr_platform_info; 2785 msr_info->data = vcpu->arch.msr_platform_info;
2784 break; 2786 break;
2785 case MSR_MISC_FEATURES_ENABLES: 2787 case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2927 case KVM_CAP_SPLIT_IRQCHIP: 2929 case KVM_CAP_SPLIT_IRQCHIP:
2928 case KVM_CAP_IMMEDIATE_EXIT: 2930 case KVM_CAP_IMMEDIATE_EXIT:
2929 case KVM_CAP_GET_MSR_FEATURES: 2931 case KVM_CAP_GET_MSR_FEATURES:
2932 case KVM_CAP_MSR_PLATFORM_INFO:
2930 r = 1; 2933 r = 1;
2931 break; 2934 break;
2932 case KVM_CAP_SYNC_REGS: 2935 case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
4007 break; 4010 break;
4008 4011
4009 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); 4012 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
4013 r = -EFAULT;
4010 if (get_user(user_data_size, &user_kvm_nested_state->size)) 4014 if (get_user(user_data_size, &user_kvm_nested_state->size))
4011 return -EFAULT; 4015 break;
4012 4016
4013 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, 4017 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
4014 user_data_size); 4018 user_data_size);
4015 if (r < 0) 4019 if (r < 0)
4016 return r; 4020 break;
4017 4021
4018 if (r > user_data_size) { 4022 if (r > user_data_size) {
4019 if (put_user(r, &user_kvm_nested_state->size)) 4023 if (put_user(r, &user_kvm_nested_state->size))
4020 return -EFAULT; 4024 r = -EFAULT;
4021 return -E2BIG; 4025 else
4026 r = -E2BIG;
4027 break;
4022 } 4028 }
4029
4023 r = 0; 4030 r = 0;
4024 break; 4031 break;
4025 } 4032 }
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
4031 if (!kvm_x86_ops->set_nested_state) 4038 if (!kvm_x86_ops->set_nested_state)
4032 break; 4039 break;
4033 4040
4041 r = -EFAULT;
4034 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state))) 4042 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
4035 return -EFAULT; 4043 break;
4036 4044
4045 r = -EINVAL;
4037 if (kvm_state.size < sizeof(kvm_state)) 4046 if (kvm_state.size < sizeof(kvm_state))
4038 return -EINVAL; 4047 break;
4039 4048
4040 if (kvm_state.flags & 4049 if (kvm_state.flags &
4041 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE)) 4050 ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
4042 return -EINVAL; 4051 break;
4043 4052
4044 /* nested_run_pending implies guest_mode. */ 4053 /* nested_run_pending implies guest_mode. */
4045 if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING) 4054 if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
4046 return -EINVAL; 4055 break;
4047 4056
4048 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); 4057 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
4049 break; 4058 break;
@@ -4350,6 +4359,10 @@ split_irqchip_unlock:
4350 kvm->arch.pause_in_guest = true; 4359 kvm->arch.pause_in_guest = true;
4351 r = 0; 4360 r = 0;
4352 break; 4361 break;
4362 case KVM_CAP_MSR_PLATFORM_INFO:
4363 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
4364 r = 0;
4365 break;
4353 default: 4366 default:
4354 r = -EINVAL; 4367 r = -EINVAL;
4355 break; 4368 break;
@@ -4987,7 +5000,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
4987 emul_type = 0; 5000 emul_type = 0;
4988 } 5001 }
4989 5002
4990 er = emulate_instruction(vcpu, emul_type); 5003 er = kvm_emulate_instruction(vcpu, emul_type);
4991 if (er == EMULATE_USER_EXIT) 5004 if (er == EMULATE_USER_EXIT)
4992 return 0; 5005 return 0;
4993 if (er != EMULATE_DONE) 5006 if (er != EMULATE_DONE)
@@ -5870,7 +5883,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
5870 gpa_t gpa = cr2; 5883 gpa_t gpa = cr2;
5871 kvm_pfn_t pfn; 5884 kvm_pfn_t pfn;
5872 5885
5873 if (emulation_type & EMULTYPE_NO_REEXECUTE) 5886 if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
5887 return false;
5888
5889 if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5874 return false; 5890 return false;
5875 5891
5876 if (!vcpu->arch.mmu.direct_map) { 5892 if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5974,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
5958 */ 5974 */
5959 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; 5975 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
5960 5976
5961 if (!(emulation_type & EMULTYPE_RETRY)) 5977 if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
5978 return false;
5979
5980 if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5962 return false; 5981 return false;
5963 5982
5964 if (x86_page_table_writing_insn(ctxt)) 5983 if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6295,19 @@ restart:
6276 6295
6277 return r; 6296 return r;
6278} 6297}
6279EXPORT_SYMBOL_GPL(x86_emulate_instruction); 6298
6299int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
6300{
6301 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
6302}
6303EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
6304
6305int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
6306 void *insn, int insn_len)
6307{
6308 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
6309}
6310EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
6280 6311
6281static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 6312static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
6282 unsigned short port) 6313 unsigned short port)
@@ -7343,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
7343} 7374}
7344EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page); 7375EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
7345 7376
7377void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
7378{
7379 smp_send_reschedule(vcpu->cpu);
7380}
7381EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
7382
7346/* 7383/*
7347 * Returns 1 to let vcpu_run() continue the guest execution loop without 7384 * Returns 1 to let vcpu_run() continue the guest execution loop without
7348 * exiting to the userspace. Otherwise, the value will be returned to the 7385 * exiting to the userspace. Otherwise, the value will be returned to the
@@ -7547,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
7547 7584
7548 if (req_immediate_exit) { 7585 if (req_immediate_exit) {
7549 kvm_make_request(KVM_REQ_EVENT, vcpu); 7586 kvm_make_request(KVM_REQ_EVENT, vcpu);
7550 smp_send_reschedule(vcpu->cpu); 7587 kvm_x86_ops->request_immediate_exit(vcpu);
7551 } 7588 }
7552 7589
7553 trace_kvm_entry(vcpu->vcpu_id); 7590 trace_kvm_entry(vcpu->vcpu_id);
@@ -7734,7 +7771,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
7734{ 7771{
7735 int r; 7772 int r;
7736 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 7773 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
7737 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 7774 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
7738 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 7775 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
7739 if (r != EMULATE_DONE) 7776 if (r != EMULATE_DONE)
7740 return 0; 7777 return 0;
@@ -7811,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
7811 return 0; 7848 return 0;
7812} 7849}
7813 7850
7851/* Swap (qemu) user FPU context for the guest FPU context. */
7852static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7853{
7854 preempt_disable();
7855 copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
7856 /* PKRU is separately restored in kvm_x86_ops->run. */
7857 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
7858 ~XFEATURE_MASK_PKRU);
7859 preempt_enable();
7860 trace_kvm_fpu(1);
7861}
7862
7863/* When vcpu_run ends, restore user space FPU context. */
7864static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7865{
7866 preempt_disable();
7867 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7868 copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
7869 preempt_enable();
7870 ++vcpu->stat.fpu_reload;
7871 trace_kvm_fpu(0);
7872}
7873
7814int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 7874int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7815{ 7875{
7816 int r; 7876 int r;
@@ -8159,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8159 kvm_update_cpuid(vcpu); 8219 kvm_update_cpuid(vcpu);
8160 8220
8161 idx = srcu_read_lock(&vcpu->kvm->srcu); 8221 idx = srcu_read_lock(&vcpu->kvm->srcu);
8162 if (!is_long_mode(vcpu) && is_pae(vcpu)) { 8222 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
8163 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); 8223 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
8164 mmu_reset_needed = 1; 8224 mmu_reset_needed = 1;
8165 } 8225 }
@@ -8388,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
8388 vcpu->arch.cr0 |= X86_CR0_ET; 8448 vcpu->arch.cr0 |= X86_CR0_ET;
8389} 8449}
8390 8450
8391/* Swap (qemu) user FPU context for the guest FPU context. */
8392void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
8393{
8394 preempt_disable();
8395 copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
8396 /* PKRU is separately restored in kvm_x86_ops->run. */
8397 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
8398 ~XFEATURE_MASK_PKRU);
8399 preempt_enable();
8400 trace_kvm_fpu(1);
8401}
8402
8403/* When vcpu_run ends, restore user space FPU context. */
8404void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
8405{
8406 preempt_disable();
8407 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
8408 copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
8409 preempt_enable();
8410 ++vcpu->stat.fpu_reload;
8411 trace_kvm_fpu(0);
8412}
8413
8414void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 8451void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
8415{ 8452{
8416 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; 8453 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8834,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
8834 kvm->arch.kvmclock_offset = -ktime_get_boot_ns(); 8871 kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
8835 pvclock_update_vm_gtod_copy(kvm); 8872 pvclock_update_vm_gtod_copy(kvm);
8836 8873
8874 kvm->arch.guest_can_read_msr_platform_info = true;
8875
8837 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); 8876 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
8838 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); 8877 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
8839 8878
@@ -9182,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
9182 kvm_page_track_flush_slot(kvm, slot); 9221 kvm_page_track_flush_slot(kvm, slot);
9183} 9222}
9184 9223
9224static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
9225{
9226 return (is_guest_mode(vcpu) &&
9227 kvm_x86_ops->guest_apic_has_interrupt &&
9228 kvm_x86_ops->guest_apic_has_interrupt(vcpu));
9229}
9230
9185static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) 9231static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
9186{ 9232{
9187 if (!list_empty_careful(&vcpu->async_pf.done)) 9233 if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9206,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
9206 return true; 9252 return true;
9207 9253
9208 if (kvm_arch_interrupt_allowed(vcpu) && 9254 if (kvm_arch_interrupt_allowed(vcpu) &&
9209 kvm_cpu_has_interrupt(vcpu)) 9255 (kvm_cpu_has_interrupt(vcpu) ||
9256 kvm_guest_apic_has_interrupt(vcpu)))
9210 return true; 9257 return true;
9211 9258
9212 if (kvm_hv_has_stimer_pending(vcpu)) 9259 if (kvm_hv_has_stimer_pending(vcpu))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 257f27620bc2..67b9568613f3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
274bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, 274bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
275 int page_num); 275 int page_num);
276bool kvm_vector_hashing_enabled(void); 276bool kvm_vector_hashing_enabled(void);
277int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
278 int emulation_type, void *insn, int insn_len);
277 279
278#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ 280#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
279 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ 281 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index c8c6ad0d58b8..3f435d7fca5e 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/export.h> 8#include <linux/export.h>
9 9
10#include <asm/tlbflush.h>
11
10/* 12/*
11 * We rely on the nested NMI work to allow atomic faults from the NMI path; the 13 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
12 * nested NMI paths are careful to preserve CR2. 14 * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
19 if (__range_not_ok(from, n, TASK_SIZE)) 21 if (__range_not_ok(from, n, TASK_SIZE))
20 return n; 22 return n;
21 23
24 if (!nmi_uaccess_okay())
25 return n;
26
22 /* 27 /*
23 * Even though this function is typically called from NMI/IRQ context 28 * Even though this function is typically called from NMI/IRQ context
24 * disable pagefaults so that its behaviour is consistent even when 29 * disable pagefaults so that its behaviour is consistent even when
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b9123c497e0a..47bebfe6efa7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
837 837
838 printk(KERN_CONT "\n"); 838 printk(KERN_CONT "\n");
839 839
840 show_opcodes((u8 *)regs->ip, loglvl); 840 show_opcodes(regs, loglvl);
841} 841}
842 842
843static void 843static void
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7a8fc26c1115..faca978ebf9d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
815 set_memory_np_noalias(begin_ul, len_pages); 815 set_memory_np_noalias(begin_ul, len_pages);
816} 816}
817 817
818void __weak mem_encrypt_free_decrypted_mem(void) { }
819
818void __ref free_initmem(void) 820void __ref free_initmem(void)
819{ 821{
820 e820__reallocate_tables(); 822 e820__reallocate_tables();
821 823
824 mem_encrypt_free_decrypted_mem();
825
822 free_kernel_image_pages(&__init_begin, &__init_end); 826 free_kernel_image_pages(&__init_begin, &__init_end);
823} 827}
824 828
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b2de398d1fd3..006f373f54ab 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -348,6 +348,30 @@ bool sev_active(void)
348EXPORT_SYMBOL(sev_active); 348EXPORT_SYMBOL(sev_active);
349 349
350/* Architecture __weak replacement functions */ 350/* Architecture __weak replacement functions */
351void __init mem_encrypt_free_decrypted_mem(void)
352{
353 unsigned long vaddr, vaddr_end, npages;
354 int r;
355
356 vaddr = (unsigned long)__start_bss_decrypted_unused;
357 vaddr_end = (unsigned long)__end_bss_decrypted;
358 npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
359
360 /*
361 * The unused memory range was mapped decrypted, change the encryption
362 * attribute from decrypted to encrypted before freeing it.
363 */
364 if (mem_encrypt_active()) {
365 r = set_memory_encrypted(vaddr, npages);
366 if (r) {
367 pr_warn("failed to free unused decrypted pages\n");
368 return;
369 }
370 }
371
372 free_init_pages("unused decrypted", vaddr, vaddr_end);
373}
374
351void __init mem_encrypt_init(void) 375void __init mem_encrypt_init(void)
352{ 376{
353 if (!sme_me_mask) 377 if (!sme_me_mask)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8d6c34fe49be..51a5a69ecac9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1420 return 0; 1420 return 0;
1421} 1421}
1422 1422
1423/*
1424 * Machine check recovery code needs to change cache mode of poisoned
1425 * pages to UC to avoid speculative access logging another error. But
1426 * passing the address of the 1:1 mapping to set_memory_uc() is a fine
1427 * way to encourage a speculative access. So we cheat and flip the top
1428 * bit of the address. This works fine for the code that updates the
1429 * page tables. But at the end of the process we need to flush the cache
1430 * and the non-canonical address causes a #GP fault when used by the
1431 * CLFLUSH instruction.
1432 *
1433 * But in the common case we already have a canonical address. This code
1434 * will fix the top bit if needed and is a no-op otherwise.
1435 */
1436static inline unsigned long make_addr_canonical_again(unsigned long addr)
1437{
1438#ifdef CONFIG_X86_64
1439 return (long)(addr << 1) >> 1;
1440#else
1441 return addr;
1442#endif
1443}
1444
1445
1423static int change_page_attr_set_clr(unsigned long *addr, int numpages, 1446static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1424 pgprot_t mask_set, pgprot_t mask_clr, 1447 pgprot_t mask_set, pgprot_t mask_clr,
1425 int force_split, int in_flag, 1448 int force_split, int in_flag,
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1465 * Save address for cache flush. *addr is modified in the call 1488 * Save address for cache flush. *addr is modified in the call
1466 * to __change_page_attr_set_clr() below. 1489 * to __change_page_attr_set_clr() below.
1467 */ 1490 */
1468 baddr = *addr; 1491 baddr = make_addr_canonical_again(*addr);
1469 } 1492 }
1470 1493
1471 /* Must avoid aliasing mappings in the highmem code */ 1494 /* Must avoid aliasing mappings in the highmem code */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e848a4811785..089e78c4effd 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
269 if (pgd_val(pgd) != 0) { 269 if (pgd_val(pgd) != 0) {
270 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); 270 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
271 271
272 *pgdp = native_make_pgd(0); 272 pgd_clear(pgdp);
273 273
274 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); 274 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
275 pmd_free(mm, pmd); 275 pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
494 int changed = !pte_same(*ptep, entry); 494 int changed = !pte_same(*ptep, entry);
495 495
496 if (changed && dirty) 496 if (changed && dirty)
497 *ptep = entry; 497 set_pte(ptep, entry);
498 498
499 return changed; 499 return changed;
500} 500}
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
509 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 509 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
510 510
511 if (changed && dirty) { 511 if (changed && dirty) {
512 *pmdp = entry; 512 set_pmd(pmdp, entry);
513 /* 513 /*
514 * We had a write-protection fault here and changed the pmd 514 * We had a write-protection fault here and changed the pmd
515 * to to more permissive. No need to flush the TLB for that, 515 * to to more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
529 VM_BUG_ON(address & ~HPAGE_PUD_MASK); 529 VM_BUG_ON(address & ~HPAGE_PUD_MASK);
530 530
531 if (changed && dirty) { 531 if (changed && dirty) {
532 *pudp = entry; 532 set_pud(pudp, entry);
533 /* 533 /*
534 * We had a write-protection fault here and changed the pud 534 * We had a write-protection fault here and changed the pud
535 * to to more permissive. No need to flush the TLB for that, 535 * to to more permissive. No need to flush the TLB for that,
@@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
637{ 637{
638 unsigned long address = __fix_to_virt(idx); 638 unsigned long address = __fix_to_virt(idx);
639 639
640#ifdef CONFIG_X86_64
641 /*
642 * Ensure that the static initial page tables are covering the
643 * fixmap completely.
644 */
645 BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
646 (FIXMAP_PMD_NUM * PTRS_PER_PTE));
647#endif
648
640 if (idx >= __end_of_fixed_addresses) { 649 if (idx >= __end_of_fixed_addresses) {
641 BUG(); 650 BUG();
642 return; 651 return;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 31341ae7309f..c1fc1ae6b429 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
248 * 248 *
249 * Returns a pointer to a PTE on success, or NULL on failure. 249 * Returns a pointer to a PTE on success, or NULL on failure.
250 */ 250 */
251static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) 251static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
252{ 252{
253 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); 253 gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
254 pmd_t *pmd; 254 pmd_t *pmd;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 9517d1b2a281..e96b99eb800c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
305 305
306 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); 306 choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
307 307
308 /* Let nmi_uaccess_okay() know that we're changing CR3. */
309 this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
310 barrier();
311
308 if (need_flush) { 312 if (need_flush) {
309 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); 313 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
310 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); 314 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
335 if (next != &init_mm) 339 if (next != &init_mm)
336 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); 340 this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
337 341
342 /* Make sure we write CR3 before loaded_mm. */
343 barrier();
344
338 this_cpu_write(cpu_tlbstate.loaded_mm, next); 345 this_cpu_write(cpu_tlbstate.loaded_mm, next);
339 this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid); 346 this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
340 } 347 }
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 324b93328b37..9959657127f4 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -85,12 +85,7 @@ pgd_t * __init efi_call_phys_prolog(void)
85 85
86void __init efi_call_phys_epilog(pgd_t *save_pgd) 86void __init efi_call_phys_epilog(pgd_t *save_pgd)
87{ 87{
88 struct desc_ptr gdt_descr; 88 load_fixmap_gdt(0);
89
90 gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
91 gdt_descr.size = GDT_SIZE - 1;
92 load_gdt(&gdt_descr);
93
94 load_cr3(save_pgd); 89 load_cr3(save_pgd);
95 __flush_tlb_all(); 90 __flush_tlb_all();
96} 91}
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 45b700ac5fe7..dd461c0167ef 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
435static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) 435static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
436{ 436{
437 trace_xen_mmu_set_pte_atomic(ptep, pte); 437 trace_xen_mmu_set_pte_atomic(ptep, pte);
438 set_64bit((u64 *)ptep, native_pte_val(pte)); 438 __xen_set_pte(ptep, pte);
439} 439}
440 440
441static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 441static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
442{ 442{
443 trace_xen_mmu_pte_clear(mm, addr, ptep); 443 trace_xen_mmu_pte_clear(mm, addr, ptep);
444 if (!xen_batched_set_pte(ptep, native_make_pte(0))) 444 __xen_set_pte(ptep, native_make_pte(0));
445 native_pte_clear(mm, addr, ptep);
446} 445}
447 446
448static void xen_pmd_clear(pmd_t *pmdp) 447static void xen_pmd_clear(pmd_t *pmdp)
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1570 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & 1569 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1571 pte_val_ma(pte)); 1570 pte_val_ma(pte));
1572#endif 1571#endif
1573 native_set_pte(ptep, pte); 1572 __xen_set_pte(ptep, pte);
1574} 1573}
1575 1574
1576/* Early in boot, while setting up the initial pagetable, assume 1575/* Early in boot, while setting up the initial pagetable, assume
@@ -1908,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1908 /* L3_k[511] -> level2_fixmap_pgt */ 1907 /* L3_k[511] -> level2_fixmap_pgt */
1909 convert_pfn_mfn(level3_kernel_pgt); 1908 convert_pfn_mfn(level3_kernel_pgt);
1910 1909
1911 /* L3_k[511][506] -> level1_fixmap_pgt */ 1910 /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
1912 convert_pfn_mfn(level2_fixmap_pgt); 1911 convert_pfn_mfn(level2_fixmap_pgt);
1913 1912
1914 /* We get [511][511] and have Xen's version of level2_kernel_pgt */ 1913 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1953,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1953 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); 1952 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1954 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); 1953 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1955 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); 1954 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1956 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); 1955
1956 for (i = 0; i < FIXMAP_PMD_NUM; i++) {
1957 set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
1958 PAGE_KERNEL_RO);
1959 }
1957 1960
1958 /* Pin down new L4 */ 1961 /* Pin down new L4 */
1959 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, 1962 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
@@ -2061,7 +2064,6 @@ void __init xen_relocate_p2m(void)
2061 pud_t *pud; 2064 pud_t *pud;
2062 pgd_t *pgd; 2065 pgd_t *pgd;
2063 unsigned long *new_p2m; 2066 unsigned long *new_p2m;
2064 int save_pud;
2065 2067
2066 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); 2068 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2067 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; 2069 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -2091,7 +2093,6 @@ void __init xen_relocate_p2m(void)
2091 2093
2092 pgd = __va(read_cr3_pa()); 2094 pgd = __va(read_cr3_pa());
2093 new_p2m = (unsigned long *)(2 * PGDIR_SIZE); 2095 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2094 save_pud = n_pud;
2095 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { 2096 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2096 pud = early_memremap(pud_phys, PAGE_SIZE); 2097 pud = early_memremap(pud_phys, PAGE_SIZE);
2097 clear_page(pud); 2098 clear_page(pud);
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 7d00d4ad44d4..95997e6c0696 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
478irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) 478irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
479{ 479{
480 int err, ret = IRQ_NONE; 480 int err, ret = IRQ_NONE;
481 struct pt_regs regs; 481 struct pt_regs regs = {0};
482 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); 482 const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
483 uint8_t xenpmu_flags = get_xenpmu_flags(); 483 uint8_t xenpmu_flags = get_xenpmu_flags();
484 484
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 04d038f3b6fa..b9ad83a0ee5d 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -4,6 +4,7 @@ config ZONE_DMA
4 4
5config XTENSA 5config XTENSA
6 def_bool y 6 def_bool y
7 select ARCH_HAS_SG_CHAIN
7 select ARCH_HAS_SYNC_DMA_FOR_CPU 8 select ARCH_HAS_SYNC_DMA_FOR_CPU
8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
9 select ARCH_NO_COHERENT_DMA_MMAP if !MMU 10 select ARCH_NO_COHERENT_DMA_MMAP if !MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 295c120ed099..d67e30faff9c 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -64,11 +64,7 @@ endif
64vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y)) 64vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
65plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y)) 65plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
66 66
67ifeq ($(KBUILD_SRC),)
68KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
69else
70KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs)) 67KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
71endif
72 68
73KBUILD_DEFCONFIG := iss_defconfig 69KBUILD_DEFCONFIG := iss_defconfig
74 70
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index f4bbb28026f8..58709e89a8ed 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
78 78
79void __init platform_setup(char **p_cmdline) 79void __init platform_setup(char **p_cmdline)
80{ 80{
81 static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
82 static char cmdline[COMMAND_LINE_SIZE] __initdata;
81 int argc = simc_argc(); 83 int argc = simc_argc();
82 int argv_size = simc_argv_size(); 84 int argv_size = simc_argv_size();
83 85
84 if (argc > 1) { 86 if (argc > 1) {
85 void **argv = alloc_bootmem(argv_size); 87 if (argv_size > sizeof(argv)) {
86 char *cmdline = alloc_bootmem(argv_size); 88 pr_err("%s: command line too long: argv_size = %d\n",
87 int i; 89 __func__, argv_size);
90 } else {
91 int i;
88 92
89 cmdline[0] = 0; 93 cmdline[0] = 0;
90 simc_argv((void *)argv); 94 simc_argv((void *)argv);
91 95
92 for (i = 1; i < argc; ++i) { 96 for (i = 1; i < argc; ++i) {
93 if (i > 1) 97 if (i > 1)
94 strcat(cmdline, " "); 98 strcat(cmdline, " ");
95 strcat(cmdline, argv[i]); 99 strcat(cmdline, argv[i]);
100 }
101 *p_cmdline = cmdline;
96 } 102 }
97 *p_cmdline = cmdline;
98 } 103 }
99 104
100 atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block); 105 atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 58c6efa9f9a9..9fe5952d117d 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
275 275
276void bfqg_and_blkg_put(struct bfq_group *bfqg) 276void bfqg_and_blkg_put(struct bfq_group *bfqg)
277{ 277{
278 bfqg_put(bfqg);
279
280 blkg_put(bfqg_to_blkg(bfqg)); 278 blkg_put(bfqg_to_blkg(bfqg));
279
280 bfqg_put(bfqg);
281} 281}
282 282
283/* @stats = 0 */ 283/* @stats = 0 */
diff --git a/block/bio.c b/block/bio.c
index b12966e415d3..0093bed81c0e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
1684 const int sgrp = op_stat_group(req_op); 1684 const int sgrp = op_stat_group(req_op);
1685 int cpu = part_stat_lock(); 1685 int cpu = part_stat_lock();
1686 1686
1687 part_stat_add(cpu, part, ticks[sgrp], duration); 1687 part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
1688 part_round_stats(q, cpu, part); 1688 part_round_stats(q, cpu, part);
1689 part_dec_in_flight(q, part, op_is_write(req_op)); 1689 part_dec_in_flight(q, part, op_is_write(req_op));
1690 1690
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2015{ 2015{
2016 if (unlikely(bio->bi_blkg)) 2016 if (unlikely(bio->bi_blkg))
2017 return -EBUSY; 2017 return -EBUSY;
2018 blkg_get(blkg); 2018 if (!blkg_try_get(blkg))
2019 return -ENODEV;
2019 bio->bi_blkg = blkg; 2020 bio->bi_blkg = blkg;
2020 return 0; 2021 return 0;
2021} 2022}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 694595b29b8f..c630e02836a8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
310 } 310 }
311} 311}
312 312
313static void blkg_pd_offline(struct blkcg_gq *blkg)
314{
315 int i;
316
317 lockdep_assert_held(blkg->q->queue_lock);
318 lockdep_assert_held(&blkg->blkcg->lock);
319
320 for (i = 0; i < BLKCG_MAX_POLS; i++) {
321 struct blkcg_policy *pol = blkcg_policy[i];
322
323 if (blkg->pd[i] && !blkg->pd[i]->offline &&
324 pol->pd_offline_fn) {
325 pol->pd_offline_fn(blkg->pd[i]);
326 blkg->pd[i]->offline = true;
327 }
328 }
329}
330
331static void blkg_destroy(struct blkcg_gq *blkg) 313static void blkg_destroy(struct blkcg_gq *blkg)
332{ 314{
333 struct blkcg *blkcg = blkg->blkcg; 315 struct blkcg *blkcg = blkg->blkcg;
334 struct blkcg_gq *parent = blkg->parent; 316 struct blkcg_gq *parent = blkg->parent;
317 int i;
335 318
336 lockdep_assert_held(blkg->q->queue_lock); 319 lockdep_assert_held(blkg->q->queue_lock);
337 lockdep_assert_held(&blkcg->lock); 320 lockdep_assert_held(&blkcg->lock);
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
340 WARN_ON_ONCE(list_empty(&blkg->q_node)); 323 WARN_ON_ONCE(list_empty(&blkg->q_node));
341 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node)); 324 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
342 325
326 for (i = 0; i < BLKCG_MAX_POLS; i++) {
327 struct blkcg_policy *pol = blkcg_policy[i];
328
329 if (blkg->pd[i] && pol->pd_offline_fn)
330 pol->pd_offline_fn(blkg->pd[i]);
331 }
332
343 if (parent) { 333 if (parent) {
344 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes); 334 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
345 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios); 335 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q)
382 struct blkcg *blkcg = blkg->blkcg; 372 struct blkcg *blkcg = blkg->blkcg;
383 373
384 spin_lock(&blkcg->lock); 374 spin_lock(&blkcg->lock);
385 blkg_pd_offline(blkg);
386 blkg_destroy(blkg); 375 blkg_destroy(blkg);
387 spin_unlock(&blkcg->lock); 376 spin_unlock(&blkcg->lock);
388 } 377 }
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = {
1053 { } /* terminate */ 1042 { } /* terminate */
1054}; 1043};
1055 1044
1045/*
1046 * blkcg destruction is a three-stage process.
1047 *
1048 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
1049 * which offlines writeback. Here we tie the next stage of blkg destruction
1050 * to the completion of writeback associated with the blkcg. This lets us
1051 * avoid punting potentially large amounts of outstanding writeback to root
1052 * while maintaining any ongoing policies. The next stage is triggered when
1053 * the nr_cgwbs count goes to zero.
1054 *
1055 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1056 * and handles the destruction of blkgs. Here the css reference held by
1057 * the blkg is put back eventually allowing blkcg_css_free() to be called.
1058 * This work may occur in cgwb_release_workfn() on the cgwb_release
1059 * workqueue. Any submitted ios that fail to get the blkg ref will be
1060 * punted to the root_blkg.
1061 *
1062 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1063 * This finally frees the blkcg.
1064 */
1065
1056/** 1066/**
1057 * blkcg_css_offline - cgroup css_offline callback 1067 * blkcg_css_offline - cgroup css_offline callback
1058 * @css: css of interest 1068 * @css: css of interest
1059 * 1069 *
1060 * This function is called when @css is about to go away and responsible 1070 * This function is called when @css is about to go away. Here the cgwbs are
1061 * for offlining all blkgs pd and killing all wbs associated with @css. 1071 * offlined first and only once writeback associated with the blkcg has
1062 * blkgs pd offline should be done while holding both q and blkcg locks. 1072 * finished do we start step 2 (see above).
1063 * As blkcg lock is nested inside q lock, this function performs reverse
1064 * double lock dancing.
1065 *
1066 * This is the blkcg counterpart of ioc_release_fn().
1067 */ 1073 */
1068static void blkcg_css_offline(struct cgroup_subsys_state *css) 1074static void blkcg_css_offline(struct cgroup_subsys_state *css)
1069{ 1075{
1070 struct blkcg *blkcg = css_to_blkcg(css); 1076 struct blkcg *blkcg = css_to_blkcg(css);
1071 struct blkcg_gq *blkg;
1072
1073 spin_lock_irq(&blkcg->lock);
1074
1075 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
1076 struct request_queue *q = blkg->q;
1077
1078 if (spin_trylock(q->queue_lock)) {
1079 blkg_pd_offline(blkg);
1080 spin_unlock(q->queue_lock);
1081 } else {
1082 spin_unlock_irq(&blkcg->lock);
1083 cpu_relax();
1084 spin_lock_irq(&blkcg->lock);
1085 }
1086 }
1087
1088 spin_unlock_irq(&blkcg->lock);
1089 1077
1078 /* this prevents anyone from attaching or migrating to this blkcg */
1090 wb_blkcg_offline(blkcg); 1079 wb_blkcg_offline(blkcg);
1080
1081 /* put the base cgwb reference allowing step 2 to be triggered */
1082 blkcg_cgwb_put(blkcg);
1091} 1083}
1092 1084
1093/** 1085/**
1094 * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg 1086 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1095 * @blkcg: blkcg of interest 1087 * @blkcg: blkcg of interest
1096 * 1088 *
1097 * This function is called when blkcg css is about to free and responsible for 1089 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1098 * destroying all blkgs associated with @blkcg.
1099 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1100 * is nested inside q lock, this function performs reverse double lock dancing. 1090 * is nested inside q lock, this function performs reverse double lock dancing.
1091 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1092 * blkcg_css_free to eventually be called.
1093 *
1094 * This is the blkcg counterpart of ioc_release_fn().
1101 */ 1095 */
1102static void blkcg_destroy_all_blkgs(struct blkcg *blkcg) 1096void blkcg_destroy_blkgs(struct blkcg *blkcg)
1103{ 1097{
1104 spin_lock_irq(&blkcg->lock); 1098 spin_lock_irq(&blkcg->lock);
1099
1105 while (!hlist_empty(&blkcg->blkg_list)) { 1100 while (!hlist_empty(&blkcg->blkg_list)) {
1106 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first, 1101 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1107 struct blkcg_gq, 1102 struct blkcg_gq, blkcg_node);
1108 blkcg_node);
1109 struct request_queue *q = blkg->q; 1103 struct request_queue *q = blkg->q;
1110 1104
1111 if (spin_trylock(q->queue_lock)) { 1105 if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
1117 spin_lock_irq(&blkcg->lock); 1111 spin_lock_irq(&blkcg->lock);
1118 } 1112 }
1119 } 1113 }
1114
1120 spin_unlock_irq(&blkcg->lock); 1115 spin_unlock_irq(&blkcg->lock);
1121} 1116}
1122 1117
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
1125 struct blkcg *blkcg = css_to_blkcg(css); 1120 struct blkcg *blkcg = css_to_blkcg(css);
1126 int i; 1121 int i;
1127 1122
1128 blkcg_destroy_all_blkgs(blkcg);
1129
1130 mutex_lock(&blkcg_pol_mutex); 1123 mutex_lock(&blkcg_pol_mutex);
1131 1124
1132 list_del(&blkcg->all_blkcgs_node); 1125 list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1189 INIT_HLIST_HEAD(&blkcg->blkg_list); 1182 INIT_HLIST_HEAD(&blkcg->blkg_list);
1190#ifdef CONFIG_CGROUP_WRITEBACK 1183#ifdef CONFIG_CGROUP_WRITEBACK
1191 INIT_LIST_HEAD(&blkcg->cgwb_list); 1184 INIT_LIST_HEAD(&blkcg->cgwb_list);
1185 refcount_set(&blkcg->cgwb_refcnt, 1);
1192#endif 1186#endif
1193 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs); 1187 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1194 1188
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
1480 1474
1481 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1475 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1482 if (blkg->pd[pol->plid]) { 1476 if (blkg->pd[pol->plid]) {
1483 if (!blkg->pd[pol->plid]->offline && 1477 if (pol->pd_offline_fn)
1484 pol->pd_offline_fn) {
1485 pol->pd_offline_fn(blkg->pd[pol->plid]); 1478 pol->pd_offline_fn(blkg->pd[pol->plid]);
1486 blkg->pd[pol->plid]->offline = true;
1487 }
1488 pol->pd_free_fn(blkg->pd[pol->plid]); 1479 pol->pd_free_fn(blkg->pd[pol->plid]);
1489 blkg->pd[pol->plid] = NULL; 1480 blkg->pd[pol->plid] = NULL;
1490 } 1481 }
@@ -1519,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
1519 for (i = 0; i < BLKCG_MAX_POLS; i++) 1510 for (i = 0; i < BLKCG_MAX_POLS; i++)
1520 if (!blkcg_policy[i]) 1511 if (!blkcg_policy[i])
1521 break; 1512 break;
1522 if (i >= BLKCG_MAX_POLS) 1513 if (i >= BLKCG_MAX_POLS) {
1514 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1523 goto err_unlock; 1515 goto err_unlock;
1516 }
1524 1517
1525 /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */ 1518 /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
1526 if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || 1519 if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
diff --git a/block/blk-core.c b/block/blk-core.c
index dee56c282efb..cff0a60ee200 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
2163{ 2163{
2164 const int op = bio_op(bio); 2164 const int op = bio_op(bio);
2165 2165
2166 if (part->policy && (op_is_write(op) && !op_is_flush(op))) { 2166 if (part->policy && op_is_write(op)) {
2167 char b[BDEVNAME_SIZE]; 2167 char b[BDEVNAME_SIZE];
2168 2168
2169 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
2170 return false;
2171
2169 WARN_ONCE(1, 2172 WARN_ONCE(1,
2170 "generic_make_request: Trying to write " 2173 "generic_make_request: Trying to write "
2171 "to read-only block-device %s (partno %d)\n", 2174 "to read-only block-device %s (partno %d)\n",
@@ -2730,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now)
2730 * containing request is enough. 2733 * containing request is enough.
2731 */ 2734 */
2732 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2735 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2733 unsigned long duration;
2734 const int sgrp = op_stat_group(req_op(req)); 2736 const int sgrp = op_stat_group(req_op(req));
2735 struct hd_struct *part; 2737 struct hd_struct *part;
2736 int cpu; 2738 int cpu;
2737 2739
2738 duration = nsecs_to_jiffies(now - req->start_time_ns);
2739 cpu = part_stat_lock(); 2740 cpu = part_stat_lock();
2740 part = req->part; 2741 part = req->part;
2741 2742
2742 part_stat_inc(cpu, part, ios[sgrp]); 2743 part_stat_inc(cpu, part, ios[sgrp]);
2743 part_stat_add(cpu, part, ticks[sgrp], duration); 2744 part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
2744 part_round_stats(req->q, cpu, part); 2745 part_round_stats(req->q, cpu, part);
2745 part_dec_in_flight(req->q, part, rq_data_dir(req)); 2746 part_dec_in_flight(req->q, part, rq_data_dir(req));
2746 2747
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..41317c50a446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
322 322
323 /* 323 /*
324 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and 324 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
325 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter 325 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
326 * to avoid race with it. __blk_mq_update_nr_hw_queues will users 326 * to avoid race with it.
327 * synchronize_rcu to ensure all of the users go out of the critical
328 * section below and see zeroed q_usage_counter.
329 */ 327 */
330 rcu_read_lock(); 328 if (!percpu_ref_tryget(&q->q_usage_counter))
331 if (percpu_ref_is_zero(&q->q_usage_counter)) {
332 rcu_read_unlock();
333 return; 329 return;
334 }
335 330
336 queue_for_each_hw_ctx(q, hctx, i) { 331 queue_for_each_hw_ctx(q, hctx, i) {
337 struct blk_mq_tags *tags = hctx->tags; 332 struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
347 bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); 342 bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
348 bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); 343 bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
349 } 344 }
350 rcu_read_unlock(); 345 blk_queue_exit(q);
351} 346}
352 347
353static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, 348static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..e3c39ea8e17b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1628 BUG_ON(!rq->q); 1628 BUG_ON(!rq->q);
1629 if (rq->mq_ctx != this_ctx) { 1629 if (rq->mq_ctx != this_ctx) {
1630 if (this_ctx) { 1630 if (this_ctx) {
1631 trace_block_unplug(this_q, depth, from_schedule); 1631 trace_block_unplug(this_q, depth, !from_schedule);
1632 blk_mq_sched_insert_requests(this_q, this_ctx, 1632 blk_mq_sched_insert_requests(this_q, this_ctx,
1633 &ctx_list, 1633 &ctx_list,
1634 from_schedule); 1634 from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1648 * on 'ctx_list'. Do those. 1648 * on 'ctx_list'. Do those.
1649 */ 1649 */
1650 if (this_ctx) { 1650 if (this_ctx) {
1651 trace_block_unplug(this_q, depth, from_schedule); 1651 trace_block_unplug(this_q, depth, !from_schedule);
1652 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list, 1652 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1653 from_schedule); 1653 from_schedule);
1654 } 1654 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a3eede00d302..01d0620a4e4a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
2129static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio) 2129static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2130{ 2130{
2131#ifdef CONFIG_BLK_DEV_THROTTLING_LOW 2131#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2132 if (bio->bi_css) 2132 /* fallback to root_blkg if we fail to get a blkg ref */
2133 bio_associate_blkg(bio, tg_to_blkg(tg)); 2133 if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
2134 bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
2134 bio_issue_init(&bio->bi_issue, bio_sectors(bio)); 2135 bio_issue_init(&bio->bi_issue, bio_sectors(bio));
2135#endif 2136#endif
2136} 2137}
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 84507d3e9a98..8e20a0677dcf 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
123 } 123 }
124} 124}
125 125
126static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct) 126static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
127 enum wbt_flags wb_acct)
127{ 128{
128 struct rq_wb *rwb = RQWB(rqos);
129 struct rq_wait *rqw;
130 int inflight, limit; 129 int inflight, limit;
131 130
132 if (!(wb_acct & WBT_TRACKED))
133 return;
134
135 rqw = get_rq_wait(rwb, wb_acct);
136 inflight = atomic_dec_return(&rqw->inflight); 131 inflight = atomic_dec_return(&rqw->inflight);
137 132
138 /* 133 /*
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
166 int diff = limit - inflight; 161 int diff = limit - inflight;
167 162
168 if (!inflight || diff >= rwb->wb_background / 2) 163 if (!inflight || diff >= rwb->wb_background / 2)
169 wake_up(&rqw->wait); 164 wake_up_all(&rqw->wait);
170 } 165 }
171} 166}
172 167
168static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
169{
170 struct rq_wb *rwb = RQWB(rqos);
171 struct rq_wait *rqw;
172
173 if (!(wb_acct & WBT_TRACKED))
174 return;
175
176 rqw = get_rq_wait(rwb, wb_acct);
177 wbt_rqw_done(rwb, rqw, wb_acct);
178}
179
173/* 180/*
174 * Called on completion of a request. Note that it's also called when 181 * Called on completion of a request. Note that it's also called when
175 * a request is merged, when the request gets freed. 182 * a request is merged, when the request gets freed.
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
481 return limit; 488 return limit;
482} 489}
483 490
491struct wbt_wait_data {
492 struct wait_queue_entry wq;
493 struct task_struct *task;
494 struct rq_wb *rwb;
495 struct rq_wait *rqw;
496 unsigned long rw;
497 bool got_token;
498};
499
500static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
501 int wake_flags, void *key)
502{
503 struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
504 wq);
505
506 /*
507 * If we fail to get a budget, return -1 to interrupt the wake up
508 * loop in __wake_up_common.
509 */
510 if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
511 return -1;
512
513 data->got_token = true;
514 list_del_init(&curr->entry);
515 wake_up_process(data->task);
516 return 1;
517}
518
484/* 519/*
485 * Block if we will exceed our limit, or if we are currently waiting for 520 * Block if we will exceed our limit, or if we are currently waiting for
486 * the timer to kick off queuing again. 521 * the timer to kick off queuing again.
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
491 __acquires(lock) 526 __acquires(lock)
492{ 527{
493 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct); 528 struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
494 DECLARE_WAITQUEUE(wait, current); 529 struct wbt_wait_data data = {
530 .wq = {
531 .func = wbt_wake_function,
532 .entry = LIST_HEAD_INIT(data.wq.entry),
533 },
534 .task = current,
535 .rwb = rwb,
536 .rqw = rqw,
537 .rw = rw,
538 };
495 bool has_sleeper; 539 bool has_sleeper;
496 540
497 has_sleeper = wq_has_sleeper(&rqw->wait); 541 has_sleeper = wq_has_sleeper(&rqw->wait);
498 if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) 542 if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
499 return; 543 return;
500 544
501 add_wait_queue_exclusive(&rqw->wait, &wait); 545 prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
502 do { 546 do {
503 set_current_state(TASK_UNINTERRUPTIBLE); 547 if (data.got_token)
548 break;
504 549
505 if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw))) 550 if (!has_sleeper &&
551 rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
552 finish_wait(&rqw->wait, &data.wq);
553
554 /*
555 * We raced with wbt_wake_function() getting a token,
556 * which means we now have two. Put our local token
557 * and wake anyone else potentially waiting for one.
558 */
559 if (data.got_token)
560 wbt_rqw_done(rwb, rqw, wb_acct);
506 break; 561 break;
562 }
507 563
508 if (lock) { 564 if (lock) {
509 spin_unlock_irq(lock); 565 spin_unlock_irq(lock);
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
511 spin_lock_irq(lock); 567 spin_lock_irq(lock);
512 } else 568 } else
513 io_schedule(); 569 io_schedule();
570
514 has_sleeper = false; 571 has_sleeper = false;
515 } while (1); 572 } while (1);
516 573
517 __set_current_state(TASK_RUNNING); 574 finish_wait(&rqw->wait, &data.wq);
518 remove_wait_queue(&rqw->wait, &wait);
519} 575}
520 576
521static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) 577static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
580 return; 636 return;
581 } 637 }
582 638
583 if (current_is_kswapd())
584 flags |= WBT_KSWAPD;
585 if (bio_op(bio) == REQ_OP_DISCARD)
586 flags |= WBT_DISCARD;
587
588 __wbt_wait(rwb, flags, bio->bi_opf, lock); 639 __wbt_wait(rwb, flags, bio->bi_opf, lock);
589 640
590 if (!blk_stat_is_active(rwb->cb)) 641 if (!blk_stat_is_active(rwb->cb))
diff --git a/block/bsg.c b/block/bsg.c
index db588add6ba6..9a442c23a715 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,7 @@ struct bsg_device {
37 struct request_queue *queue; 37 struct request_queue *queue;
38 spinlock_t lock; 38 spinlock_t lock;
39 struct hlist_node dev_list; 39 struct hlist_node dev_list;
40 atomic_t ref_count; 40 refcount_t ref_count;
41 char name[20]; 41 char name[20];
42 int max_queue; 42 int max_queue;
43}; 43};
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)
252 252
253 mutex_lock(&bsg_mutex); 253 mutex_lock(&bsg_mutex);
254 254
255 if (!atomic_dec_and_test(&bd->ref_count)) { 255 if (!refcount_dec_and_test(&bd->ref_count)) {
256 mutex_unlock(&bsg_mutex); 256 mutex_unlock(&bsg_mutex);
257 return 0; 257 return 0;
258 } 258 }
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
290 290
291 bd->queue = rq; 291 bd->queue = rq;
292 292
293 atomic_set(&bd->ref_count, 1); 293 refcount_set(&bd->ref_count, 1);
294 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); 294 hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
295 295
296 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); 296 strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
308 308
309 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { 309 hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
310 if (bd->queue == q) { 310 if (bd->queue == q) {
311 atomic_inc(&bd->ref_count); 311 refcount_inc(&bd->ref_count);
312 goto found; 312 goto found;
313 } 313 }
314 } 314 }
diff --git a/block/elevator.c b/block/elevator.c
index 5ea6e7d600e4..fae58b2f906f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
609 609
610 while (e->type->ops.sq.elevator_dispatch_fn(q, 1)) 610 while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
611 ; 611 ;
612 if (q->nr_sorted && printed++ < 10) { 612 if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
613 printk(KERN_ERR "%s: forced dispatching is broken " 613 printk(KERN_ERR "%s: forced dispatching is broken "
614 "(nr_sorted=%u), please report this\n", 614 "(nr_sorted=%u), please report this\n",
615 q->elevator->type->elevator_name, q->nr_sorted); 615 q->elevator->type->elevator_name, q->nr_sorted);
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e)
895 spin_lock(&elv_list_lock); 895 spin_lock(&elv_list_lock);
896 if (elevator_find(e->elevator_name, e->uses_mq)) { 896 if (elevator_find(e->elevator_name, e->uses_mq)) {
897 spin_unlock(&elv_list_lock); 897 spin_unlock(&elv_list_lock);
898 if (e->icq_cache) 898 kmem_cache_destroy(e->icq_cache);
899 kmem_cache_destroy(e->icq_cache);
900 return -EBUSY; 899 return -EBUSY;
901 } 900 }
902 list_add_tail(&e->list, &elv_list); 901 list_add_tail(&e->list, &elv_list);
diff --git a/block/genhd.c b/block/genhd.c
index 8cc719a37b32..be5bab20b2ab 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
1343 part_stat_read(hd, ios[STAT_READ]), 1343 part_stat_read(hd, ios[STAT_READ]),
1344 part_stat_read(hd, merges[STAT_READ]), 1344 part_stat_read(hd, merges[STAT_READ]),
1345 part_stat_read(hd, sectors[STAT_READ]), 1345 part_stat_read(hd, sectors[STAT_READ]),
1346 jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])), 1346 (unsigned int)part_stat_read_msecs(hd, STAT_READ),
1347 part_stat_read(hd, ios[STAT_WRITE]), 1347 part_stat_read(hd, ios[STAT_WRITE]),
1348 part_stat_read(hd, merges[STAT_WRITE]), 1348 part_stat_read(hd, merges[STAT_WRITE]),
1349 part_stat_read(hd, sectors[STAT_WRITE]), 1349 part_stat_read(hd, sectors[STAT_WRITE]),
1350 jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])), 1350 (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
1351 inflight[0], 1351 inflight[0],
1352 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1352 jiffies_to_msecs(part_stat_read(hd, io_ticks)),
1353 jiffies_to_msecs(part_stat_read(hd, time_in_queue)), 1353 jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
1354 part_stat_read(hd, ios[STAT_DISCARD]), 1354 part_stat_read(hd, ios[STAT_DISCARD]),
1355 part_stat_read(hd, merges[STAT_DISCARD]), 1355 part_stat_read(hd, merges[STAT_DISCARD]),
1356 part_stat_read(hd, sectors[STAT_DISCARD]), 1356 part_stat_read(hd, sectors[STAT_DISCARD]),
1357 jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD])) 1357 (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
1358 ); 1358 );
1359 } 1359 }
1360 disk_part_iter_exit(&piter); 1360 disk_part_iter_exit(&piter);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5a8975a1201c..d3d14e81fb12 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
136 part_stat_read(p, ios[STAT_READ]), 136 part_stat_read(p, ios[STAT_READ]),
137 part_stat_read(p, merges[STAT_READ]), 137 part_stat_read(p, merges[STAT_READ]),
138 (unsigned long long)part_stat_read(p, sectors[STAT_READ]), 138 (unsigned long long)part_stat_read(p, sectors[STAT_READ]),
139 jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])), 139 (unsigned int)part_stat_read_msecs(p, STAT_READ),
140 part_stat_read(p, ios[STAT_WRITE]), 140 part_stat_read(p, ios[STAT_WRITE]),
141 part_stat_read(p, merges[STAT_WRITE]), 141 part_stat_read(p, merges[STAT_WRITE]),
142 (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]), 142 (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
143 jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])), 143 (unsigned int)part_stat_read_msecs(p, STAT_WRITE),
144 inflight[0], 144 inflight[0],
145 jiffies_to_msecs(part_stat_read(p, io_ticks)), 145 jiffies_to_msecs(part_stat_read(p, io_ticks)),
146 jiffies_to_msecs(part_stat_read(p, time_in_queue)), 146 jiffies_to_msecs(part_stat_read(p, time_in_queue)),
147 part_stat_read(p, ios[STAT_DISCARD]), 147 part_stat_read(p, ios[STAT_DISCARD]),
148 part_stat_read(p, merges[STAT_DISCARD]), 148 part_stat_read(p, merges[STAT_DISCARD]),
149 (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]), 149 (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
150 jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD]))); 150 (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
151} 151}
152 152
153ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, 153ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9706613eecf9..bf64cfa30feb 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
879#define LPSS_GPIODEF0_DMA_LLP BIT(13) 879#define LPSS_GPIODEF0_DMA_LLP BIT(13)
880 880
881static DEFINE_MUTEX(lpss_iosf_mutex); 881static DEFINE_MUTEX(lpss_iosf_mutex);
882static bool lpss_iosf_d3_entered; 882static bool lpss_iosf_d3_entered = true;
883 883
884static void lpss_iosf_enter_d3_state(void) 884static void lpss_iosf_enter_d3_state(void)
885{ 885{
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 292088fcc624..d2e29a19890d 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -35,11 +35,11 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#ifdef CONFIG_X86 36#ifdef CONFIG_X86
37#include <asm/mpspec.h> 37#include <asm/mpspec.h>
38#include <linux/dmi.h>
38#endif 39#endif
39#include <linux/acpi_iort.h> 40#include <linux/acpi_iort.h>
40#include <linux/pci.h> 41#include <linux/pci.h>
41#include <acpi/apei.h> 42#include <acpi/apei.h>
42#include <linux/dmi.h>
43#include <linux/suspend.h> 43#include <linux/suspend.h>
44 44
45#include "internal.h" 45#include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
82 }, 82 },
83 {} 83 {}
84}; 84};
85#else
86static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
87 {}
88};
89#endif 85#endif
90 86
91/* -------------------------------------------------------------------------- 87/* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
1033 1029
1034 acpi_permanent_mmap = true; 1030 acpi_permanent_mmap = true;
1035 1031
1032#ifdef CONFIG_X86
1036 /* 1033 /*
1037 * If the machine falls into the DMI check table, 1034 * If the machine falls into the DMI check table,
1038 * DSDT will be copied to memory 1035 * DSDT will be copied to memory.
1036 * Note that calling dmi_check_system() here on other architectures
1037 * would not be OK because only x86 initializes dmi early enough.
1038 * Thankfully only x86 systems need such quirks for now.
1039 */ 1039 */
1040 dmi_check_system(dsdt_dmi_table); 1040 dmi_check_system(dsdt_dmi_table);
1041#endif
1041 1042
1042 status = acpi_reallocate_root_table(); 1043 status = acpi_reallocate_root_table();
1043 if (ACPI_FAILURE(status)) { 1044 if (ACPI_FAILURE(status)) {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
332 return vma ? -ENOMEM : -ESRCH; 332 return vma ? -ENOMEM : -ESRCH;
333} 333}
334 334
335
336static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
337 struct vm_area_struct *vma)
338{
339 if (vma)
340 alloc->vma_vm_mm = vma->vm_mm;
341 /*
342 * If we see alloc->vma is not NULL, buffer data structures set up
343 * completely. Look at smp_rmb side binder_alloc_get_vma.
344 * We also want to guarantee new alloc->vma_vm_mm is always visible
345 * if alloc->vma is set.
346 */
347 smp_wmb();
348 alloc->vma = vma;
349}
350
351static inline struct vm_area_struct *binder_alloc_get_vma(
352 struct binder_alloc *alloc)
353{
354 struct vm_area_struct *vma = NULL;
355
356 if (alloc->vma) {
357 /* Look at description in binder_alloc_set_vma */
358 smp_rmb();
359 vma = alloc->vma;
360 }
361 return vma;
362}
363
335static struct binder_buffer *binder_alloc_new_buf_locked( 364static struct binder_buffer *binder_alloc_new_buf_locked(
336 struct binder_alloc *alloc, 365 struct binder_alloc *alloc,
337 size_t data_size, 366 size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
348 size_t size, data_offsets_size; 377 size_t size, data_offsets_size;
349 int ret; 378 int ret;
350 379
351 if (alloc->vma == NULL) { 380 if (!binder_alloc_get_vma(alloc)) {
352 binder_alloc_debug(BINDER_DEBUG_USER_ERROR, 381 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
353 "%d: binder_alloc_buf, no vma\n", 382 "%d: binder_alloc_buf, no vma\n",
354 alloc->pid); 383 alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
723 buffer->free = 1; 752 buffer->free = 1;
724 binder_insert_free_buffer(alloc, buffer); 753 binder_insert_free_buffer(alloc, buffer);
725 alloc->free_async_space = alloc->buffer_size / 2; 754 alloc->free_async_space = alloc->buffer_size / 2;
726 barrier(); 755 binder_alloc_set_vma(alloc, vma);
727 alloc->vma = vma;
728 alloc->vma_vm_mm = vma->vm_mm;
729 mmgrab(alloc->vma_vm_mm); 756 mmgrab(alloc->vma_vm_mm);
730 757
731 return 0; 758 return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
754 int buffers, page_count; 781 int buffers, page_count;
755 struct binder_buffer *buffer; 782 struct binder_buffer *buffer;
756 783
757 BUG_ON(alloc->vma);
758
759 buffers = 0; 784 buffers = 0;
760 mutex_lock(&alloc->mutex); 785 mutex_lock(&alloc->mutex);
786 BUG_ON(alloc->vma);
787
761 while ((n = rb_first(&alloc->allocated_buffers))) { 788 while ((n = rb_first(&alloc->allocated_buffers))) {
762 buffer = rb_entry(n, struct binder_buffer, rb_node); 789 buffer = rb_entry(n, struct binder_buffer, rb_node);
763 790
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
900 */ 927 */
901void binder_alloc_vma_close(struct binder_alloc *alloc) 928void binder_alloc_vma_close(struct binder_alloc *alloc)
902{ 929{
903 WRITE_ONCE(alloc->vma, NULL); 930 binder_alloc_set_vma(alloc, NULL);
904} 931}
905 932
906/** 933/**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
935 962
936 index = page - alloc->pages; 963 index = page - alloc->pages;
937 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 964 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
938 vma = alloc->vma; 965 vma = binder_alloc_get_vma(alloc);
939 if (vma) { 966 if (vma) {
940 if (!mmget_not_zero(alloc->vma_vm_mm)) 967 if (!mmget_not_zero(alloc->vma_vm_mm))
941 goto err_mmget; 968 goto err_mmget;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 172e32840256..a9dd4ea7467d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
5359 */ 5359 */
5360int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) 5360int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
5361{ 5361{
5362 u64 done_mask, ap_qc_active = ap->qc_active;
5362 int nr_done = 0; 5363 int nr_done = 0;
5363 u64 done_mask;
5364 5364
5365 done_mask = ap->qc_active ^ qc_active; 5365 /*
5366 * If the internal tag is set on ap->qc_active, then we care about
5367 * bit0 on the passed in qc_active mask. Move that bit up to match
5368 * the internal tag.
5369 */
5370 if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5371 qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5372 qc_active ^= qc_active & 0x01;
5373 }
5374
5375 done_mask = ap_qc_active ^ qc_active;
5366 5376
5367 if (unlikely(done_mask & qc_active)) { 5377 if (unlikely(done_mask & qc_active)) {
5368 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", 5378 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
@@ -7394,4 +7404,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
7394EXPORT_SYMBOL_GPL(ata_cable_ignore); 7404EXPORT_SYMBOL_GPL(ata_cable_ignore);
7395EXPORT_SYMBOL_GPL(ata_cable_sata); 7405EXPORT_SYMBOL_GPL(ata_cable_sata);
7396EXPORT_SYMBOL_GPL(ata_host_get); 7406EXPORT_SYMBOL_GPL(ata_host_get);
7397EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file 7407EXPORT_SYMBOL_GPL(ata_host_put);
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 5d4b72e21161..569a4a662dcd 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
256 .qc_issue = ftide010_qc_issue, 256 .qc_issue = ftide010_qc_issue,
257}; 257};
258 258
259static struct ata_port_info ftide010_port_info[] = { 259static struct ata_port_info ftide010_port_info = {
260 { 260 .flags = ATA_FLAG_SLAVE_POSS,
261 .flags = ATA_FLAG_SLAVE_POSS, 261 .mwdma_mask = ATA_MWDMA2,
262 .mwdma_mask = ATA_MWDMA2, 262 .udma_mask = ATA_UDMA6,
263 .udma_mask = ATA_UDMA6, 263 .pio_mask = ATA_PIO4,
264 .pio_mask = ATA_PIO4, 264 .port_ops = &pata_ftide010_port_ops,
265 .port_ops = &pata_ftide010_port_ops,
266 },
267}; 265};
268 266
269#if IS_ENABLED(CONFIG_SATA_GEMINI) 267#if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
349} 347}
350 348
351static int pata_ftide010_gemini_init(struct ftide010 *ftide, 349static int pata_ftide010_gemini_init(struct ftide010 *ftide,
350 struct ata_port_info *pi,
352 bool is_ata1) 351 bool is_ata1)
353{ 352{
354 struct device *dev = ftide->dev; 353 struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
373 372
374 /* Flag port as SATA-capable */ 373 /* Flag port as SATA-capable */
375 if (gemini_sata_bridge_enabled(sg, is_ata1)) 374 if (gemini_sata_bridge_enabled(sg, is_ata1))
376 ftide010_port_info[0].flags |= ATA_FLAG_SATA; 375 pi->flags |= ATA_FLAG_SATA;
376
377 /* This device has broken DMA, only PIO works */
378 if (of_machine_is_compatible("itian,sq201")) {
379 pi->mwdma_mask = 0;
380 pi->udma_mask = 0;
381 }
377 382
378 /* 383 /*
379 * We assume that a simple 40-wire cable is used in the PATA mode. 384 * We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
435} 440}
436#else 441#else
437static int pata_ftide010_gemini_init(struct ftide010 *ftide, 442static int pata_ftide010_gemini_init(struct ftide010 *ftide,
443 struct ata_port_info *pi,
438 bool is_ata1) 444 bool is_ata1)
439{ 445{
440 return -ENOTSUPP; 446 return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
446{ 452{
447 struct device *dev = &pdev->dev; 453 struct device *dev = &pdev->dev;
448 struct device_node *np = dev->of_node; 454 struct device_node *np = dev->of_node;
449 const struct ata_port_info pi = ftide010_port_info[0]; 455 struct ata_port_info pi = ftide010_port_info;
450 const struct ata_port_info *ppi[] = { &pi, NULL }; 456 const struct ata_port_info *ppi[] = { &pi, NULL };
451 struct ftide010 *ftide; 457 struct ftide010 *ftide;
452 struct resource *res; 458 struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
490 * are ATA0. This will also set up the cable types. 496 * are ATA0. This will also set up the cable types.
491 */ 497 */
492 ret = pata_ftide010_gemini_init(ftide, 498 ret = pata_ftide010_gemini_init(ftide,
499 &pi,
493 (res->start == 0x63400000)); 500 (res->start == 0x63400000));
494 if (ret) 501 if (ret)
495 goto err_dis_clk; 502 goto err_dis_clk;
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 0943e7065e0e..b3c0498ee433 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -209,21 +209,24 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
209static int alloc_lookup_fw_priv(const char *fw_name, 209static int alloc_lookup_fw_priv(const char *fw_name,
210 struct firmware_cache *fwc, 210 struct firmware_cache *fwc,
211 struct fw_priv **fw_priv, void *dbuf, 211 struct fw_priv **fw_priv, void *dbuf,
212 size_t size) 212 size_t size, enum fw_opt opt_flags)
213{ 213{
214 struct fw_priv *tmp; 214 struct fw_priv *tmp;
215 215
216 spin_lock(&fwc->lock); 216 spin_lock(&fwc->lock);
217 tmp = __lookup_fw_priv(fw_name); 217 if (!(opt_flags & FW_OPT_NOCACHE)) {
218 if (tmp) { 218 tmp = __lookup_fw_priv(fw_name);
219 kref_get(&tmp->ref); 219 if (tmp) {
220 spin_unlock(&fwc->lock); 220 kref_get(&tmp->ref);
221 *fw_priv = tmp; 221 spin_unlock(&fwc->lock);
222 pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); 222 *fw_priv = tmp;
223 return 1; 223 pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
224 return 1;
225 }
224 } 226 }
227
225 tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size); 228 tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
226 if (tmp) 229 if (tmp && !(opt_flags & FW_OPT_NOCACHE))
227 list_add(&tmp->list, &fwc->head); 230 list_add(&tmp->list, &fwc->head);
228 spin_unlock(&fwc->lock); 231 spin_unlock(&fwc->lock);
229 232
@@ -493,7 +496,8 @@ int assign_fw(struct firmware *fw, struct device *device,
493 */ 496 */
494static int 497static int
495_request_firmware_prepare(struct firmware **firmware_p, const char *name, 498_request_firmware_prepare(struct firmware **firmware_p, const char *name,
496 struct device *device, void *dbuf, size_t size) 499 struct device *device, void *dbuf, size_t size,
500 enum fw_opt opt_flags)
497{ 501{
498 struct firmware *firmware; 502 struct firmware *firmware;
499 struct fw_priv *fw_priv; 503 struct fw_priv *fw_priv;
@@ -511,7 +515,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
511 return 0; /* assigned */ 515 return 0; /* assigned */
512 } 516 }
513 517
514 ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size); 518 ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
519 opt_flags);
515 520
516 /* 521 /*
517 * bind with 'priv' now to avoid warning in failure path 522 * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +576,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
571 goto out; 576 goto out;
572 } 577 }
573 578
574 ret = _request_firmware_prepare(&fw, name, device, buf, size); 579 ret = _request_firmware_prepare(&fw, name, device, buf, size,
580 opt_flags);
575 if (ret <= 0) /* error or already assigned */ 581 if (ret <= 0) /* error or already assigned */
576 goto out; 582 goto out;
577 583
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c8a1cb0b6136..817320c7c4c1 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev,
417 int nid; 417 int nid;
418 418
419 /* 419 /*
420 * The block contains more than one zone can not be offlined.
421 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
422 */
423 if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
424 return sprintf(buf, "none\n");
425
426 start_pfn = valid_start_pfn;
427 nr_pages = valid_end_pfn - start_pfn;
428
429 /*
430 * Check the existing zone. Make sure that we do that only on the 420 * Check the existing zone. Make sure that we do that only on the
431 * online nodes otherwise the page_zone is not reliable 421 * online nodes otherwise the page_zone is not reliable
432 */ 422 */
433 if (mem->state == MEM_ONLINE) { 423 if (mem->state == MEM_ONLINE) {
424 /*
425 * The block contains more than one zone can not be offlined.
426 * This can happen e.g. for ZONE_DMA and ZONE_DMA32
427 */
428 if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
429 &valid_start_pfn, &valid_end_pfn))
430 return sprintf(buf, "none\n");
431 start_pfn = valid_start_pfn;
434 strcat(buf, page_zone(pfn_to_page(start_pfn))->name); 432 strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
435 goto out; 433 goto out;
436 } 434 }
437 435
438 nid = pfn_to_nid(start_pfn); 436 nid = mem->nid;
439 default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); 437 default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
440 strcat(buf, default_zone->name); 438 strcat(buf, default_zone->name);
441 439
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 8e2e4757adcb..5a42ae4078c2 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
185int of_pm_clk_add_clks(struct device *dev) 185int of_pm_clk_add_clks(struct device *dev)
186{ 186{
187 struct clk **clks; 187 struct clk **clks;
188 unsigned int i, count; 188 int i, count;
189 int ret; 189 int ret;
190 190
191 if (!dev || !dev->of_node) 191 if (!dev || !dev->of_node)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 48f622728ce6..f2b6f4da1034 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 (struct floppy_struct **)&outparam); 3467 (struct floppy_struct **)&outparam);
3468 if (ret) 3468 if (ret)
3469 return ret; 3469 return ret;
3470 memcpy(&inparam.g, outparam,
3471 offsetof(struct floppy_struct, name));
3472 outparam = &inparam.g;
3470 break; 3473 break;
3471 case FDMSGON: 3474 case FDMSGON:
3472 UDP->flags |= FTD_MSG; 3475 UDP->flags |= FTD_MSG;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3863c00372bb..14a51254c3db 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1239 case NBD_SET_SOCK: 1239 case NBD_SET_SOCK:
1240 return nbd_add_socket(nbd, arg, false); 1240 return nbd_add_socket(nbd, arg, false);
1241 case NBD_SET_BLKSIZE: 1241 case NBD_SET_BLKSIZE:
1242 if (!arg || !is_power_of_2(arg) || arg < 512 ||
1243 arg > PAGE_SIZE)
1244 return -EINVAL;
1242 nbd_size_set(nbd, arg, 1245 nbd_size_set(nbd, arg,
1243 div_s64(config->bytesize, arg)); 1246 div_s64(config->bytesize, arg));
1244 return 0; 1247 return 0;
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d81781f22dba..34e0030f0592 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,10 +87,10 @@ struct nullb {
87#ifdef CONFIG_BLK_DEV_ZONED 87#ifdef CONFIG_BLK_DEV_ZONED
88int null_zone_init(struct nullb_device *dev); 88int null_zone_init(struct nullb_device *dev);
89void null_zone_exit(struct nullb_device *dev); 89void null_zone_exit(struct nullb_device *dev);
90blk_status_t null_zone_report(struct nullb *nullb, 90blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
91 struct nullb_cmd *cmd); 91void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
92void null_zone_write(struct nullb_cmd *cmd); 92 unsigned int nr_sectors);
93void null_zone_reset(struct nullb_cmd *cmd); 93void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
94#else 94#else
95static inline int null_zone_init(struct nullb_device *dev) 95static inline int null_zone_init(struct nullb_device *dev)
96{ 96{
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
98} 98}
99static inline void null_zone_exit(struct nullb_device *dev) {} 99static inline void null_zone_exit(struct nullb_device *dev) {}
100static inline blk_status_t null_zone_report(struct nullb *nullb, 100static inline blk_status_t null_zone_report(struct nullb *nullb,
101 struct nullb_cmd *cmd) 101 struct bio *bio)
102{ 102{
103 return BLK_STS_NOTSUPP; 103 return BLK_STS_NOTSUPP;
104} 104}
105static inline void null_zone_write(struct nullb_cmd *cmd) {} 105static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
106static inline void null_zone_reset(struct nullb_cmd *cmd) {} 106 unsigned int nr_sectors)
107{
108}
109static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
107#endif /* CONFIG_BLK_DEV_ZONED */ 110#endif /* CONFIG_BLK_DEV_ZONED */
108#endif /* __NULL_BLK_H */ 111#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 6127e3ff7b4b..093b614d6524 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
1157 } 1157 }
1158} 1158}
1159 1159
1160static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
1161{
1162 struct nullb_device *dev = cmd->nq->dev;
1163
1164 if (dev->queue_mode == NULL_Q_BIO) {
1165 if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
1166 cmd->error = null_zone_report(nullb, cmd->bio);
1167 return true;
1168 }
1169 } else {
1170 if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
1171 cmd->error = null_zone_report(nullb, cmd->rq->bio);
1172 return true;
1173 }
1174 }
1175
1176 return false;
1177}
1178
1160static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) 1179static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
1161{ 1180{
1162 struct nullb_device *dev = cmd->nq->dev; 1181 struct nullb_device *dev = cmd->nq->dev;
1163 struct nullb *nullb = dev->nullb; 1182 struct nullb *nullb = dev->nullb;
1164 int err = 0; 1183 int err = 0;
1165 1184
1166 if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { 1185 if (cmd_report_zone(nullb, cmd))
1167 cmd->error = null_zone_report(nullb, cmd);
1168 goto out; 1186 goto out;
1169 }
1170 1187
1171 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { 1188 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
1172 struct request *rq = cmd->rq; 1189 struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
1234 cmd->error = errno_to_blk_status(err); 1251 cmd->error = errno_to_blk_status(err);
1235 1252
1236 if (!cmd->error && dev->zoned) { 1253 if (!cmd->error && dev->zoned) {
1237 if (req_op(cmd->rq) == REQ_OP_WRITE) 1254 sector_t sector;
1238 null_zone_write(cmd); 1255 unsigned int nr_sectors;
1239 else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET) 1256 int op;
1240 null_zone_reset(cmd); 1257
1258 if (dev->queue_mode == NULL_Q_BIO) {
1259 op = bio_op(cmd->bio);
1260 sector = cmd->bio->bi_iter.bi_sector;
1261 nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
1262 } else {
1263 op = req_op(cmd->rq);
1264 sector = blk_rq_pos(cmd->rq);
1265 nr_sectors = blk_rq_sectors(cmd->rq);
1266 }
1267
1268 if (op == REQ_OP_WRITE)
1269 null_zone_write(cmd, sector, nr_sectors);
1270 else if (op == REQ_OP_ZONE_RESET)
1271 null_zone_reset(cmd, sector);
1241 } 1272 }
1242out: 1273out:
1243 /* Complete IO by inline, softirq or timer */ 1274 /* Complete IO by inline, softirq or timer */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index a979ca00d7be..7c6b86d98700 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
48 kvfree(dev->zones); 48 kvfree(dev->zones);
49} 49}
50 50
51static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, 51static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
52 unsigned int zno, unsigned int nr_zones) 52 unsigned int zno, unsigned int nr_zones)
53{ 53{
54 struct blk_zone_report_hdr *hdr = NULL; 54 struct blk_zone_report_hdr *hdr = NULL;
55 struct bio_vec bvec; 55 struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
57 void *addr; 57 void *addr;
58 unsigned int zones_to_cpy; 58 unsigned int zones_to_cpy;
59 59
60 bio_for_each_segment(bvec, rq->bio, iter) { 60 bio_for_each_segment(bvec, bio, iter) {
61 addr = kmap_atomic(bvec.bv_page); 61 addr = kmap_atomic(bvec.bv_page);
62 62
63 zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); 63 zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
84 } 84 }
85} 85}
86 86
87blk_status_t null_zone_report(struct nullb *nullb, 87blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
88 struct nullb_cmd *cmd)
89{ 88{
90 struct nullb_device *dev = nullb->dev; 89 struct nullb_device *dev = nullb->dev;
91 struct request *rq = cmd->rq; 90 unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
92 unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
93 unsigned int nr_zones = dev->nr_zones - zno; 91 unsigned int nr_zones = dev->nr_zones - zno;
94 unsigned int max_zones = (blk_rq_bytes(rq) / 92 unsigned int max_zones;
95 sizeof(struct blk_zone)) - 1;
96 93
94 max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
97 nr_zones = min_t(unsigned int, nr_zones, max_zones); 95 nr_zones = min_t(unsigned int, nr_zones, max_zones);
98 96 null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
99 null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
100 97
101 return BLK_STS_OK; 98 return BLK_STS_OK;
102} 99}
103 100
104void null_zone_write(struct nullb_cmd *cmd) 101void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
102 unsigned int nr_sectors)
105{ 103{
106 struct nullb_device *dev = cmd->nq->dev; 104 struct nullb_device *dev = cmd->nq->dev;
107 struct request *rq = cmd->rq;
108 sector_t sector = blk_rq_pos(rq);
109 unsigned int rq_sectors = blk_rq_sectors(rq);
110 unsigned int zno = null_zone_no(dev, sector); 105 unsigned int zno = null_zone_no(dev, sector);
111 struct blk_zone *zone = &dev->zones[zno]; 106 struct blk_zone *zone = &dev->zones[zno];
112 107
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
118 case BLK_ZONE_COND_EMPTY: 113 case BLK_ZONE_COND_EMPTY:
119 case BLK_ZONE_COND_IMP_OPEN: 114 case BLK_ZONE_COND_IMP_OPEN:
120 /* Writes must be at the write pointer position */ 115 /* Writes must be at the write pointer position */
121 if (blk_rq_pos(rq) != zone->wp) { 116 if (sector != zone->wp) {
122 cmd->error = BLK_STS_IOERR; 117 cmd->error = BLK_STS_IOERR;
123 break; 118 break;
124 } 119 }
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
126 if (zone->cond == BLK_ZONE_COND_EMPTY) 121 if (zone->cond == BLK_ZONE_COND_EMPTY)
127 zone->cond = BLK_ZONE_COND_IMP_OPEN; 122 zone->cond = BLK_ZONE_COND_IMP_OPEN;
128 123
129 zone->wp += rq_sectors; 124 zone->wp += nr_sectors;
130 if (zone->wp == zone->start + zone->len) 125 if (zone->wp == zone->start + zone->len)
131 zone->cond = BLK_ZONE_COND_FULL; 126 zone->cond = BLK_ZONE_COND_FULL;
132 break; 127 break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
137 } 132 }
138} 133}
139 134
140void null_zone_reset(struct nullb_cmd *cmd) 135void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
141{ 136{
142 struct nullb_device *dev = cmd->nq->dev; 137 struct nullb_device *dev = cmd->nq->dev;
143 struct request *rq = cmd->rq; 138 unsigned int zno = null_zone_no(dev, sector);
144 unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
145 struct blk_zone *zone = &dev->zones[zno]; 139 struct blk_zone *zone = &dev->zones[zno];
146 140
147 zone->cond = BLK_ZONE_COND_EMPTY; 141 zone->cond = BLK_ZONE_COND_EMPTY;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7915f3b03736..73ed5f3a862d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
4207 4207
4208 count += sprintf(&buf[count], "%s" 4208 count += sprintf(&buf[count], "%s"
4209 "pool_id %llu\npool_name %s\n" 4209 "pool_id %llu\npool_name %s\n"
4210 "pool_ns %s\n"
4210 "image_id %s\nimage_name %s\n" 4211 "image_id %s\nimage_name %s\n"
4211 "snap_id %llu\nsnap_name %s\n" 4212 "snap_id %llu\nsnap_name %s\n"
4212 "overlap %llu\n", 4213 "overlap %llu\n",
4213 !count ? "" : "\n", /* first? */ 4214 !count ? "" : "\n", /* first? */
4214 spec->pool_id, spec->pool_name, 4215 spec->pool_id, spec->pool_name,
4216 spec->pool_ns ?: "",
4215 spec->image_id, spec->image_name ?: "(unknown)", 4217 spec->image_id, spec->image_name ?: "(unknown)",
4216 spec->snap_id, spec->snap_name, 4218 spec->snap_id, spec->snap_name,
4217 rbd_dev->parent_overlap); 4219 rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4584 &rbd_dev->header.features); 4586 &rbd_dev->header.features);
4585} 4587}
4586 4588
4589struct parent_image_info {
4590 u64 pool_id;
4591 const char *pool_ns;
4592 const char *image_id;
4593 u64 snap_id;
4594
4595 bool has_overlap;
4596 u64 overlap;
4597};
4598
4599/*
4600 * The caller is responsible for @pii.
4601 */
4602static int decode_parent_image_spec(void **p, void *end,
4603 struct parent_image_info *pii)
4604{
4605 u8 struct_v;
4606 u32 struct_len;
4607 int ret;
4608
4609 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
4610 &struct_v, &struct_len);
4611 if (ret)
4612 return ret;
4613
4614 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4615 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4616 if (IS_ERR(pii->pool_ns)) {
4617 ret = PTR_ERR(pii->pool_ns);
4618 pii->pool_ns = NULL;
4619 return ret;
4620 }
4621 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4622 if (IS_ERR(pii->image_id)) {
4623 ret = PTR_ERR(pii->image_id);
4624 pii->image_id = NULL;
4625 return ret;
4626 }
4627 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4628 return 0;
4629
4630e_inval:
4631 return -EINVAL;
4632}
4633
4634static int __get_parent_info(struct rbd_device *rbd_dev,
4635 struct page *req_page,
4636 struct page *reply_page,
4637 struct parent_image_info *pii)
4638{
4639 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4640 size_t reply_len = PAGE_SIZE;
4641 void *p, *end;
4642 int ret;
4643
4644 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4645 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4646 req_page, sizeof(u64), reply_page, &reply_len);
4647 if (ret)
4648 return ret == -EOPNOTSUPP ? 1 : ret;
4649
4650 p = page_address(reply_page);
4651 end = p + reply_len;
4652 ret = decode_parent_image_spec(&p, end, pii);
4653 if (ret)
4654 return ret;
4655
4656 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4657 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4658 req_page, sizeof(u64), reply_page, &reply_len);
4659 if (ret)
4660 return ret;
4661
4662 p = page_address(reply_page);
4663 end = p + reply_len;
4664 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4665 if (pii->has_overlap)
4666 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4667
4668 return 0;
4669
4670e_inval:
4671 return -EINVAL;
4672}
4673
4674/*
4675 * The caller is responsible for @pii.
4676 */
4677static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4678 struct page *req_page,
4679 struct page *reply_page,
4680 struct parent_image_info *pii)
4681{
4682 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4683 size_t reply_len = PAGE_SIZE;
4684 void *p, *end;
4685 int ret;
4686
4687 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4688 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4689 req_page, sizeof(u64), reply_page, &reply_len);
4690 if (ret)
4691 return ret;
4692
4693 p = page_address(reply_page);
4694 end = p + reply_len;
4695 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4696 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4697 if (IS_ERR(pii->image_id)) {
4698 ret = PTR_ERR(pii->image_id);
4699 pii->image_id = NULL;
4700 return ret;
4701 }
4702 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
4703 pii->has_overlap = true;
4704 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4705
4706 return 0;
4707
4708e_inval:
4709 return -EINVAL;
4710}
4711
4712static int get_parent_info(struct rbd_device *rbd_dev,
4713 struct parent_image_info *pii)
4714{
4715 struct page *req_page, *reply_page;
4716 void *p;
4717 int ret;
4718
4719 req_page = alloc_page(GFP_KERNEL);
4720 if (!req_page)
4721 return -ENOMEM;
4722
4723 reply_page = alloc_page(GFP_KERNEL);
4724 if (!reply_page) {
4725 __free_page(req_page);
4726 return -ENOMEM;
4727 }
4728
4729 p = page_address(req_page);
4730 ceph_encode_64(&p, rbd_dev->spec->snap_id);
4731 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
4732 if (ret > 0)
4733 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
4734 pii);
4735
4736 __free_page(req_page);
4737 __free_page(reply_page);
4738 return ret;
4739}
4740
4587static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) 4741static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4588{ 4742{
4589 struct rbd_spec *parent_spec; 4743 struct rbd_spec *parent_spec;
4590 size_t size; 4744 struct parent_image_info pii = { 0 };
4591 void *reply_buf = NULL;
4592 __le64 snapid;
4593 void *p;
4594 void *end;
4595 u64 pool_id;
4596 char *image_id;
4597 u64 snap_id;
4598 u64 overlap;
4599 int ret; 4745 int ret;
4600 4746
4601 parent_spec = rbd_spec_alloc(); 4747 parent_spec = rbd_spec_alloc();
4602 if (!parent_spec) 4748 if (!parent_spec)
4603 return -ENOMEM; 4749 return -ENOMEM;
4604 4750
4605 size = sizeof (__le64) + /* pool_id */ 4751 ret = get_parent_info(rbd_dev, &pii);
4606 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */ 4752 if (ret)
4607 sizeof (__le64) + /* snap_id */
4608 sizeof (__le64); /* overlap */
4609 reply_buf = kmalloc(size, GFP_KERNEL);
4610 if (!reply_buf) {
4611 ret = -ENOMEM;
4612 goto out_err; 4753 goto out_err;
4613 }
4614 4754
4615 snapid = cpu_to_le64(rbd_dev->spec->snap_id); 4755 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
4616 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid, 4756 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
4617 &rbd_dev->header_oloc, "get_parent", 4757 pii.has_overlap, pii.overlap);
4618 &snapid, sizeof(snapid), reply_buf, size);
4619 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4620 if (ret < 0)
4621 goto out_err;
4622 4758
4623 p = reply_buf; 4759 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
4624 end = reply_buf + ret;
4625 ret = -ERANGE;
4626 ceph_decode_64_safe(&p, end, pool_id, out_err);
4627 if (pool_id == CEPH_NOPOOL) {
4628 /* 4760 /*
4629 * Either the parent never existed, or we have 4761 * Either the parent never existed, or we have
4630 * record of it but the image got flattened so it no 4762 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4633 * overlap to 0. The effect of this is that all new 4765 * overlap to 0. The effect of this is that all new
4634 * requests will be treated as if the image had no 4766 * requests will be treated as if the image had no
4635 * parent. 4767 * parent.
4768 *
4769 * If !pii.has_overlap, the parent image spec is not
4770 * applicable. It's there to avoid duplication in each
4771 * snapshot record.
4636 */ 4772 */
4637 if (rbd_dev->parent_overlap) { 4773 if (rbd_dev->parent_overlap) {
4638 rbd_dev->parent_overlap = 0; 4774 rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4647 /* The ceph file layout needs to fit pool id in 32 bits */ 4783 /* The ceph file layout needs to fit pool id in 32 bits */
4648 4784
4649 ret = -EIO; 4785 ret = -EIO;
4650 if (pool_id > (u64)U32_MAX) { 4786 if (pii.pool_id > (u64)U32_MAX) {
4651 rbd_warn(NULL, "parent pool id too large (%llu > %u)", 4787 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4652 (unsigned long long)pool_id, U32_MAX); 4788 (unsigned long long)pii.pool_id, U32_MAX);
4653 goto out_err; 4789 goto out_err;
4654 } 4790 }
4655 4791
4656 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4657 if (IS_ERR(image_id)) {
4658 ret = PTR_ERR(image_id);
4659 goto out_err;
4660 }
4661 ceph_decode_64_safe(&p, end, snap_id, out_err);
4662 ceph_decode_64_safe(&p, end, overlap, out_err);
4663
4664 /* 4792 /*
4665 * The parent won't change (except when the clone is 4793 * The parent won't change (except when the clone is
4666 * flattened, already handled that). So we only need to 4794 * flattened, already handled that). So we only need to
4667 * record the parent spec we have not already done so. 4795 * record the parent spec we have not already done so.
4668 */ 4796 */
4669 if (!rbd_dev->parent_spec) { 4797 if (!rbd_dev->parent_spec) {
4670 parent_spec->pool_id = pool_id; 4798 parent_spec->pool_id = pii.pool_id;
4671 parent_spec->image_id = image_id; 4799 if (pii.pool_ns && *pii.pool_ns) {
4672 parent_spec->snap_id = snap_id; 4800 parent_spec->pool_ns = pii.pool_ns;
4673 4801 pii.pool_ns = NULL;
4674 /* TODO: support cloning across namespaces */
4675 if (rbd_dev->spec->pool_ns) {
4676 parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
4677 GFP_KERNEL);
4678 if (!parent_spec->pool_ns) {
4679 ret = -ENOMEM;
4680 goto out_err;
4681 }
4682 } 4802 }
4803 parent_spec->image_id = pii.image_id;
4804 pii.image_id = NULL;
4805 parent_spec->snap_id = pii.snap_id;
4683 4806
4684 rbd_dev->parent_spec = parent_spec; 4807 rbd_dev->parent_spec = parent_spec;
4685 parent_spec = NULL; /* rbd_dev now owns this */ 4808 parent_spec = NULL; /* rbd_dev now owns this */
4686 } else {
4687 kfree(image_id);
4688 } 4809 }
4689 4810
4690 /* 4811 /*
4691 * We always update the parent overlap. If it's zero we issue 4812 * We always update the parent overlap. If it's zero we issue
4692 * a warning, as we will proceed as if there was no parent. 4813 * a warning, as we will proceed as if there was no parent.
4693 */ 4814 */
4694 if (!overlap) { 4815 if (!pii.overlap) {
4695 if (parent_spec) { 4816 if (parent_spec) {
4696 /* refresh, careful to warn just once */ 4817 /* refresh, careful to warn just once */
4697 if (rbd_dev->parent_overlap) 4818 if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4702 rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); 4823 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4703 } 4824 }
4704 } 4825 }
4705 rbd_dev->parent_overlap = overlap; 4826 rbd_dev->parent_overlap = pii.overlap;
4706 4827
4707out: 4828out:
4708 ret = 0; 4829 ret = 0;
4709out_err: 4830out_err:
4710 kfree(reply_buf); 4831 kfree(pii.pool_ns);
4832 kfree(pii.image_id);
4711 rbd_spec_put(parent_spec); 4833 rbd_spec_put(parent_spec);
4712
4713 return ret; 4834 return ret;
4714} 4835}
4715 4836
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index b55b245e8052..fd1e19f1a49f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
84 "Maximum number of grants to map persistently"); 84 "Maximum number of grants to map persistently");
85 85
86/* 86/*
87 * How long a persistent grant is allowed to remain allocated without being in
88 * use. The time is in seconds, 0 means indefinitely long.
89 */
90
91static unsigned int xen_blkif_pgrant_timeout = 60;
92module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
93 uint, 0644);
94MODULE_PARM_DESC(persistent_grant_unused_seconds,
95 "Time in seconds an unused persistent grant is allowed to "
96 "remain allocated. Default is 60, 0 means unlimited.");
97
98/*
87 * Maximum number of rings/queues blkback supports, allow as many queues as there 99 * Maximum number of rings/queues blkback supports, allow as many queues as there
88 * are CPUs if user has not specified a value. 100 * are CPUs if user has not specified a value.
89 */ 101 */
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
123/* Number of free pages to remove on each call to gnttab_free_pages */ 135/* Number of free pages to remove on each call to gnttab_free_pages */
124#define NUM_BATCH_FREE_PAGES 10 136#define NUM_BATCH_FREE_PAGES 10
125 137
138static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
139{
140 return xen_blkif_pgrant_timeout &&
141 (jiffies - persistent_gnt->last_used >=
142 HZ * xen_blkif_pgrant_timeout);
143}
144
126static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page) 145static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
127{ 146{
128 unsigned long flags; 147 unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
236 } 255 }
237 } 256 }
238 257
239 bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE); 258 persistent_gnt->active = true;
240 set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
241 /* Add new node and rebalance tree. */ 259 /* Add new node and rebalance tree. */
242 rb_link_node(&(persistent_gnt->node), parent, new); 260 rb_link_node(&(persistent_gnt->node), parent, new);
243 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts); 261 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
261 else if (gref > data->gnt) 279 else if (gref > data->gnt)
262 node = node->rb_right; 280 node = node->rb_right;
263 else { 281 else {
264 if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) { 282 if (data->active) {
265 pr_alert_ratelimited("requesting a grant already in use\n"); 283 pr_alert_ratelimited("requesting a grant already in use\n");
266 return NULL; 284 return NULL;
267 } 285 }
268 set_bit(PERSISTENT_GNT_ACTIVE, data->flags); 286 data->active = true;
269 atomic_inc(&ring->persistent_gnt_in_use); 287 atomic_inc(&ring->persistent_gnt_in_use);
270 return data; 288 return data;
271 } 289 }
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
276static void put_persistent_gnt(struct xen_blkif_ring *ring, 294static void put_persistent_gnt(struct xen_blkif_ring *ring,
277 struct persistent_gnt *persistent_gnt) 295 struct persistent_gnt *persistent_gnt)
278{ 296{
279 if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags)) 297 if (!persistent_gnt->active)
280 pr_alert_ratelimited("freeing a grant already unused\n"); 298 pr_alert_ratelimited("freeing a grant already unused\n");
281 set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags); 299 persistent_gnt->last_used = jiffies;
282 clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags); 300 persistent_gnt->active = false;
283 atomic_dec(&ring->persistent_gnt_in_use); 301 atomic_dec(&ring->persistent_gnt_in_use);
284} 302}
285 303
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
371 struct persistent_gnt *persistent_gnt; 389 struct persistent_gnt *persistent_gnt;
372 struct rb_node *n; 390 struct rb_node *n;
373 unsigned int num_clean, total; 391 unsigned int num_clean, total;
374 bool scan_used = false, clean_used = false; 392 bool scan_used = false;
375 struct rb_root *root; 393 struct rb_root *root;
376 394
377 if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
378 (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
379 !ring->blkif->vbd.overflow_max_grants)) {
380 goto out;
381 }
382
383 if (work_busy(&ring->persistent_purge_work)) { 395 if (work_busy(&ring->persistent_purge_work)) {
384 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); 396 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
385 goto out; 397 goto out;
386 } 398 }
387 399
388 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN; 400 if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
389 num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean; 401 (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
390 num_clean = min(ring->persistent_gnt_c, num_clean); 402 !ring->blkif->vbd.overflow_max_grants)) {
391 if ((num_clean == 0) || 403 num_clean = 0;
392 (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use)))) 404 } else {
393 goto out; 405 num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
406 num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
407 num_clean;
408 num_clean = min(ring->persistent_gnt_c, num_clean);
409 pr_debug("Going to purge at least %u persistent grants\n",
410 num_clean);
411 }
394 412
395 /* 413 /*
396 * At this point, we can assure that there will be no calls 414 * At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
401 * number of grants. 419 * number of grants.
402 */ 420 */
403 421
404 total = num_clean; 422 total = 0;
405
406 pr_debug("Going to purge %u persistent grants\n", num_clean);
407 423
408 BUG_ON(!list_empty(&ring->persistent_purge_list)); 424 BUG_ON(!list_empty(&ring->persistent_purge_list));
409 root = &ring->persistent_gnts; 425 root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
412 BUG_ON(persistent_gnt->handle == 428 BUG_ON(persistent_gnt->handle ==
413 BLKBACK_INVALID_HANDLE); 429 BLKBACK_INVALID_HANDLE);
414 430
415 if (clean_used) { 431 if (persistent_gnt->active)
416 clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
417 continue; 432 continue;
418 } 433 if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
419
420 if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
421 continue; 434 continue;
422 if (!scan_used && 435 if (scan_used && total >= num_clean)
423 (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
424 continue; 436 continue;
425 437
426 rb_erase(&persistent_gnt->node, root); 438 rb_erase(&persistent_gnt->node, root);
427 list_add(&persistent_gnt->remove_node, 439 list_add(&persistent_gnt->remove_node,
428 &ring->persistent_purge_list); 440 &ring->persistent_purge_list);
429 if (--num_clean == 0) 441 total++;
430 goto finished;
431 } 442 }
432 /* 443 /*
433 * If we get here it means we also need to start cleaning 444 * Check whether we also need to start cleaning
434 * grants that were used since last purge in order to cope 445 * grants that were used since last purge in order to cope
435 * with the requested num 446 * with the requested num
436 */ 447 */
437 if (!scan_used && !clean_used) { 448 if (!scan_used && total < num_clean) {
438 pr_debug("Still missing %u purged frames\n", num_clean); 449 pr_debug("Still missing %u purged frames\n", num_clean - total);
439 scan_used = true; 450 scan_used = true;
440 goto purge_list; 451 goto purge_list;
441 } 452 }
442finished:
443 if (!clean_used) {
444 pr_debug("Finished scanning for grants to clean, removing used flag\n");
445 clean_used = true;
446 goto purge_list;
447 }
448 453
449 ring->persistent_gnt_c -= (total - num_clean); 454 if (total) {
450 ring->blkif->vbd.overflow_max_grants = 0; 455 ring->persistent_gnt_c -= total;
456 ring->blkif->vbd.overflow_max_grants = 0;
451 457
452 /* We can defer this work */ 458 /* We can defer this work */
453 schedule_work(&ring->persistent_purge_work); 459 schedule_work(&ring->persistent_purge_work);
454 pr_debug("Purged %u/%u\n", (total - num_clean), total); 460 pr_debug("Purged %u/%u\n", num_clean, total);
461 }
455 462
456out: 463out:
457 return; 464 return;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index ecb35fe8ca8d..1d3002d773f7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -233,16 +233,6 @@ struct xen_vbd {
233 233
234struct backend_info; 234struct backend_info;
235 235
236/* Number of available flags */
237#define PERSISTENT_GNT_FLAGS_SIZE 2
238/* This persistent grant is currently in use */
239#define PERSISTENT_GNT_ACTIVE 0
240/*
241 * This persistent grant has been used, this flag is set when we remove the
242 * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
243 */
244#define PERSISTENT_GNT_WAS_ACTIVE 1
245
246/* Number of requests that we can fit in a ring */ 236/* Number of requests that we can fit in a ring */
247#define XEN_BLKIF_REQS_PER_PAGE 32 237#define XEN_BLKIF_REQS_PER_PAGE 32
248 238
@@ -250,7 +240,8 @@ struct persistent_gnt {
250 struct page *page; 240 struct page *page;
251 grant_ref_t gnt; 241 grant_ref_t gnt;
252 grant_handle_t handle; 242 grant_handle_t handle;
253 DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE); 243 unsigned long last_used;
244 bool active;
254 struct rb_node node; 245 struct rb_node node;
255 struct list_head remove_node; 246 struct list_head remove_node;
256}; 247};
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
278 wait_queue_head_t pending_free_wq; 269 wait_queue_head_t pending_free_wq;
279 270
280 /* Tree to store persistent grants. */ 271 /* Tree to store persistent grants. */
281 spinlock_t pers_gnts_lock;
282 struct rb_root persistent_gnts; 272 struct rb_root persistent_gnts;
283 unsigned int persistent_gnt_c; 273 unsigned int persistent_gnt_c;
284 atomic_t persistent_gnt_in_use; 274 atomic_t persistent_gnt_in_use;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8986adab9bf5..429d20131c7e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -46,6 +46,7 @@
46#include <linux/scatterlist.h> 46#include <linux/scatterlist.h>
47#include <linux/bitmap.h> 47#include <linux/bitmap.h>
48#include <linux/list.h> 48#include <linux/list.h>
49#include <linux/workqueue.h>
49 50
50#include <xen/xen.h> 51#include <xen/xen.h>
51#include <xen/xenbus.h> 52#include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
121 122
122static DEFINE_MUTEX(blkfront_mutex); 123static DEFINE_MUTEX(blkfront_mutex);
123static const struct block_device_operations xlvbd_block_fops; 124static const struct block_device_operations xlvbd_block_fops;
125static struct delayed_work blkfront_work;
126static LIST_HEAD(info_list);
124 127
125/* 128/*
126 * Maximum number of segments in indirect requests, the actual value used by 129 * Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
216 /* Save uncomplete reqs and bios for migration. */ 219 /* Save uncomplete reqs and bios for migration. */
217 struct list_head requests; 220 struct list_head requests;
218 struct bio_list bio_list; 221 struct bio_list bio_list;
222 struct list_head info_list;
219}; 223};
220 224
221static unsigned int nr_minors; 225static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
1759 return err; 1763 return err;
1760} 1764}
1761 1765
1766static void free_info(struct blkfront_info *info)
1767{
1768 list_del(&info->info_list);
1769 kfree(info);
1770}
1771
1762/* Common code used when first setting up, and when resuming. */ 1772/* Common code used when first setting up, and when resuming. */
1763static int talk_to_blkback(struct xenbus_device *dev, 1773static int talk_to_blkback(struct xenbus_device *dev,
1764 struct blkfront_info *info) 1774 struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
1880 destroy_blkring: 1890 destroy_blkring:
1881 blkif_free(info, 0); 1891 blkif_free(info, 0);
1882 1892
1883 kfree(info); 1893 mutex_lock(&blkfront_mutex);
1894 free_info(info);
1895 mutex_unlock(&blkfront_mutex);
1896
1884 dev_set_drvdata(&dev->dev, NULL); 1897 dev_set_drvdata(&dev->dev, NULL);
1885 1898
1886 return err; 1899 return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1991 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 2004 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
1992 dev_set_drvdata(&dev->dev, info); 2005 dev_set_drvdata(&dev->dev, info);
1993 2006
2007 mutex_lock(&blkfront_mutex);
2008 list_add(&info->info_list, &info_list);
2009 mutex_unlock(&blkfront_mutex);
2010
1994 return 0; 2011 return 0;
1995} 2012}
1996 2013
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
2301 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST) 2318 if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2302 indirect_segments = 0; 2319 indirect_segments = 0;
2303 info->max_indirect_segments = indirect_segments; 2320 info->max_indirect_segments = indirect_segments;
2321
2322 if (info->feature_persistent) {
2323 mutex_lock(&blkfront_mutex);
2324 schedule_delayed_work(&blkfront_work, HZ * 10);
2325 mutex_unlock(&blkfront_mutex);
2326 }
2304} 2327}
2305 2328
2306/* 2329/*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
2482 mutex_unlock(&info->mutex); 2505 mutex_unlock(&info->mutex);
2483 2506
2484 if (!bdev) { 2507 if (!bdev) {
2485 kfree(info); 2508 mutex_lock(&blkfront_mutex);
2509 free_info(info);
2510 mutex_unlock(&blkfront_mutex);
2486 return 0; 2511 return 0;
2487 } 2512 }
2488 2513
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
2502 if (info && !bdev->bd_openers) { 2527 if (info && !bdev->bd_openers) {
2503 xlvbd_release_gendisk(info); 2528 xlvbd_release_gendisk(info);
2504 disk->private_data = NULL; 2529 disk->private_data = NULL;
2505 kfree(info); 2530 mutex_lock(&blkfront_mutex);
2531 free_info(info);
2532 mutex_unlock(&blkfront_mutex);
2506 } 2533 }
2507 2534
2508 mutex_unlock(&bdev->bd_mutex); 2535 mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
2585 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 2612 dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2586 xlvbd_release_gendisk(info); 2613 xlvbd_release_gendisk(info);
2587 disk->private_data = NULL; 2614 disk->private_data = NULL;
2588 kfree(info); 2615 free_info(info);
2589 } 2616 }
2590 2617
2591out: 2618out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
2618 .is_ready = blkfront_is_ready, 2645 .is_ready = blkfront_is_ready,
2619}; 2646};
2620 2647
2648static void purge_persistent_grants(struct blkfront_info *info)
2649{
2650 unsigned int i;
2651 unsigned long flags;
2652
2653 for (i = 0; i < info->nr_rings; i++) {
2654 struct blkfront_ring_info *rinfo = &info->rinfo[i];
2655 struct grant *gnt_list_entry, *tmp;
2656
2657 spin_lock_irqsave(&rinfo->ring_lock, flags);
2658
2659 if (rinfo->persistent_gnts_c == 0) {
2660 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2661 continue;
2662 }
2663
2664 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2665 node) {
2666 if (gnt_list_entry->gref == GRANT_INVALID_REF ||
2667 gnttab_query_foreign_access(gnt_list_entry->gref))
2668 continue;
2669
2670 list_del(&gnt_list_entry->node);
2671 gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
2672 rinfo->persistent_gnts_c--;
2673 gnt_list_entry->gref = GRANT_INVALID_REF;
2674 list_add_tail(&gnt_list_entry->node, &rinfo->grants);
2675 }
2676
2677 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2678 }
2679}
2680
2681static void blkfront_delay_work(struct work_struct *work)
2682{
2683 struct blkfront_info *info;
2684 bool need_schedule_work = false;
2685
2686 mutex_lock(&blkfront_mutex);
2687
2688 list_for_each_entry(info, &info_list, info_list) {
2689 if (info->feature_persistent) {
2690 need_schedule_work = true;
2691 mutex_lock(&info->mutex);
2692 purge_persistent_grants(info);
2693 mutex_unlock(&info->mutex);
2694 }
2695 }
2696
2697 if (need_schedule_work)
2698 schedule_delayed_work(&blkfront_work, HZ * 10);
2699
2700 mutex_unlock(&blkfront_mutex);
2701}
2702
2621static int __init xlblk_init(void) 2703static int __init xlblk_init(void)
2622{ 2704{
2623 int ret; 2705 int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
2626 if (!xen_domain()) 2708 if (!xen_domain())
2627 return -ENODEV; 2709 return -ENODEV;
2628 2710
2711 if (!xen_has_pv_disk_devices())
2712 return -ENODEV;
2713
2714 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2715 pr_warn("xen_blk: can't get major %d with name %s\n",
2716 XENVBD_MAJOR, DEV_NAME);
2717 return -ENODEV;
2718 }
2719
2629 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) 2720 if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2630 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; 2721 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2631 2722
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
2641 xen_blkif_max_queues = nr_cpus; 2732 xen_blkif_max_queues = nr_cpus;
2642 } 2733 }
2643 2734
2644 if (!xen_has_pv_disk_devices()) 2735 INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
2645 return -ENODEV;
2646
2647 if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2648 printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2649 XENVBD_MAJOR, DEV_NAME);
2650 return -ENODEV;
2651 }
2652 2736
2653 ret = xenbus_register_frontend(&blkfront_driver); 2737 ret = xenbus_register_frontend(&blkfront_driver);
2654 if (ret) { 2738 if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
2663 2747
2664static void __exit xlblk_exit(void) 2748static void __exit xlblk_exit(void)
2665{ 2749{
2750 cancel_delayed_work_sync(&blkfront_work);
2751
2666 xenbus_unregister_driver(&blkfront_driver); 2752 xenbus_unregister_driver(&blkfront_driver);
2667 unregister_blkdev(XENVBD_MAJOR, DEV_NAME); 2753 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2668 kfree(minors); 2754 kfree(minors);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2df11cc08a46..845b0314ce3a 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
200 depends on BT_HCIUART 200 depends on BT_HCIUART
201 depends on BT_HCIUART_SERDEV 201 depends on BT_HCIUART_SERDEV
202 depends on GPIOLIB 202 depends on GPIOLIB
203 depends on ACPI
203 select BT_HCIUART_3WIRE 204 select BT_HCIUART_3WIRE
204 select BT_RTL 205 select BT_RTL
205 help 206 help
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index ed2a5c7cb77f..4593baff2bc9 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
144 fw_size = fw->size; 144 fw_size = fw->size;
145 145
146 /* The size of patch header is 30 bytes, should be skip */ 146 /* The size of patch header is 30 bytes, should be skip */
147 if (fw_size < 30) 147 if (fw_size < 30) {
148 return -EINVAL; 148 err = -EINVAL;
149 goto free_fw;
150 }
149 151
150 fw_size -= 30; 152 fw_size -= 30;
151 fw_ptr += 30; 153 fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
172 fw_ptr += dlen; 174 fw_ptr += dlen;
173 } 175 }
174 176
177free_fw:
175 release_firmware(fw); 178 release_firmware(fw);
176
177 return err; 179 return err;
178} 180}
179 181
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 963bb0309e25..ea6238ed5c0e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
543 } 543 }
544 clear_bit(HCI_UART_PROTO_SET, &hu->flags); 544 clear_bit(HCI_UART_PROTO_SET, &hu->flags);
545 545
546 percpu_free_rwsem(&hu->proto_lock);
547
546 kfree(hu); 548 kfree(hu);
547} 549}
548 550
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index c9bac9dc4637..e4fe954e63a9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
498 498
499/** 499/**
500 * syc_ioremap - ioremap register space for the interconnect target module 500 * syc_ioremap - ioremap register space for the interconnect target module
501 * @ddata: deviec driver data 501 * @ddata: device driver data
502 * 502 *
503 * Note that the interconnect target module registers can be anywhere 503 * Note that the interconnect target module registers can be anywhere
504 * within the first child device address space. For example, SGX has 504 * within the interconnect target module range. For example, SGX has
505 * them at offset 0x1fc00 in the 32MB module address space. We just 505 * them at offset 0x1fc00 in the 32MB module address space. And cpsw
506 * what we need around the interconnect target module registers. 506 * has them at offset 0x1200 in the CPSW_WR child. Usually the
507 * the interconnect target module registers are at the beginning of
508 * the module range though.
507 */ 509 */
508static int sysc_ioremap(struct sysc *ddata) 510static int sysc_ioremap(struct sysc *ddata)
509{ 511{
510 u32 size = 0; 512 int size;
511
512 if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
513 size = ddata->offsets[SYSC_SYSSTATUS];
514 else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
515 size = ddata->offsets[SYSC_SYSCONFIG];
516 else if (ddata->offsets[SYSC_REVISION] >= 0)
517 size = ddata->offsets[SYSC_REVISION];
518 else
519 return -EINVAL;
520 513
521 size &= 0xfff00; 514 size = max3(ddata->offsets[SYSC_REVISION],
522 size += SZ_256; 515 ddata->offsets[SYSC_SYSCONFIG],
516 ddata->offsets[SYSC_SYSSTATUS]);
517
518 if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
519 return -EINVAL;
523 520
524 ddata->module_va = devm_ioremap(ddata->dev, 521 ddata->module_va = devm_ioremap(ddata->dev,
525 ddata->module_pa, 522 ddata->module_pa,
526 size); 523 size + sizeof(u32));
527 if (!ddata->module_va) 524 if (!ddata->module_va)
528 return -EIO; 525 return -EIO;
529 526
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
1224 if (!pm_runtime_status_suspended(dev)) { 1221 if (!pm_runtime_status_suspended(dev)) {
1225 error = pm_generic_runtime_suspend(dev); 1222 error = pm_generic_runtime_suspend(dev);
1226 if (error) { 1223 if (error) {
1227 dev_err(dev, "%s error at %i: %i\n", 1224 dev_warn(dev, "%s busy at %i: %i\n",
1228 __func__, __LINE__, error); 1225 __func__, __LINE__, error);
1229 1226
1230 return error; 1227 return 0;
1231 } 1228 }
1232 1229
1233 error = sysc_runtime_suspend(ddata->dev); 1230 error = sysc_runtime_suspend(ddata->dev);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 113fc6edb2b0..a5d5a96479bf 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
2546 if (!CDROM_CAN(CDC_SELECT_DISC) || 2546 if (!CDROM_CAN(CDC_SELECT_DISC) ||
2547 (arg == CDSL_CURRENT || arg == CDSL_NONE)) 2547 (arg == CDSL_CURRENT || arg == CDSL_NONE))
2548 return cdi->ops->drive_status(cdi, CDSL_CURRENT); 2548 return cdi->ops->drive_status(cdi, CDSL_CURRENT);
2549 if (((int)arg >= cdi->capacity)) 2549 if (arg >= cdi->capacity)
2550 return -EINVAL; 2550 return -EINVAL;
2551 return cdrom_slot_status(cdi, arg); 2551 return cdrom_slot_status(cdi, arg);
2552} 2552}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ce277ee0a28a..40728491f37b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
566 that CPU manufacturer (perhaps with the insistence or mandate 566 that CPU manufacturer (perhaps with the insistence or mandate
567 of a Nation State's intelligence or law enforcement agencies) 567 of a Nation State's intelligence or law enforcement agencies)
568 has not installed a hidden back door to compromise the CPU's 568 has not installed a hidden back door to compromise the CPU's
569 random number generation facilities. 569 random number generation facilities. This can also be configured
570 570 at boot with "random.trust_cpu=on/off".
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a3397664f800..97d6856c9c0f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -59,8 +59,6 @@ enum bt_states {
59 BT_STATE_RESET3, 59 BT_STATE_RESET3,
60 BT_STATE_RESTART, 60 BT_STATE_RESTART,
61 BT_STATE_PRINTME, 61 BT_STATE_PRINTME,
62 BT_STATE_CAPABILITIES_BEGIN,
63 BT_STATE_CAPABILITIES_END,
64 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ 62 BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
65}; 63};
66 64
@@ -86,7 +84,6 @@ struct si_sm_data {
86 int error_retries; /* end of "common" fields */ 84 int error_retries; /* end of "common" fields */
87 int nonzero_status; /* hung BMCs stay all 0 */ 85 int nonzero_status; /* hung BMCs stay all 0 */
88 enum bt_states complete; /* to divert the state machine */ 86 enum bt_states complete; /* to divert the state machine */
89 int BT_CAP_outreqs;
90 long BT_CAP_req2rsp; 87 long BT_CAP_req2rsp;
91 int BT_CAP_retries; /* Recommended retries */ 88 int BT_CAP_retries; /* Recommended retries */
92}; 89};
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
137 case BT_STATE_RESET3: return("RESET3"); 134 case BT_STATE_RESET3: return("RESET3");
138 case BT_STATE_RESTART: return("RESTART"); 135 case BT_STATE_RESTART: return("RESTART");
139 case BT_STATE_LONG_BUSY: return("LONG_BUSY"); 136 case BT_STATE_LONG_BUSY: return("LONG_BUSY");
140 case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
141 case BT_STATE_CAPABILITIES_END: return("CAP_END");
142 } 137 }
143 return("BAD STATE"); 138 return("BAD STATE");
144} 139}
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
185 bt->complete = BT_STATE_IDLE; /* end here */ 180 bt->complete = BT_STATE_IDLE; /* end here */
186 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC; 181 bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
187 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; 182 bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
188 /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
189 return 3; /* We claim 3 bytes of space; ought to check SPMI table */ 183 return 3; /* We claim 3 bytes of space; ought to check SPMI table */
190} 184}
191 185
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
451 445
452static enum si_sm_result bt_event(struct si_sm_data *bt, long time) 446static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
453{ 447{
454 unsigned char status, BT_CAP[8]; 448 unsigned char status;
455 static enum bt_states last_printed = BT_STATE_PRINTME; 449 static enum bt_states last_printed = BT_STATE_PRINTME;
456 int i; 450 int i;
457 451
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
504 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ 498 if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
505 BT_CONTROL(BT_H_BUSY); 499 BT_CONTROL(BT_H_BUSY);
506 500
507 bt->timeout = bt->BT_CAP_req2rsp;
508
509 /* Read BT capabilities if it hasn't been done yet */
510 if (!bt->BT_CAP_outreqs)
511 BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
512 SI_SM_CALL_WITHOUT_DELAY);
513 BT_SI_SM_RETURN(SI_SM_IDLE); 501 BT_SI_SM_RETURN(SI_SM_IDLE);
514 502
515 case BT_STATE_XACTION_START: 503 case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
614 BT_STATE_CHANGE(BT_STATE_XACTION_START, 602 BT_STATE_CHANGE(BT_STATE_XACTION_START,
615 SI_SM_CALL_WITH_DELAY); 603 SI_SM_CALL_WITH_DELAY);
616 604
617 /*
618 * Get BT Capabilities, using timing of upper level state machine.
619 * Set outreqs to prevent infinite loop on timeout.
620 */
621 case BT_STATE_CAPABILITIES_BEGIN:
622 bt->BT_CAP_outreqs = 1;
623 {
624 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
625 bt->state = BT_STATE_IDLE;
626 bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
627 }
628 bt->complete = BT_STATE_CAPABILITIES_END;
629 BT_STATE_CHANGE(BT_STATE_XACTION_START,
630 SI_SM_CALL_WITH_DELAY);
631
632 case BT_STATE_CAPABILITIES_END:
633 i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
634 bt_init_data(bt, bt->io);
635 if ((i == 8) && !BT_CAP[2]) {
636 bt->BT_CAP_outreqs = BT_CAP[3];
637 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
638 bt->BT_CAP_retries = BT_CAP[7];
639 } else
640 printk(KERN_WARNING "IPMI BT: using default values\n");
641 if (!bt->BT_CAP_outreqs)
642 bt->BT_CAP_outreqs = 1;
643 printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
644 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
645 bt->timeout = bt->BT_CAP_req2rsp;
646 return SI_SM_CALL_WITHOUT_DELAY;
647
648 default: /* should never occur */ 605 default: /* should never occur */
649 return error_recovery(bt, 606 return error_recovery(bt,
650 status, 607 status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
655 612
656static int bt_detect(struct si_sm_data *bt) 613static int bt_detect(struct si_sm_data *bt)
657{ 614{
615 unsigned char GetBT_CAP[] = { 0x18, 0x36 };
616 unsigned char BT_CAP[8];
617 enum si_sm_result smi_result;
618 int rv;
619
658 /* 620 /*
659 * It's impossible for the BT status and interrupt registers to be 621 * It's impossible for the BT status and interrupt registers to be
660 * all 1's, (assuming a properly functioning, self-initialized BMC) 622 * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
665 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) 627 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
666 return 1; 628 return 1;
667 reset_flags(bt); 629 reset_flags(bt);
630
631 /*
632 * Try getting the BT capabilities here.
633 */
634 rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
635 if (rv) {
636 dev_warn(bt->io->dev,
637 "Can't start capabilities transaction: %d\n", rv);
638 goto out_no_bt_cap;
639 }
640
641 smi_result = SI_SM_CALL_WITHOUT_DELAY;
642 for (;;) {
643 if (smi_result == SI_SM_CALL_WITH_DELAY ||
644 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
645 schedule_timeout_uninterruptible(1);
646 smi_result = bt_event(bt, jiffies_to_usecs(1));
647 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
648 smi_result = bt_event(bt, 0);
649 } else
650 break;
651 }
652
653 rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
654 bt_init_data(bt, bt->io);
655 if (rv < 8) {
656 dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
657 goto out_no_bt_cap;
658 }
659
660 if (BT_CAP[2]) {
661 dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
662out_no_bt_cap:
663 dev_warn(bt->io->dev, "using default values\n");
664 } else {
665 bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
666 bt->BT_CAP_retries = BT_CAP[7];
667 }
668
669 dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
670 bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
671
668 return 0; 672 return 0;
669} 673}
670 674
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 51832b8a2c62..7fc9612070a1 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
3381 3381
3382 rv = handlers->start_processing(send_info, intf); 3382 rv = handlers->start_processing(send_info, intf);
3383 if (rv) 3383 if (rv)
3384 goto out; 3384 goto out_err;
3385 3385
3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i); 3386 rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
3387 if (rv) { 3387 if (rv) {
3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv); 3388 dev_err(si_dev, "Unable to get the device id: %d\n", rv);
3389 goto out; 3389 goto out_err_started;
3390 } 3390 }
3391 3391
3392 mutex_lock(&intf->bmc_reg_mutex); 3392 mutex_lock(&intf->bmc_reg_mutex);
3393 rv = __scan_channels(intf, &id); 3393 rv = __scan_channels(intf, &id);
3394 mutex_unlock(&intf->bmc_reg_mutex); 3394 mutex_unlock(&intf->bmc_reg_mutex);
3395 if (rv)
3396 goto out_err_bmc_reg;
3395 3397
3396 out: 3398 /*
3397 if (rv) { 3399 * Keep memory order straight for RCU readers. Make
3398 ipmi_bmc_unregister(intf); 3400 * sure everything else is committed to memory before
3399 list_del_rcu(&intf->link); 3401 * setting intf_num to mark the interface valid.
3400 mutex_unlock(&ipmi_interfaces_mutex); 3402 */
3401 synchronize_srcu(&ipmi_interfaces_srcu); 3403 smp_wmb();
3402 cleanup_srcu_struct(&intf->users_srcu); 3404 intf->intf_num = i;
3403 kref_put(&intf->refcount, intf_free); 3405 mutex_unlock(&ipmi_interfaces_mutex);
3404 } else {
3405 /*
3406 * Keep memory order straight for RCU readers. Make
3407 * sure everything else is committed to memory before
3408 * setting intf_num to mark the interface valid.
3409 */
3410 smp_wmb();
3411 intf->intf_num = i;
3412 mutex_unlock(&ipmi_interfaces_mutex);
3413 3406
3414 /* After this point the interface is legal to use. */ 3407 /* After this point the interface is legal to use. */
3415 call_smi_watchers(i, intf->si_dev); 3408 call_smi_watchers(i, intf->si_dev);
3416 } 3409
3410 return 0;
3411
3412 out_err_bmc_reg:
3413 ipmi_bmc_unregister(intf);
3414 out_err_started:
3415 if (intf->handlers->shutdown)
3416 intf->handlers->shutdown(intf->send_info);
3417 out_err:
3418 list_del_rcu(&intf->link);
3419 mutex_unlock(&ipmi_interfaces_mutex);
3420 synchronize_srcu(&ipmi_interfaces_srcu);
3421 cleanup_srcu_struct(&intf->users_srcu);
3422 kref_put(&intf->refcount, intf_free);
3417 3423
3418 return rv; 3424 return rv;
3419} 3425}
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
3504 } 3510 }
3505 srcu_read_unlock(&intf->users_srcu, index); 3511 srcu_read_unlock(&intf->users_srcu, index);
3506 3512
3507 intf->handlers->shutdown(intf->send_info); 3513 if (intf->handlers->shutdown)
3514 intf->handlers->shutdown(intf->send_info);
3508 3515
3509 cleanup_smi_msgs(intf); 3516 cleanup_smi_msgs(intf);
3510 3517
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 90ec010bffbd..5faa917df1b6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
2083 si_to_str[new_smi->io.si_type]); 2083 si_to_str[new_smi->io.si_type]);
2084 2084
2085 WARN_ON(new_smi->io.dev->init_name != NULL); 2085 WARN_ON(new_smi->io.dev->init_name != NULL);
2086 kfree(init_name);
2087
2088 return 0;
2089
2090out_err:
2091 if (new_smi->intf) {
2092 ipmi_unregister_smi(new_smi->intf);
2093 new_smi->intf = NULL;
2094 }
2095 2086
2087 out_err:
2096 kfree(init_name); 2088 kfree(init_name);
2097
2098 return rv; 2089 return rv;
2099} 2090}
2100 2091
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
2227 2218
2228 kfree(smi_info->si_sm); 2219 kfree(smi_info->si_sm);
2229 smi_info->si_sm = NULL; 2220 smi_info->si_sm = NULL;
2221
2222 smi_info->intf = NULL;
2230} 2223}
2231 2224
2232/* 2225/*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
2240 2233
2241 list_del(&smi_info->link); 2234 list_del(&smi_info->link);
2242 2235
2243 if (smi_info->intf) { 2236 if (smi_info->intf)
2244 ipmi_unregister_smi(smi_info->intf); 2237 ipmi_unregister_smi(smi_info->intf);
2245 smi_info->intf = NULL;
2246 }
2247 2238
2248 if (smi_info->pdev) { 2239 if (smi_info->pdev) {
2249 if (smi_info->pdev_registered) 2240 if (smi_info->pdev_registered)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 18e4650c233b..29e67a80fb20 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -181,6 +181,8 @@ struct ssif_addr_info {
181 struct device *dev; 181 struct device *dev;
182 struct i2c_client *client; 182 struct i2c_client *client;
183 183
184 struct i2c_client *added_client;
185
184 struct mutex clients_mutex; 186 struct mutex clients_mutex;
185 struct list_head clients; 187 struct list_head clients;
186 188
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
1214 complete(&ssif_info->wake_thread); 1216 complete(&ssif_info->wake_thread);
1215 kthread_stop(ssif_info->thread); 1217 kthread_stop(ssif_info->thread);
1216 } 1218 }
1217
1218 /*
1219 * No message can be outstanding now, we have removed the
1220 * upper layer and it permitted us to do so.
1221 */
1222 kfree(ssif_info);
1223} 1219}
1224 1220
1225static int ssif_remove(struct i2c_client *client) 1221static int ssif_remove(struct i2c_client *client)
1226{ 1222{
1227 struct ssif_info *ssif_info = i2c_get_clientdata(client); 1223 struct ssif_info *ssif_info = i2c_get_clientdata(client);
1228 struct ipmi_smi *intf;
1229 struct ssif_addr_info *addr_info; 1224 struct ssif_addr_info *addr_info;
1230 1225
1231 if (!ssif_info) 1226 if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
1235 * After this point, we won't deliver anything asychronously 1230 * After this point, we won't deliver anything asychronously
1236 * to the message handler. We can unregister ourself. 1231 * to the message handler. We can unregister ourself.
1237 */ 1232 */
1238 intf = ssif_info->intf; 1233 ipmi_unregister_smi(ssif_info->intf);
1239 ssif_info->intf = NULL;
1240 ipmi_unregister_smi(intf);
1241 1234
1242 list_for_each_entry(addr_info, &ssif_infos, link) { 1235 list_for_each_entry(addr_info, &ssif_infos, link) {
1243 if (addr_info->client == client) { 1236 if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
1246 } 1239 }
1247 } 1240 }
1248 1241
1242 kfree(ssif_info);
1243
1249 return 0; 1244 return 0;
1250} 1245}
1251 1246
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1648 1643
1649 out: 1644 out:
1650 if (rv) { 1645 if (rv) {
1651 /* 1646 if (addr_info)
1652 * Note that if addr_info->client is assigned, we 1647 addr_info->client = NULL;
1653 * leave it. The i2c client hangs around even if we 1648
1654 * return a failure here, and the failure here is not
1655 * propagated back to the i2c code. This seems to be
1656 * design intent, strange as it may be. But if we
1657 * don't leave it, ssif_platform_remove will not remove
1658 * the client like it should.
1659 */
1660 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); 1649 dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
1661 kfree(ssif_info); 1650 kfree(ssif_info);
1662 } 1651 }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
1676 if (adev->type != &i2c_adapter_type) 1665 if (adev->type != &i2c_adapter_type)
1677 return 0; 1666 return 0;
1678 1667
1679 i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo); 1668 addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
1669 &addr_info->binfo);
1680 1670
1681 if (!addr_info->adapter_name) 1671 if (!addr_info->adapter_name)
1682 return 1; /* Only try the first I2C adapter by default. */ 1672 return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
1849 return 0; 1839 return 0;
1850 1840
1851 mutex_lock(&ssif_infos_mutex); 1841 mutex_lock(&ssif_infos_mutex);
1852 i2c_unregister_device(addr_info->client); 1842 i2c_unregister_device(addr_info->added_client);
1853 1843
1854 list_del(&addr_info->link); 1844 list_del(&addr_info->link);
1855 kfree(addr_info); 1845 kfree(addr_info);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index bb882ab161fe..e6124bd548df 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -16,6 +16,8 @@
16 16
17#include "kcs_bmc.h" 17#include "kcs_bmc.h"
18 18
19#define DEVICE_NAME "ipmi-kcs"
20
19#define KCS_MSG_BUFSIZ 1000 21#define KCS_MSG_BUFSIZ 1000
20 22
21#define KCS_ZERO_DATA 0 23#define KCS_ZERO_DATA 0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
429 if (!kcs_bmc) 431 if (!kcs_bmc)
430 return NULL; 432 return NULL;
431 433
432 dev_set_name(dev, "ipmi-kcs%u", channel);
433
434 spin_lock_init(&kcs_bmc->lock); 434 spin_lock_init(&kcs_bmc->lock);
435 kcs_bmc->channel = channel; 435 kcs_bmc->channel = channel;
436 436
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
444 return NULL; 444 return NULL;
445 445
446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; 446 kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
447 kcs_bmc->miscdev.name = dev_name(dev); 447 kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
448 DEVICE_NAME, channel);
448 kcs_bmc->miscdev.fops = &kcs_bmc_fops; 449 kcs_bmc->miscdev.fops = &kcs_bmc_fops;
449 450
450 return kcs_bmc; 451 return kcs_bmc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bf5f99fc36f1..c75b6cdf0053 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
779 779
780static void invalidate_batched_entropy(void); 780static void invalidate_batched_entropy(void);
781 781
782static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
783static int __init parse_trust_cpu(char *arg)
784{
785 return kstrtobool(arg, &trust_cpu);
786}
787early_param("random.trust_cpu", parse_trust_cpu);
788
782static void crng_initialize(struct crng_state *crng) 789static void crng_initialize(struct crng_state *crng)
783{ 790{
784 int i; 791 int i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
799 } 806 }
800 crng->state[i] ^= rv; 807 crng->state[i] ^= rv;
801 } 808 }
802#ifdef CONFIG_RANDOM_TRUST_CPU 809 if (trust_cpu && arch_init) {
803 if (arch_init) {
804 crng_init = 2; 810 crng_init = 2;
805 pr_notice("random: crng done (trusting CPU's manufacturer)\n"); 811 pr_notice("random: crng done (trusting CPU's manufacturer)\n");
806 } 812 }
807#endif
808 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; 813 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
809} 814}
810 815
diff --git a/drivers/clk/clk-npcm7xx.c b/drivers/clk/clk-npcm7xx.c
index 740af90a9508..c5edf8f2fd19 100644
--- a/drivers/clk/clk-npcm7xx.c
+++ b/drivers/clk/clk-npcm7xx.c
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
558 if (!clk_base) 558 if (!clk_base)
559 goto npcm7xx_init_error; 559 goto npcm7xx_init_error;
560 560
561 npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) * 561 npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
562 NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL); 562 NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
563 if (!npcm7xx_clk_data) 563 if (!npcm7xx_clk_data)
564 goto npcm7xx_init_np_err; 564 goto npcm7xx_init_np_err;
565 565
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 08ef69945ffb..d977193842df 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -55,6 +55,7 @@ struct clk_plt_data {
55 u8 nparents; 55 u8 nparents;
56 struct clk_plt *clks[PMC_CLK_NUM]; 56 struct clk_plt *clks[PMC_CLK_NUM];
57 struct clk_lookup *mclk_lookup; 57 struct clk_lookup *mclk_lookup;
58 struct clk_lookup *ether_clk_lookup;
58}; 59};
59 60
60/* Return an index in parent table */ 61/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
186 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; 187 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
187 spin_lock_init(&pclk->lock); 188 spin_lock_init(&pclk->lock);
188 189
189 /*
190 * If the clock was already enabled by the firmware mark it as critical
191 * to avoid it being gated by the clock framework if no driver owns it.
192 */
193 if (plt_clk_is_enabled(&pclk->hw))
194 init.flags |= CLK_IS_CRITICAL;
195
196 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); 190 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
197 if (ret) { 191 if (ret) {
198 pclk = ERR_PTR(ret); 192 pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
351 goto err_unreg_clk_plt; 345 goto err_unreg_clk_plt;
352 } 346 }
353 347
348 data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
349 "ether_clk", NULL);
350 if (!data->ether_clk_lookup) {
351 err = -ENOMEM;
352 goto err_drop_mclk;
353 }
354
354 plt_clk_free_parent_names_loop(parent_names, data->nparents); 355 plt_clk_free_parent_names_loop(parent_names, data->nparents);
355 356
356 platform_set_drvdata(pdev, data); 357 platform_set_drvdata(pdev, data);
357 return 0; 358 return 0;
358 359
360err_drop_mclk:
361 clkdev_drop(data->mclk_lookup);
359err_unreg_clk_plt: 362err_unreg_clk_plt:
360 plt_clk_unregister_loop(data, i); 363 plt_clk_unregister_loop(data, i);
361 plt_clk_unregister_parents(data); 364 plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
369 372
370 data = platform_get_drvdata(pdev); 373 data = platform_get_drvdata(pdev);
371 374
375 clkdev_drop(data->ether_clk_lookup);
372 clkdev_drop(data->mclk_lookup); 376 clkdev_drop(data->mclk_lookup);
373 plt_clk_unregister_loop(data, PMC_CLK_NUM); 377 plt_clk_unregister_loop(data, PMC_CLK_NUM);
374 plt_clk_unregister_parents(data); 378 plt_clk_unregister_parents(data);
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c
index fb62f3938008..3a0996f2d556 100644
--- a/drivers/clk/x86/clk-st.c
+++ b/drivers/clk/x86/clk-st.c
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
46 clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents), 46 clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
47 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL); 47 0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
48 48
49 clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk); 49 clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
50 50
51 hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux", 51 hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, 52 0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74f..2fab18fae4fc 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
180 data->base = of_iomap(node, 0); 180 data->base = of_iomap(node, 0);
181 if (!data->base) { 181 if (!data->base) {
182 pr_err("Could not map PIT address\n"); 182 pr_err("Could not map PIT address\n");
183 return -ENXIO; 183 ret = -ENXIO;
184 goto exit;
184 } 185 }
185 186
186 data->mck = of_clk_get(node, 0); 187 data->mck = of_clk_get(node, 0);
187 if (IS_ERR(data->mck)) { 188 if (IS_ERR(data->mck)) {
188 pr_err("Unable to get mck clk\n"); 189 pr_err("Unable to get mck clk\n");
189 return PTR_ERR(data->mck); 190 ret = PTR_ERR(data->mck);
191 goto exit;
190 } 192 }
191 193
192 ret = clk_prepare_enable(data->mck); 194 ret = clk_prepare_enable(data->mck);
193 if (ret) { 195 if (ret) {
194 pr_err("Unable to enable mck\n"); 196 pr_err("Unable to enable mck\n");
195 return ret; 197 goto exit;
196 } 198 }
197 199
198 /* Get the interrupts property */ 200 /* Get the interrupts property */
199 data->irq = irq_of_parse_and_map(node, 0); 201 data->irq = irq_of_parse_and_map(node, 0);
200 if (!data->irq) { 202 if (!data->irq) {
201 pr_err("Unable to get IRQ from DT\n"); 203 pr_err("Unable to get IRQ from DT\n");
202 return -EINVAL; 204 ret = -EINVAL;
205 goto exit;
203 } 206 }
204 207
205 /* 208 /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
227 ret = clocksource_register_hz(&data->clksrc, pit_rate); 230 ret = clocksource_register_hz(&data->clksrc, pit_rate);
228 if (ret) { 231 if (ret) {
229 pr_err("Failed to register clocksource\n"); 232 pr_err("Failed to register clocksource\n");
230 return ret; 233 goto exit;
231 } 234 }
232 235
233 /* Set up irq handler */ 236 /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
236 "at91_tick", data); 239 "at91_tick", data);
237 if (ret) { 240 if (ret) {
238 pr_err("Unable to setup IRQ\n"); 241 pr_err("Unable to setup IRQ\n");
239 return ret; 242 clocksource_unregister(&data->clksrc);
243 goto exit;
240 } 244 }
241 245
242 /* Set up and register clockevents */ 246 /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
254 clockevents_register_device(&data->clkevt); 258 clockevents_register_device(&data->clkevt);
255 259
256 return 0; 260 return 0;
261
262exit:
263 kfree(data);
264 return ret;
257} 265}
258TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", 266TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
259 at91sam926x_pit_dt_init); 267 at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab..cf93f6419b51 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
130 cr &= ~fttmr010->t1_enable_val; 130 cr &= ~fttmr010->t1_enable_val;
131 writel(cr, fttmr010->base + TIMER_CR); 131 writel(cr, fttmr010->base + TIMER_CR);
132 132
133 /* Setup the match register forward/backward in time */ 133 if (fttmr010->count_down) {
134 cr = readl(fttmr010->base + TIMER1_COUNT); 134 /*
135 if (fttmr010->count_down) 135 * ASPEED Timer Controller will load TIMER1_LOAD register
136 cr -= cycles; 136 * into TIMER1_COUNT register when the timer is re-enabled.
137 else 137 */
138 cr += cycles; 138 writel(cycles, fttmr010->base + TIMER1_LOAD);
139 writel(cr, fttmr010->base + TIMER1_MATCH1); 139 } else {
140 /* Setup the match register forward in time */
141 cr = readl(fttmr010->base + TIMER1_COUNT);
142 writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
143 }
140 144
141 /* Start */ 145 /* Start */
142 cr = readl(fttmr010->base + TIMER_CR); 146 cr = readl(fttmr010->base + TIMER_CR);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43..6949a9113dbb 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
97 return -ENXIO; 97 return -ENXIO;
98 } 98 }
99 99
100 if (!of_machine_is_compatible("ti,am43"))
101 ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
102
100 ti_32k_timer.counter = ti_32k_timer.base; 103 ti_32k_timer.counter = ti_32k_timer.base;
101 104
102 /* 105 /*
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5..2a3675c24032 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -44,7 +44,7 @@ enum _msm8996_version {
44 44
45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; 45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
46 46
47static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) 47static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
48{ 48{
49 size_t len; 49 size_t len;
50 u32 *msm_id; 50 u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
222} 222}
223module_init(qcom_cpufreq_kryo_init); 223module_init(qcom_cpufreq_kryo_init);
224 224
225static void __init qcom_cpufreq_kryo_exit(void) 225static void __exit qcom_cpufreq_kryo_exit(void)
226{ 226{
227 platform_device_unregister(kryo_cpufreq_pdev); 227 platform_device_unregister(kryo_cpufreq_pdev);
228 platform_driver_unregister(&qcom_cpufreq_kryo_driver); 228 platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 110483f0e3fb..e26a40971b26 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
379 if (idx == -1) 379 if (idx == -1)
380 idx = i; /* first enabled state */ 380 idx = i; /* first enabled state */
381 if (s->target_residency > data->predicted_us) { 381 if (s->target_residency > data->predicted_us) {
382 if (!tick_nohz_tick_stopped()) 382 if (data->predicted_us < TICK_USEC)
383 break; 383 break;
384 384
385 if (!tick_nohz_tick_stopped()) {
386 /*
387 * If the state selected so far is shallow,
388 * waking up early won't hurt, so retain the
389 * tick in that case and let the governor run
390 * again in the next iteration of the loop.
391 */
392 expected_interval = drv->states[idx].target_residency;
393 break;
394 }
395
385 /* 396 /*
386 * If the state selected so far is shallow and this 397 * If the state selected so far is shallow and this
387 * state's target residency matches the time till the 398 * state's target residency matches the time till the
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 6e61cc93c2b0..d7aa7d7ff102 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
679 int ret = 0; 679 int ret = 0;
680 680
681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { 681 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
682 crypto_ablkcipher_set_flags(ablkcipher,
683 CRYPTO_TFM_RES_BAD_KEY_LEN);
684 dev_err(jrdev, "key size mismatch\n"); 682 dev_err(jrdev, "key size mismatch\n");
685 return -EINVAL; 683 goto badkey;
686 } 684 }
687 685
688 ctx->cdata.keylen = keylen; 686 ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
715 return ret; 713 return ret;
716badkey: 714badkey:
717 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 715 crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
718 return 0; 716 return -EINVAL;
719} 717}
720 718
721/* 719/*
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 578ea63a3109..f26d62e5533a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
71 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); 71 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
72 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); 72 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
73 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); 73 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
74 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 74 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
75 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); 75 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
76} 76}
77 77
78static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, 78static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
90 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); 90 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
91 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); 91 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
92 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); 92 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE); 94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
95} 95}
96 96
97/* RSA Job Completion handler */ 97/* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
417 goto unmap_p; 417 goto unmap_p;
418 } 418 }
419 419
420 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); 420 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
421 if (dma_mapping_error(dev, pdb->tmp1_dma)) { 421 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
422 dev_err(dev, "Unable to map RSA tmp1 memory\n"); 422 dev_err(dev, "Unable to map RSA tmp1 memory\n");
423 goto unmap_q; 423 goto unmap_q;
424 } 424 }
425 425
426 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); 426 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
427 if (dma_mapping_error(dev, pdb->tmp2_dma)) { 427 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
428 dev_err(dev, "Unable to map RSA tmp2 memory\n"); 428 dev_err(dev, "Unable to map RSA tmp2 memory\n");
429 goto unmap_tmp1; 429 goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
451 return 0; 451 return 0;
452 452
453unmap_tmp1: 453unmap_tmp1:
454 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 454 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
455unmap_q: 455unmap_q:
456 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); 456 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
457unmap_p: 457unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
504 goto unmap_dq; 504 goto unmap_dq;
505 } 505 }
506 506
507 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE); 507 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
508 if (dma_mapping_error(dev, pdb->tmp1_dma)) { 508 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
509 dev_err(dev, "Unable to map RSA tmp1 memory\n"); 509 dev_err(dev, "Unable to map RSA tmp1 memory\n");
510 goto unmap_qinv; 510 goto unmap_qinv;
511 } 511 }
512 512
513 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE); 513 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
514 if (dma_mapping_error(dev, pdb->tmp2_dma)) { 514 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
515 dev_err(dev, "Unable to map RSA tmp2 memory\n"); 515 dev_err(dev, "Unable to map RSA tmp2 memory\n");
516 goto unmap_tmp1; 516 goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
538 return 0; 538 return 0;
539 539
540unmap_tmp1: 540unmap_tmp1:
541 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE); 541 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
542unmap_qinv: 542unmap_qinv:
543 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); 543 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
544unmap_dq: 544unmap_dq:
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b89..acdd72016ffe 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
190 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); 190 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
191 191
192 /* Unmap just-run descriptor so we can post-process */ 192 /* Unmap just-run descriptor so we can post-process */
193 dma_unmap_single(dev, jrp->outring[hw_idx].desc, 193 dma_unmap_single(dev,
194 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
194 jrp->entinfo[sw_idx].desc_size, 195 jrp->entinfo[sw_idx].desc_size,
195 DMA_TO_DEVICE); 196 DMA_TO_DEVICE);
196 197
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 9a476bb6d4c7..af596455b420 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
35 /* requests in backlog queues */ 35 /* requests in backlog queues */
36 atomic_t backlog_count; 36 atomic_t backlog_count;
37 37
38 int write_idx;
38 /* command size 32B/64B */ 39 /* command size 32B/64B */
39 u8 instr_size; 40 u8 instr_size;
40 u8 qno; 41 u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
87 struct bh_data *slc; 88 struct bh_data *slc;
88}; 89};
89 90
90/* NITROX-5 driver state */ 91/* NITROX-V driver state */
91#define NITROX_UCODE_LOADED 0 92#define NITROX_UCODE_LOADED 0
92#define NITROX_READY 1 93#define NITROX_READY 1
93 94
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index ebe267379ac9..4d31df07777f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); 36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); 37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
38 cmdq->qsize = (qsize + PKT_IN_ALIGN); 38 cmdq->qsize = (qsize + PKT_IN_ALIGN);
39 cmdq->write_idx = 0;
39 40
40 spin_lock_init(&cmdq->response_lock); 41 spin_lock_init(&cmdq->response_lock);
41 spin_lock_init(&cmdq->cmdq_lock); 42 spin_lock_init(&cmdq->cmdq_lock);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index deaefd532aaa..4a362fc22f62 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -42,6 +42,16 @@
42 * Invalid flag options in AES-CCM IV. 42 * Invalid flag options in AES-CCM IV.
43 */ 43 */
44 44
45static inline int incr_index(int index, int count, int max)
46{
47 if ((index + count) >= max)
48 index = index + count - max;
49 else
50 index += count;
51
52 return index;
53}
54
45/** 55/**
46 * dma_free_sglist - unmap and free the sg lists. 56 * dma_free_sglist - unmap and free the sg lists.
47 * @ndev: N5 device 57 * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
426 struct nitrox_cmdq *cmdq) 436 struct nitrox_cmdq *cmdq)
427{ 437{
428 struct nitrox_device *ndev = sr->ndev; 438 struct nitrox_device *ndev = sr->ndev;
429 union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell; 439 int idx;
430 u64 offset;
431 u8 *ent; 440 u8 *ent;
432 441
433 spin_lock_bh(&cmdq->cmdq_lock); 442 spin_lock_bh(&cmdq->cmdq_lock);
434 443
435 /* get the next write offset */ 444 idx = cmdq->write_idx;
436 offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
437 pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
438 /* copy the instruction */ 445 /* copy the instruction */
439 ent = cmdq->head + pkt_in_baoff_dbell.s.aoff; 446 ent = cmdq->head + (idx * cmdq->instr_size);
440 memcpy(ent, &sr->instr, cmdq->instr_size); 447 memcpy(ent, &sr->instr, cmdq->instr_size);
441 /* flush the command queue updates */
442 dma_wmb();
443 448
444 sr->tstamp = jiffies;
445 atomic_set(&sr->status, REQ_POSTED); 449 atomic_set(&sr->status, REQ_POSTED);
446 response_list_add(sr, cmdq); 450 response_list_add(sr, cmdq);
451 sr->tstamp = jiffies;
452 /* flush the command queue updates */
453 dma_wmb();
447 454
448 /* Ring doorbell with count 1 */ 455 /* Ring doorbell with count 1 */
449 writeq(1, cmdq->dbell_csr_addr); 456 writeq(1, cmdq->dbell_csr_addr);
450 /* orders the doorbell rings */ 457 /* orders the doorbell rings */
451 mmiowb(); 458 mmiowb();
452 459
460 cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
461
453 spin_unlock_bh(&cmdq->cmdq_lock); 462 spin_unlock_bh(&cmdq->cmdq_lock);
454} 463}
455 464
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
459 struct nitrox_softreq *sr, *tmp; 468 struct nitrox_softreq *sr, *tmp;
460 int ret = 0; 469 int ret = 0;
461 470
471 if (!atomic_read(&cmdq->backlog_count))
472 return 0;
473
462 spin_lock_bh(&cmdq->backlog_lock); 474 spin_lock_bh(&cmdq->backlog_lock);
463 475
464 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { 476 list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
466 478
467 /* submit until space available */ 479 /* submit until space available */
468 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 480 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
469 ret = -EBUSY; 481 ret = -ENOSPC;
470 break; 482 break;
471 } 483 }
472 /* delete from backlog list */ 484 /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
491{ 503{
492 struct nitrox_cmdq *cmdq = sr->cmdq; 504 struct nitrox_cmdq *cmdq = sr->cmdq;
493 struct nitrox_device *ndev = sr->ndev; 505 struct nitrox_device *ndev = sr->ndev;
494 int ret = -EBUSY; 506
507 /* try to post backlog requests */
508 post_backlog_cmds(cmdq);
495 509
496 if (unlikely(cmdq_full(cmdq, ndev->qlen))) { 510 if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
497 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) 511 if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
498 return -EAGAIN; 512 return -ENOSPC;
499 513 /* add to backlog list */
500 backlog_list_add(sr, cmdq); 514 backlog_list_add(sr, cmdq);
501 } else { 515 return -EBUSY;
502 ret = post_backlog_cmds(cmdq);
503 if (ret) {
504 backlog_list_add(sr, cmdq);
505 return ret;
506 }
507 post_se_instr(sr, cmdq);
508 ret = -EINPROGRESS;
509 } 516 }
510 return ret; 517 post_se_instr(sr, cmdq);
518
519 return -EINPROGRESS;
511} 520}
512 521
513/** 522/**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
624 */ 633 */
625 sr->instr.fdata[0] = *((u64 *)&req->gph); 634 sr->instr.fdata[0] = *((u64 *)&req->gph);
626 sr->instr.fdata[1] = 0; 635 sr->instr.fdata[1] = 0;
627 /* flush the soft_req changes before posting the cmd */
628 wmb();
629 636
630 ret = nitrox_enqueue_request(sr); 637 ret = nitrox_enqueue_request(sr);
631 if (ret == -EAGAIN) 638 if (ret == -ENOSPC)
632 goto send_fail; 639 goto send_fail;
633 640
634 return ret; 641 return ret;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..72790d88236d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
38static struct sev_misc_dev *misc_dev; 38static struct sev_misc_dev *misc_dev;
39static struct psp_device *psp_master; 39static struct psp_device *psp_master;
40 40
41static int psp_cmd_timeout = 100;
42module_param(psp_cmd_timeout, int, 0644);
43MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
44
45static int psp_probe_timeout = 5;
46module_param(psp_probe_timeout, int, 0644);
47MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
48
49static bool psp_dead;
50static int psp_timeout;
51
41static struct psp_device *psp_alloc_struct(struct sp_device *sp) 52static struct psp_device *psp_alloc_struct(struct sp_device *sp)
42{ 53{
43 struct device *dev = sp->dev; 54 struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
82 return IRQ_HANDLED; 93 return IRQ_HANDLED;
83} 94}
84 95
85static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) 96static int sev_wait_cmd_ioc(struct psp_device *psp,
97 unsigned int *reg, unsigned int timeout)
86{ 98{
87 wait_event(psp->sev_int_queue, psp->sev_int_rcvd); 99 int ret;
100
101 ret = wait_event_timeout(psp->sev_int_queue,
102 psp->sev_int_rcvd, timeout * HZ);
103 if (!ret)
104 return -ETIMEDOUT;
105
88 *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); 106 *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
107
108 return 0;
89} 109}
90 110
91static int sev_cmd_buffer_len(int cmd) 111static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
133 if (!psp) 153 if (!psp)
134 return -ENODEV; 154 return -ENODEV;
135 155
156 if (psp_dead)
157 return -EBUSY;
158
136 /* Get the physical address of the command buffer */ 159 /* Get the physical address of the command buffer */
137 phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; 160 phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
138 phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; 161 phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
139 162
140 dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", 163 dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
141 cmd, phys_msb, phys_lsb); 164 cmd, phys_msb, phys_lsb, psp_timeout);
142 165
143 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, 166 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
144 sev_cmd_buffer_len(cmd), false); 167 sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
154 iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); 177 iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
155 178
156 /* wait for command completion */ 179 /* wait for command completion */
157 sev_wait_cmd_ioc(psp, &reg); 180 ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
181 if (ret) {
182 if (psp_ret)
183 *psp_ret = 0;
184
185 dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
186 psp_dead = true;
187
188 return ret;
189 }
190
191 psp_timeout = psp_cmd_timeout;
158 192
159 if (psp_ret) 193 if (psp_ret)
160 *psp_ret = reg & PSP_CMDRESP_ERR_MASK; 194 *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
888 922
889 psp_master = sp->psp_data; 923 psp_master = sp->psp_data;
890 924
925 psp_timeout = psp_probe_timeout;
926
891 if (sev_get_api_version()) 927 if (sev_get_api_version())
892 goto err; 928 goto err;
893 929
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6ba024..7725b6ee14ef 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -96,6 +96,10 @@ enum csk_flags {
96 CSK_CONN_INLINE, /* Connection on HW */ 96 CSK_CONN_INLINE, /* Connection on HW */
97}; 97};
98 98
99enum chtls_cdev_state {
100 CHTLS_CDEV_STATE_UP = 1
101};
102
99struct listen_ctx { 103struct listen_ctx {
100 struct sock *lsk; 104 struct sock *lsk;
101 struct chtls_dev *cdev; 105 struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
146 unsigned int send_page_order; 150 unsigned int send_page_order;
147 int max_host_sndbuf; 151 int max_host_sndbuf;
148 struct key_map kmap; 152 struct key_map kmap;
153 unsigned int cdev_state;
149}; 154};
150 155
151struct chtls_hws { 156struct chtls_hws {
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index 9b07f9165658..f59b044ebd25 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
160 tlsdev->hash = chtls_create_hash; 160 tlsdev->hash = chtls_create_hash;
161 tlsdev->unhash = chtls_destroy_hash; 161 tlsdev->unhash = chtls_destroy_hash;
162 tls_register_device(&cdev->tlsdev); 162 tls_register_device(&cdev->tlsdev);
163 cdev->cdev_state = CHTLS_CDEV_STATE_UP;
163} 164}
164 165
165static void chtls_unregister_dev(struct chtls_dev *cdev) 166static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
281 struct chtls_dev *cdev, *tmp; 282 struct chtls_dev *cdev, *tmp;
282 283
283 mutex_lock(&cdev_mutex); 284 mutex_lock(&cdev_mutex);
284 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) 285 list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
285 chtls_free_uld(cdev); 286 if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
287 chtls_free_uld(cdev);
288 }
286 mutex_unlock(&cdev_mutex); 289 mutex_unlock(&cdev_mutex);
287} 290}
288 291
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 5285ece4f33a..b71895871be3 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
107 ret = crypto_skcipher_encrypt(req); 107 ret = crypto_skcipher_encrypt(req);
108 skcipher_request_zero(req); 108 skcipher_request_zero(req);
109 } else { 109 } else {
110 preempt_disable();
111 pagefault_disable();
112 enable_kernel_vsx();
113
114 blkcipher_walk_init(&walk, dst, src, nbytes); 110 blkcipher_walk_init(&walk, dst, src, nbytes);
115 ret = blkcipher_walk_virt(desc, &walk); 111 ret = blkcipher_walk_virt(desc, &walk);
116 while ((nbytes = walk.nbytes)) { 112 while ((nbytes = walk.nbytes)) {
113 preempt_disable();
114 pagefault_disable();
115 enable_kernel_vsx();
117 aes_p8_cbc_encrypt(walk.src.virt.addr, 116 aes_p8_cbc_encrypt(walk.src.virt.addr,
118 walk.dst.virt.addr, 117 walk.dst.virt.addr,
119 nbytes & AES_BLOCK_MASK, 118 nbytes & AES_BLOCK_MASK,
120 &ctx->enc_key, walk.iv, 1); 119 &ctx->enc_key, walk.iv, 1);
120 disable_kernel_vsx();
121 pagefault_enable();
122 preempt_enable();
123
121 nbytes &= AES_BLOCK_SIZE - 1; 124 nbytes &= AES_BLOCK_SIZE - 1;
122 ret = blkcipher_walk_done(desc, &walk, nbytes); 125 ret = blkcipher_walk_done(desc, &walk, nbytes);
123 } 126 }
124
125 disable_kernel_vsx();
126 pagefault_enable();
127 preempt_enable();
128 } 127 }
129 128
130 return ret; 129 return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
147 ret = crypto_skcipher_decrypt(req); 146 ret = crypto_skcipher_decrypt(req);
148 skcipher_request_zero(req); 147 skcipher_request_zero(req);
149 } else { 148 } else {
150 preempt_disable();
151 pagefault_disable();
152 enable_kernel_vsx();
153
154 blkcipher_walk_init(&walk, dst, src, nbytes); 149 blkcipher_walk_init(&walk, dst, src, nbytes);
155 ret = blkcipher_walk_virt(desc, &walk); 150 ret = blkcipher_walk_virt(desc, &walk);
156 while ((nbytes = walk.nbytes)) { 151 while ((nbytes = walk.nbytes)) {
152 preempt_disable();
153 pagefault_disable();
154 enable_kernel_vsx();
157 aes_p8_cbc_encrypt(walk.src.virt.addr, 155 aes_p8_cbc_encrypt(walk.src.virt.addr,
158 walk.dst.virt.addr, 156 walk.dst.virt.addr,
159 nbytes & AES_BLOCK_MASK, 157 nbytes & AES_BLOCK_MASK,
160 &ctx->dec_key, walk.iv, 0); 158 &ctx->dec_key, walk.iv, 0);
159 disable_kernel_vsx();
160 pagefault_enable();
161 preempt_enable();
162
161 nbytes &= AES_BLOCK_SIZE - 1; 163 nbytes &= AES_BLOCK_SIZE - 1;
162 ret = blkcipher_walk_done(desc, &walk, nbytes); 164 ret = blkcipher_walk_done(desc, &walk, nbytes);
163 } 165 }
164
165 disable_kernel_vsx();
166 pagefault_enable();
167 preempt_enable();
168 } 166 }
169 167
170 return ret; 168 return ret;
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 8bd9aff0f55f..e9954a7d4694 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); 116 ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
117 skcipher_request_zero(req); 117 skcipher_request_zero(req);
118 } else { 118 } else {
119 blkcipher_walk_init(&walk, dst, src, nbytes);
120
121 ret = blkcipher_walk_virt(desc, &walk);
122
119 preempt_disable(); 123 preempt_disable();
120 pagefault_disable(); 124 pagefault_disable();
121 enable_kernel_vsx(); 125 enable_kernel_vsx();
122 126
123 blkcipher_walk_init(&walk, dst, src, nbytes);
124
125 ret = blkcipher_walk_virt(desc, &walk);
126 iv = walk.iv; 127 iv = walk.iv;
127 memset(tweak, 0, AES_BLOCK_SIZE); 128 memset(tweak, 0, AES_BLOCK_SIZE);
128 aes_p8_encrypt(iv, tweak, &ctx->tweak_key); 129 aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
129 130
131 disable_kernel_vsx();
132 pagefault_enable();
133 preempt_enable();
134
130 while ((nbytes = walk.nbytes)) { 135 while ((nbytes = walk.nbytes)) {
136 preempt_disable();
137 pagefault_disable();
138 enable_kernel_vsx();
131 if (enc) 139 if (enc)
132 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 140 aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
133 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak); 141 nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
134 else 142 else
135 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 143 aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
136 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak); 144 nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
145 disable_kernel_vsx();
146 pagefault_enable();
147 preempt_enable();
137 148
138 nbytes &= AES_BLOCK_SIZE - 1; 149 nbytes &= AES_BLOCK_SIZE - 1;
139 ret = blkcipher_walk_done(desc, &walk, nbytes); 150 ret = blkcipher_walk_done(desc, &walk, nbytes);
140 } 151 }
141
142 disable_kernel_vsx();
143 pagefault_enable();
144 preempt_enable();
145 } 152 }
146 return ret; 153 return ret;
147} 154}
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 6fd46083e629..948806e57cee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
392{ 392{
393 struct file *filp = vmf->vma->vm_file; 393 struct file *filp = vmf->vma->vm_file;
394 unsigned long fault_size; 394 unsigned long fault_size;
395 int rc, id; 395 vm_fault_t rc = VM_FAULT_SIGBUS;
396 int id;
396 pfn_t pfn; 397 pfn_t pfn;
397 struct dev_dax *dev_dax = filp->private_data; 398 struct dev_dax *dev_dax = filp->private_data;
398 399
@@ -534,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
534 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 535 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
535} 536}
536 537
538static const struct address_space_operations dev_dax_aops = {
539 .set_page_dirty = noop_set_page_dirty,
540 .invalidatepage = noop_invalidatepage,
541};
542
537static int dax_open(struct inode *inode, struct file *filp) 543static int dax_open(struct inode *inode, struct file *filp)
538{ 544{
539 struct dax_device *dax_dev = inode_dax(inode); 545 struct dax_device *dax_dev = inode_dax(inode);
@@ -543,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
543 dev_dbg(&dev_dax->dev, "trace\n"); 549 dev_dbg(&dev_dax->dev, "trace\n");
544 inode->i_mapping = __dax_inode->i_mapping; 550 inode->i_mapping = __dax_inode->i_mapping;
545 inode->i_mapping->host = __dax_inode; 551 inode->i_mapping->host = __dax_inode;
552 inode->i_mapping->a_ops = &dev_dax_aops;
546 filp->f_mapping = inode->i_mapping; 553 filp->f_mapping = inode->i_mapping;
547 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); 554 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
548 filp->private_data = dev_dax; 555 filp->private_data = dev_dax;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index b76cb17d879c..adfd316db1a8 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
639 int ret; 639 int ret;
640 struct device *dev = &mbdev->dev; 640 struct device *dev = &mbdev->dev;
641 641
642 mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL); 642 mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
643 if (!mic_dma_dev) { 643 if (!mic_dma_dev) {
644 ret = -ENOMEM; 644 ret = -ENOMEM;
645 goto alloc_error; 645 goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
664reg_error: 664reg_error:
665 mic_dma_uninit(mic_dma_dev); 665 mic_dma_uninit(mic_dma_dev);
666init_error: 666init_error:
667 kfree(mic_dma_dev);
668 mic_dma_dev = NULL; 667 mic_dma_dev = NULL;
669alloc_error: 668alloc_error:
670 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret); 669 dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
674static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) 673static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
675{ 674{
676 mic_dma_uninit(mic_dma_dev); 675 mic_dma_uninit(mic_dma_dev);
677 kfree(mic_dma_dev);
678} 676}
679 677
680/* DEBUGFS CODE */ 678/* DEBUGFS CODE */
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 721e6c57beae..64342944d917 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
166 le32_to_cpu(attr->sustained_freq_khz); 166 le32_to_cpu(attr->sustained_freq_khz);
167 dom_info->sustained_perf_level = 167 dom_info->sustained_perf_level =
168 le32_to_cpu(attr->sustained_perf_level); 168 le32_to_cpu(attr->sustained_perf_level);
169 dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / 169 if (!dom_info->sustained_freq_khz ||
170 !dom_info->sustained_perf_level)
171 /* CPUFreq converts to kHz, hence default 1000 */
172 dom_info->mult_factor = 1000;
173 else
174 dom_info->mult_factor =
175 (dom_info->sustained_freq_khz * 1000) /
170 dom_info->sustained_perf_level; 176 dom_info->sustained_perf_level;
171 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); 177 memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
172 } 178 }
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d8e159feb573..89110dfc7127 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
90config EFI_ARMSTUB_DTB_LOADER 90config EFI_ARMSTUB_DTB_LOADER
91 bool "Enable the DTB loader" 91 bool "Enable the DTB loader"
92 depends on EFI_ARMSTUB 92 depends on EFI_ARMSTUB
93 default y
93 help 94 help
94 Select this config option to add support for the dtb= command 95 Select this config option to add support for the dtb= command
95 line parameter, allowing a device tree blob to be loaded into 96 line parameter, allowing a device tree blob to be loaded into
96 memory from the EFI System Partition by the stub. 97 memory from the EFI System Partition by the stub.
97 98
98 The device tree is typically provided by the platform or by 99 If the device tree is provided by the platform or by
99 the bootloader, so this option is mostly for development 100 the bootloader this option may not be needed.
100 purposes only. 101 But, for various development reasons and to maintain existing
102 functionality for bootloaders that do not have such support
103 this option is necessary.
101 104
102config EFI_BOOTLOADER_CONTROL 105config EFI_BOOTLOADER_CONTROL
103 tristate "EFI Bootloader Control" 106 tristate "EFI Bootloader Control"
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index fc9fd2d0482f..0b840531ef33 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
420 /* Create region for each port */ 420 /* Create region for each port */
421 fme_region = dfl_fme_create_region(pdata, mgr, 421 fme_region = dfl_fme_create_region(pdata, mgr,
422 fme_br->br, i); 422 fme_br->br, i);
423 if (!fme_region) { 423 if (IS_ERR(fme_region)) {
424 ret = PTR_ERR(fme_region); 424 ret = PTR_ERR(fme_region);
425 goto destroy_region; 425 goto destroy_region;
426 } 426 }
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3530ccd17e04..da9781a2ef4a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@ struct adp5588_gpio {
41 uint8_t int_en[3]; 41 uint8_t int_en[3];
42 uint8_t irq_mask[3]; 42 uint8_t irq_mask[3];
43 uint8_t irq_stat[3]; 43 uint8_t irq_stat[3];
44 uint8_t int_input_en[3];
45 uint8_t int_lvl_cached[3];
44}; 46};
45 47
46static int adp5588_gpio_read(struct i2c_client *client, u8 reg) 48static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
173 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); 175 struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
174 int i; 176 int i;
175 177
176 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) 178 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
179 if (dev->int_input_en[i]) {
180 mutex_lock(&dev->lock);
181 dev->dir[i] &= ~dev->int_input_en[i];
182 dev->int_input_en[i] = 0;
183 adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
184 dev->dir[i]);
185 mutex_unlock(&dev->lock);
186 }
187
188 if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
189 dev->int_lvl_cached[i] = dev->int_lvl[i];
190 adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
191 dev->int_lvl[i]);
192 }
193
177 if (dev->int_en[i] ^ dev->irq_mask[i]) { 194 if (dev->int_en[i] ^ dev->irq_mask[i]) {
178 dev->int_en[i] = dev->irq_mask[i]; 195 dev->int_en[i] = dev->irq_mask[i];
179 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, 196 adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
180 dev->int_en[i]); 197 dev->int_en[i]);
181 } 198 }
199 }
182 200
183 mutex_unlock(&dev->irq_lock); 201 mutex_unlock(&dev->irq_lock);
184} 202}
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
221 else 239 else
222 return -EINVAL; 240 return -EINVAL;
223 241
224 adp5588_gpio_direction_input(&dev->gpio_chip, gpio); 242 dev->int_input_en[bank] |= bit;
225 adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
226 dev->int_lvl[bank]);
227 243
228 return 0; 244 return 0;
229} 245}
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 28da700f5f52..044888fd96a1 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
728out_unregister: 728out_unregister:
729 dwapb_gpio_unregister(gpio); 729 dwapb_gpio_unregister(gpio);
730 dwapb_irq_teardown(gpio); 730 dwapb_irq_teardown(gpio);
731 clk_disable_unprepare(gpio->clk);
731 732
732 return err; 733 return err;
733} 734}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index c48ed9d89ff5..8b9d7e42c600 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,7 +25,6 @@
25 25
26struct acpi_gpio_event { 26struct acpi_gpio_event {
27 struct list_head node; 27 struct list_head node;
28 struct list_head initial_sync_list;
29 acpi_handle handle; 28 acpi_handle handle;
30 unsigned int pin; 29 unsigned int pin;
31 unsigned int irq; 30 unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
49 struct mutex conn_lock; 48 struct mutex conn_lock;
50 struct gpio_chip *chip; 49 struct gpio_chip *chip;
51 struct list_head events; 50 struct list_head events;
51 struct list_head deferred_req_irqs_list_entry;
52}; 52};
53 53
54static LIST_HEAD(acpi_gpio_initial_sync_list); 54/*
55static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock); 55 * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
56 * (so builtin drivers) we register the ACPI GpioInt event handlers from a
57 * late_initcall_sync handler, so that other builtin drivers can register their
58 * OpRegions before the event handlers can run. This list contains gpiochips
59 * for which the acpi_gpiochip_request_interrupts() has been deferred.
60 */
61static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
62static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
63static bool acpi_gpio_deferred_req_irqs_done;
56 64
57static int acpi_gpiochip_find(struct gpio_chip *gc, void *data) 65static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
58{ 66{
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
89 return gpiochip_get_desc(chip, pin); 97 return gpiochip_get_desc(chip, pin);
90} 98}
91 99
92static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
93{
94 mutex_lock(&acpi_gpio_initial_sync_list_lock);
95 list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
96 mutex_unlock(&acpi_gpio_initial_sync_list_lock);
97}
98
99static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
100{
101 mutex_lock(&acpi_gpio_initial_sync_list_lock);
102 if (!list_empty(&event->initial_sync_list))
103 list_del_init(&event->initial_sync_list);
104 mutex_unlock(&acpi_gpio_initial_sync_list_lock);
105}
106
107static irqreturn_t acpi_gpio_irq_handler(int irq, void *data) 100static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
108{ 101{
109 struct acpi_gpio_event *event = data; 102 struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
186 179
187 gpiod_direction_input(desc); 180 gpiod_direction_input(desc);
188 181
189 value = gpiod_get_value(desc); 182 value = gpiod_get_value_cansleep(desc);
190 183
191 ret = gpiochip_lock_as_irq(chip, pin); 184 ret = gpiochip_lock_as_irq(chip, pin);
192 if (ret) { 185 if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
229 event->irq = irq; 222 event->irq = irq;
230 event->pin = pin; 223 event->pin = pin;
231 event->desc = desc; 224 event->desc = desc;
232 INIT_LIST_HEAD(&event->initial_sync_list);
233 225
234 ret = request_threaded_irq(event->irq, NULL, handler, irqflags, 226 ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
235 "ACPI:Event", event); 227 "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
251 * may refer to OperationRegions from other (builtin) drivers which 243 * may refer to OperationRegions from other (builtin) drivers which
252 * may be probed after us. 244 * may be probed after us.
253 */ 245 */
254 if (handler == acpi_gpio_irq_handler && 246 if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
255 (((irqflags & IRQF_TRIGGER_RISING) && value == 1) || 247 ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
256 ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))) 248 handler(event->irq, event);
257 acpi_gpio_add_to_initial_sync_list(event);
258 249
259 return AE_OK; 250 return AE_OK;
260 251
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
283 struct acpi_gpio_chip *acpi_gpio; 274 struct acpi_gpio_chip *acpi_gpio;
284 acpi_handle handle; 275 acpi_handle handle;
285 acpi_status status; 276 acpi_status status;
277 bool defer;
286 278
287 if (!chip->parent || !chip->to_irq) 279 if (!chip->parent || !chip->to_irq)
288 return; 280 return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
295 if (ACPI_FAILURE(status)) 287 if (ACPI_FAILURE(status))
296 return; 288 return;
297 289
290 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
291 defer = !acpi_gpio_deferred_req_irqs_done;
292 if (defer)
293 list_add(&acpi_gpio->deferred_req_irqs_list_entry,
294 &acpi_gpio_deferred_req_irqs_list);
295 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
296
297 if (defer)
298 return;
299
298 acpi_walk_resources(handle, "_AEI", 300 acpi_walk_resources(handle, "_AEI",
299 acpi_gpiochip_request_interrupt, acpi_gpio); 301 acpi_gpiochip_request_interrupt, acpi_gpio);
300} 302}
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
325 if (ACPI_FAILURE(status)) 327 if (ACPI_FAILURE(status))
326 return; 328 return;
327 329
330 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
331 if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
332 list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
333 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
334
328 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 335 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
329 struct gpio_desc *desc; 336 struct gpio_desc *desc;
330 337
331 acpi_gpio_del_from_initial_sync_list(event);
332
333 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq))) 338 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
334 disable_irq_wake(event->irq); 339 disable_irq_wake(event->irq);
335 340
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
1052 1057
1053 acpi_gpio->chip = chip; 1058 acpi_gpio->chip = chip;
1054 INIT_LIST_HEAD(&acpi_gpio->events); 1059 INIT_LIST_HEAD(&acpi_gpio->events);
1060 INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
1055 1061
1056 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio); 1062 status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
1057 if (ACPI_FAILURE(status)) { 1063 if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
1198 return con_id == NULL; 1204 return con_id == NULL;
1199} 1205}
1200 1206
1201/* Sync the initial state of handlers after all builtin drivers have probed */ 1207/* Run deferred acpi_gpiochip_request_interrupts() */
1202static int acpi_gpio_initial_sync(void) 1208static int acpi_gpio_handle_deferred_request_interrupts(void)
1203{ 1209{
1204 struct acpi_gpio_event *event, *ep; 1210 struct acpi_gpio_chip *acpi_gpio, *tmp;
1211
1212 mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
1213 list_for_each_entry_safe(acpi_gpio, tmp,
1214 &acpi_gpio_deferred_req_irqs_list,
1215 deferred_req_irqs_list_entry) {
1216 acpi_handle handle;
1205 1217
1206 mutex_lock(&acpi_gpio_initial_sync_list_lock); 1218 handle = ACPI_HANDLE(acpi_gpio->chip->parent);
1207 list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list, 1219 acpi_walk_resources(handle, "_AEI",
1208 initial_sync_list) { 1220 acpi_gpiochip_request_interrupt, acpi_gpio);
1209 acpi_evaluate_object(event->handle, NULL, NULL, NULL); 1221
1210 list_del_init(&event->initial_sync_list); 1222 list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
1211 } 1223 }
1212 mutex_unlock(&acpi_gpio_initial_sync_list_lock); 1224
1225 acpi_gpio_deferred_req_irqs_done = true;
1226 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
1213 1227
1214 return 0; 1228 return 0;
1215} 1229}
1216/* We must use _sync so that this runs after the first deferred_probe run */ 1230/* We must use _sync so that this runs after the first deferred_probe run */
1217late_initcall_sync(acpi_gpio_initial_sync); 1231late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a4f1157d6aa0..d4e7a09598fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
31 struct of_phandle_args *gpiospec = data; 31 struct of_phandle_args *gpiospec = data;
32 32
33 return chip->gpiodev->dev.of_node == gpiospec->np && 33 return chip->gpiodev->dev.of_node == gpiospec->np &&
34 chip->of_xlate &&
34 chip->of_xlate(chip, gpiospec, NULL) >= 0; 35 chip->of_xlate(chip, gpiospec, NULL) >= 0;
35} 36}
36 37
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a9504..0c791e35acf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
272 272
273int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 273int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
274 void **mem_obj, uint64_t *gpu_addr, 274 void **mem_obj, uint64_t *gpu_addr,
275 void **cpu_ptr) 275 void **cpu_ptr, bool mqd_gfx9)
276{ 276{
277 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 277 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
278 struct amdgpu_bo *bo = NULL; 278 struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
287 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; 287 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
288 bp.type = ttm_bo_type_kernel; 288 bp.type = ttm_bo_type_kernel;
289 bp.resv = NULL; 289 bp.resv = NULL;
290
291 if (mqd_gfx9)
292 bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
293
290 r = amdgpu_bo_create(adev, &bp, &bo); 294 r = amdgpu_bo_create(adev, &bp, &bo);
291 if (r) { 295 if (r) {
292 dev_err(adev->dev, 296 dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2f379c183ed2..cc9aeab5468c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
136/* Shared API */ 136/* Shared API */
137int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 137int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
138 void **mem_obj, uint64_t *gpu_addr, 138 void **mem_obj, uint64_t *gpu_addr,
139 void **cpu_ptr); 139 void **cpu_ptr, bool mqd_gfx9);
140void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); 140void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
141void get_local_mem_info(struct kgd_dev *kgd, 141void get_local_mem_info(struct kgd_dev *kgd,
142 struct kfd_local_mem_info *mem_info); 142 struct kfd_local_mem_info *mem_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea3f698aef5e..9803b91f3e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
685 685
686 while (true) { 686 while (true) {
687 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); 687 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
688 if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) 688 if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
689 break; 689 break;
690 if (time_after(jiffies, end_jiffies)) 690 if (time_after(jiffies, end_jiffies))
691 return -ETIME; 691 return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 693ec5ea4950..8816c697b205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
367 break; 367 break;
368 case CHIP_POLARIS10: 368 case CHIP_POLARIS10:
369 if (type == CGS_UCODE_ID_SMU) { 369 if (type == CGS_UCODE_ID_SMU) {
370 if ((adev->pdev->device == 0x67df) && 370 if (((adev->pdev->device == 0x67df) &&
371 ((adev->pdev->revision == 0xe0) || 371 ((adev->pdev->revision == 0xe0) ||
372 (adev->pdev->revision == 0xe3) || 372 (adev->pdev->revision == 0xe3) ||
373 (adev->pdev->revision == 0xe4) || 373 (adev->pdev->revision == 0xe4) ||
374 (adev->pdev->revision == 0xe5) || 374 (adev->pdev->revision == 0xe5) ||
375 (adev->pdev->revision == 0xe7) || 375 (adev->pdev->revision == 0xe7) ||
376 (adev->pdev->revision == 0xef))) ||
377 ((adev->pdev->device == 0x6fdf) &&
376 (adev->pdev->revision == 0xef))) { 378 (adev->pdev->revision == 0xef))) {
377 info->is_kicker = true; 379 info->is_kicker = true;
378 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); 380 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 502b94fb116a..b31d121a876b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
39{ 39{
40 struct drm_gem_object *gobj; 40 struct drm_gem_object *gobj;
41 unsigned long size; 41 unsigned long size;
42 int r;
42 43
43 gobj = drm_gem_object_lookup(p->filp, data->handle); 44 gobj = drm_gem_object_lookup(p->filp, data->handle);
44 if (gobj == NULL) 45 if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
50 p->uf_entry.tv.shared = true; 51 p->uf_entry.tv.shared = true;
51 p->uf_entry.user_pages = NULL; 52 p->uf_entry.user_pages = NULL;
52 53
53 size = amdgpu_bo_size(p->uf_entry.robj);
54 if (size != PAGE_SIZE || (data->offset + 8) > size)
55 return -EINVAL;
56
57 *offset = data->offset;
58
59 drm_gem_object_put_unlocked(gobj); 54 drm_gem_object_put_unlocked(gobj);
60 55
56 size = amdgpu_bo_size(p->uf_entry.robj);
57 if (size != PAGE_SIZE || (data->offset + 8) > size) {
58 r = -EINVAL;
59 goto error_unref;
60 }
61
61 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { 62 if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
62 amdgpu_bo_unref(&p->uf_entry.robj); 63 r = -EINVAL;
63 return -EINVAL; 64 goto error_unref;
64 } 65 }
65 66
67 *offset = data->offset;
68
66 return 0; 69 return 0;
70
71error_unref:
72 amdgpu_bo_unref(&p->uf_entry.robj);
73 return r;
67} 74}
68 75
69static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p, 76static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
1012 if (r) 1019 if (r)
1013 return r; 1020 return r;
1014 1021
1015 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) { 1022 if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
1016 parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT; 1023 parser->job->preamble_status |=
1017 if (!parser->ctx->preamble_presented) { 1024 AMDGPU_PREAMBLE_IB_PRESENT;
1018 parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1019 parser->ctx->preamble_presented = true;
1020 }
1021 }
1022 1025
1023 if (parser->ring && parser->ring != ring) 1026 if (parser->ring && parser->ring != ring)
1024 return -EINVAL; 1027 return -EINVAL;
@@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1207 1210
1208 int r; 1211 int r;
1209 1212
1213 job = p->job;
1214 p->job = NULL;
1215
1216 r = drm_sched_job_init(&job->base, entity, p->filp);
1217 if (r)
1218 goto error_unlock;
1219
1220 /* No memory allocation is allowed while holding the mn lock */
1210 amdgpu_mn_lock(p->mn); 1221 amdgpu_mn_lock(p->mn);
1211 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { 1222 amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
1212 struct amdgpu_bo *bo = e->robj; 1223 struct amdgpu_bo *bo = e->robj;
1213 1224
1214 if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) { 1225 if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
1215 amdgpu_mn_unlock(p->mn); 1226 r = -ERESTARTSYS;
1216 return -ERESTARTSYS; 1227 goto error_abort;
1217 } 1228 }
1218 } 1229 }
1219 1230
1220 job = p->job;
1221 p->job = NULL;
1222
1223 r = drm_sched_job_init(&job->base, entity, p->filp);
1224 if (r) {
1225 amdgpu_job_free(job);
1226 amdgpu_mn_unlock(p->mn);
1227 return r;
1228 }
1229
1230 job->owner = p->filp; 1231 job->owner = p->filp;
1231 p->fence = dma_fence_get(&job->base.s_fence->finished); 1232 p->fence = dma_fence_get(&job->base.s_fence->finished);
1232 1233
@@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1241 1242
1242 amdgpu_cs_post_dependencies(p); 1243 amdgpu_cs_post_dependencies(p);
1243 1244
1245 if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
1246 !p->ctx->preamble_presented) {
1247 job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
1248 p->ctx->preamble_presented = true;
1249 }
1250
1244 cs->out.handle = seq; 1251 cs->out.handle = seq;
1245 job->uf_sequence = seq; 1252 job->uf_sequence = seq;
1246 1253
@@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
1258 amdgpu_mn_unlock(p->mn); 1265 amdgpu_mn_unlock(p->mn);
1259 1266
1260 return 0; 1267 return 0;
1268
1269error_abort:
1270 dma_fence_put(&job->base.s_fence->finished);
1271 job->base.s_fence = NULL;
1272 amdgpu_mn_unlock(p->mn);
1273
1274error_unlock:
1275 amdgpu_job_free(job);
1276 return r;
1261} 1277}
1262 1278
1263int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 1279int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..39bf2ce548c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
2063 static enum amd_ip_block_type ip_order[] = { 2063 static enum amd_ip_block_type ip_order[] = {
2064 AMD_IP_BLOCK_TYPE_GMC, 2064 AMD_IP_BLOCK_TYPE_GMC,
2065 AMD_IP_BLOCK_TYPE_COMMON, 2065 AMD_IP_BLOCK_TYPE_COMMON,
2066 AMD_IP_BLOCK_TYPE_PSP,
2066 AMD_IP_BLOCK_TYPE_IH, 2067 AMD_IP_BLOCK_TYPE_IH,
2067 }; 2068 };
2068 2069
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
2093 2094
2094 static enum amd_ip_block_type ip_order[] = { 2095 static enum amd_ip_block_type ip_order[] = {
2095 AMD_IP_BLOCK_TYPE_SMC, 2096 AMD_IP_BLOCK_TYPE_SMC,
2096 AMD_IP_BLOCK_TYPE_PSP,
2097 AMD_IP_BLOCK_TYPE_DCE, 2097 AMD_IP_BLOCK_TYPE_DCE,
2098 AMD_IP_BLOCK_TYPE_GFX, 2098 AMD_IP_BLOCK_TYPE_GFX,
2099 AMD_IP_BLOCK_TYPE_SDMA, 2099 AMD_IP_BLOCK_TYPE_SDMA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..0f41d8647376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
740 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 740 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
741 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 741 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
742 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 742 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
743 {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
743 /* Polaris12 */ 744 /* Polaris12 */
744 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 745 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
745 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 746 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
164 return r; 164 return r;
165 } 165 }
166 166
167 need_ctx_switch = ring->current_ctx != fence_ctx;
167 if (ring->funcs->emit_pipeline_sync && job && 168 if (ring->funcs->emit_pipeline_sync && job &&
168 ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) || 169 ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
170 (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
169 amdgpu_vm_need_pipeline_sync(ring, job))) { 171 amdgpu_vm_need_pipeline_sync(ring, job))) {
170 need_pipe_sync = true; 172 need_pipe_sync = true;
171 dma_fence_put(tmp); 173 dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
196 } 198 }
197 199
198 skip_preamble = ring->current_ctx == fence_ctx; 200 skip_preamble = ring->current_ctx == fence_ctx;
199 need_ctx_switch = ring->current_ctx != fence_ctx;
200 if (job && ring->funcs->emit_cntxcntl) { 201 if (job && ring->funcs->emit_cntxcntl) {
201 if (need_ctx_switch) 202 if (need_ctx_switch)
202 status |= AMDGPU_HAVE_CTX_SWITCH; 203 status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8f98629fbe59..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1932 amdgpu_fence_wait_empty(ring); 1932 amdgpu_fence_wait_empty(ring);
1933 } 1933 }
1934 1934
1935 mutex_lock(&adev->pm.mutex);
1936 /* update battery/ac status */
1937 if (power_supply_is_system_supplied() > 0)
1938 adev->pm.ac_power = true;
1939 else
1940 adev->pm.ac_power = false;
1941 mutex_unlock(&adev->pm.mutex);
1942
1943 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1935 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1944 if (!amdgpu_device_has_dc_support(adev)) { 1936 if (!amdgpu_device_has_dc_support(adev)) {
1945 mutex_lock(&adev->pm.mutex); 1937 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
258{ 258{
259 int i; 259 int i;
260 260
261 cancel_delayed_work_sync(&adev->vce.idle_work);
262
261 if (adev->vce.vcpu_bo == NULL) 263 if (adev->vce.vcpu_bo == NULL)
262 return 0; 264 return 0;
263 265
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
268 if (i == AMDGPU_MAX_VCE_HANDLES) 270 if (i == AMDGPU_MAX_VCE_HANDLES)
269 return 0; 271 return 0;
270 272
271 cancel_delayed_work_sync(&adev->vce.idle_work);
272 /* TODO: suspending running encoding sessions isn't supported */ 273 /* TODO: suspending running encoding sessions isn't supported */
273 return -EINVAL; 274 return -EINVAL;
274} 275}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd654a4406db..400fc74bbae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
153 unsigned size; 153 unsigned size;
154 void *ptr; 154 void *ptr;
155 155
156 cancel_delayed_work_sync(&adev->vcn.idle_work);
157
156 if (adev->vcn.vcpu_bo == NULL) 158 if (adev->vcn.vcpu_bo == NULL)
157 return 0; 159 return 0;
158 160
159 cancel_delayed_work_sync(&adev->vcn.idle_work);
160
161 size = amdgpu_bo_size(adev->vcn.vcpu_bo); 161 size = amdgpu_bo_size(adev->vcn.vcpu_bo);
162 ptr = adev->vcn.cpu_addr; 162 ptr = adev->vcn.cpu_addr;
163 163
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ece0ac703e27..b17771dd5ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
172 * is validated on next vm use to avoid fault. 172 * is validated on next vm use to avoid fault.
173 * */ 173 * */
174 list_move_tail(&base->vm_status, &vm->evicted); 174 list_move_tail(&base->vm_status, &vm->evicted);
175 base->moved = true;
175} 176}
176 177
177/** 178/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
369 uint64_t addr; 370 uint64_t addr;
370 int r; 371 int r;
371 372
372 addr = amdgpu_bo_gpu_offset(bo);
373 entries = amdgpu_bo_size(bo) / 8; 373 entries = amdgpu_bo_size(bo) / 8;
374 374
375 if (pte_support_ats) { 375 if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
401 if (r) 401 if (r)
402 goto error; 402 goto error;
403 403
404 addr = amdgpu_bo_gpu_offset(bo);
404 if (ats_entries) { 405 if (ats_entries) {
405 uint64_t ats_value; 406 uint64_t ats_value;
406 407
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
2483 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2484 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2484 * 2485 *
2485 * @adev: amdgpu_device pointer 2486 * @adev: amdgpu_device pointer
2486 * @vm_size: the default vm size if it's set auto 2487 * @min_vm_size: the minimum vm size in GB if it's set auto
2487 * @fragment_size_default: Default PTE fragment size 2488 * @fragment_size_default: Default PTE fragment size
2488 * @max_level: max VMPT level 2489 * @max_level: max VMPT level
2489 * @max_bits: max address space size in bits 2490 * @max_bits: max address space size in bits
2490 * 2491 *
2491 */ 2492 */
2492void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, 2493void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
2493 uint32_t fragment_size_default, unsigned max_level, 2494 uint32_t fragment_size_default, unsigned max_level,
2494 unsigned max_bits) 2495 unsigned max_bits)
2495{ 2496{
2497 unsigned int max_size = 1 << (max_bits - 30);
2498 unsigned int vm_size;
2496 uint64_t tmp; 2499 uint64_t tmp;
2497 2500
2498 /* adjust vm size first */ 2501 /* adjust vm size first */
2499 if (amdgpu_vm_size != -1) { 2502 if (amdgpu_vm_size != -1) {
2500 unsigned max_size = 1 << (max_bits - 30);
2501
2502 vm_size = amdgpu_vm_size; 2503 vm_size = amdgpu_vm_size;
2503 if (vm_size > max_size) { 2504 if (vm_size > max_size) {
2504 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2505 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2505 amdgpu_vm_size, max_size); 2506 amdgpu_vm_size, max_size);
2506 vm_size = max_size; 2507 vm_size = max_size;
2507 } 2508 }
2509 } else {
2510 struct sysinfo si;
2511 unsigned int phys_ram_gb;
2512
2513 /* Optimal VM size depends on the amount of physical
2514 * RAM available. Underlying requirements and
2515 * assumptions:
2516 *
2517 * - Need to map system memory and VRAM from all GPUs
2518 * - VRAM from other GPUs not known here
2519 * - Assume VRAM <= system memory
2520 * - On GFX8 and older, VM space can be segmented for
2521 * different MTYPEs
2522 * - Need to allow room for fragmentation, guard pages etc.
2523 *
2524 * This adds up to a rough guess of system memory x3.
2525 * Round up to power of two to maximize the available
2526 * VM size with the given page table size.
2527 */
2528 si_meminfo(&si);
2529 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
2530 (1 << 30) - 1) >> 30;
2531 vm_size = roundup_pow_of_two(
2532 min(max(phys_ram_gb * 3, min_vm_size), max_size));
2508 } 2533 }
2509 2534
2510 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2535 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 67a15d439ac0..9fa9df0c5e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
321void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); 321void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
322void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, 322void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
323 struct amdgpu_bo_va *bo_va); 323 struct amdgpu_bo_va *bo_va);
324void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, 324void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
325 uint32_t fragment_size_default, unsigned max_level, 325 uint32_t fragment_size_default, unsigned max_level,
326 unsigned max_bits); 326 unsigned max_bits);
327int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 327int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5cd45210113f..5a9534a82d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5664 if (amdgpu_sriov_vf(adev)) 5664 if (amdgpu_sriov_vf(adev))
5665 return 0; 5665 return 0;
5666 5666
5667 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5668 AMD_PG_SUPPORT_RLC_SMU_HS |
5669 AMD_PG_SUPPORT_CP |
5670 AMD_PG_SUPPORT_GFX_DMG))
5671 adev->gfx.rlc.funcs->enter_safe_mode(adev);
5667 switch (adev->asic_type) { 5672 switch (adev->asic_type) {
5668 case CHIP_CARRIZO: 5673 case CHIP_CARRIZO:
5669 case CHIP_STONEY: 5674 case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5713 default: 5718 default:
5714 break; 5719 break;
5715 } 5720 }
5716 5721 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
5722 AMD_PG_SUPPORT_RLC_SMU_HS |
5723 AMD_PG_SUPPORT_CP |
5724 AMD_PG_SUPPORT_GFX_DMG))
5725 adev->gfx.rlc.funcs->exit_safe_mode(adev);
5717 return 0; 5726 return 0;
5718} 5727}
5719 5728
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 75317f283c69..ad151fefa41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
632 amdgpu_gart_table_vram_unpin(adev); 632 amdgpu_gart_table_vram_unpin(adev);
633} 633}
634 634
635static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
636{
637 amdgpu_gart_table_vram_free(adev);
638 amdgpu_gart_fini(adev);
639}
640
641static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev, 635static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
642 u32 status, u32 addr, u32 mc_client) 636 u32 status, u32 addr, u32 mc_client)
643{ 637{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
935 929
936 amdgpu_gem_force_release(adev); 930 amdgpu_gem_force_release(adev);
937 amdgpu_vm_manager_fini(adev); 931 amdgpu_vm_manager_fini(adev);
938 gmc_v6_0_gart_fini(adev); 932 amdgpu_gart_table_vram_free(adev);
939 amdgpu_bo_fini(adev); 933 amdgpu_bo_fini(adev);
934 amdgpu_gart_fini(adev);
940 release_firmware(adev->gmc.fw); 935 release_firmware(adev->gmc.fw);
941 adev->gmc.fw = NULL; 936 adev->gmc.fw = NULL;
942 937
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 36dc367c4b45..f8d8a3a73e42 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
747} 747}
748 748
749/** 749/**
750 * gmc_v7_0_gart_fini - vm fini callback
751 *
752 * @adev: amdgpu_device pointer
753 *
754 * Tears down the driver GART/VM setup (CIK).
755 */
756static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
757{
758 amdgpu_gart_table_vram_free(adev);
759 amdgpu_gart_fini(adev);
760}
761
762/**
763 * gmc_v7_0_vm_decode_fault - print human readable fault info 750 * gmc_v7_0_vm_decode_fault - print human readable fault info
764 * 751 *
765 * @adev: amdgpu_device pointer 752 * @adev: amdgpu_device pointer
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
1095 amdgpu_gem_force_release(adev); 1082 amdgpu_gem_force_release(adev);
1096 amdgpu_vm_manager_fini(adev); 1083 amdgpu_vm_manager_fini(adev);
1097 kfree(adev->gmc.vm_fault_info); 1084 kfree(adev->gmc.vm_fault_info);
1098 gmc_v7_0_gart_fini(adev); 1085 amdgpu_gart_table_vram_free(adev);
1099 amdgpu_bo_fini(adev); 1086 amdgpu_bo_fini(adev);
1087 amdgpu_gart_fini(adev);
1100 release_firmware(adev->gmc.fw); 1088 release_firmware(adev->gmc.fw);
1101 adev->gmc.fw = NULL; 1089 adev->gmc.fw = NULL;
1102 1090
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 70fc97b59b4f..9333109b210d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
969} 969}
970 970
971/** 971/**
972 * gmc_v8_0_gart_fini - vm fini callback
973 *
974 * @adev: amdgpu_device pointer
975 *
976 * Tears down the driver GART/VM setup (CIK).
977 */
978static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
979{
980 amdgpu_gart_table_vram_free(adev);
981 amdgpu_gart_fini(adev);
982}
983
984/**
985 * gmc_v8_0_vm_decode_fault - print human readable fault info 972 * gmc_v8_0_vm_decode_fault - print human readable fault info
986 * 973 *
987 * @adev: amdgpu_device pointer 974 * @adev: amdgpu_device pointer
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
1199 amdgpu_gem_force_release(adev); 1186 amdgpu_gem_force_release(adev);
1200 amdgpu_vm_manager_fini(adev); 1187 amdgpu_vm_manager_fini(adev);
1201 kfree(adev->gmc.vm_fault_info); 1188 kfree(adev->gmc.vm_fault_info);
1202 gmc_v8_0_gart_fini(adev); 1189 amdgpu_gart_table_vram_free(adev);
1203 amdgpu_bo_fini(adev); 1190 amdgpu_bo_fini(adev);
1191 amdgpu_gart_fini(adev);
1204 release_firmware(adev->gmc.fw); 1192 release_firmware(adev->gmc.fw);
1205 adev->gmc.fw = NULL; 1193 adev->gmc.fw = NULL;
1206 1194
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 399a5db27649..72f8018fa2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
942 return 0; 942 return 0;
943} 943}
944 944
945/**
946 * gmc_v9_0_gart_fini - vm fini callback
947 *
948 * @adev: amdgpu_device pointer
949 *
950 * Tears down the driver GART/VM setup (CIK).
951 */
952static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
953{
954 amdgpu_gart_table_vram_free(adev);
955 amdgpu_gart_fini(adev);
956}
957
958static int gmc_v9_0_sw_fini(void *handle) 945static int gmc_v9_0_sw_fini(void *handle)
959{ 946{
960 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 947 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
961 948
962 amdgpu_gem_force_release(adev); 949 amdgpu_gem_force_release(adev);
963 amdgpu_vm_manager_fini(adev); 950 amdgpu_vm_manager_fini(adev);
964 gmc_v9_0_gart_fini(adev);
965 951
966 /* 952 /*
967 * TODO: 953 * TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
974 */ 960 */
975 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); 961 amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
976 962
963 amdgpu_gart_table_vram_free(adev);
977 amdgpu_bo_fini(adev); 964 amdgpu_bo_fini(adev);
965 amdgpu_gart_fini(adev);
978 966
979 return 0; 967 return 0;
980} 968}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3f57f6463dc8..cb79a93c2eb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
65 int min_temp, int max_temp); 65 int min_temp, int max_temp);
66static int kv_init_fps_limits(struct amdgpu_device *adev); 66static int kv_init_fps_limits(struct amdgpu_device *adev);
67 67
68static void kv_dpm_powergate_uvd(void *handle, bool gate);
69static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
70static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); 68static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
71static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); 69static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
72 70
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
1354 return ret; 1352 return ret;
1355 } 1353 }
1356 1354
1357 kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
1358
1359 if (adev->irq.installed && 1355 if (adev->irq.installed &&
1360 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { 1356 amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
1361 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); 1357 ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
1374 1370
1375static void kv_dpm_disable(struct amdgpu_device *adev) 1371static void kv_dpm_disable(struct amdgpu_device *adev)
1376{ 1372{
1373 struct kv_power_info *pi = kv_get_pi(adev);
1374
1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1375 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
1378 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); 1376 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
1379 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, 1377 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
1387 /* powerup blocks */ 1385 /* powerup blocks */
1388 kv_dpm_powergate_acp(adev, false); 1386 kv_dpm_powergate_acp(adev, false);
1389 kv_dpm_powergate_samu(adev, false); 1387 kv_dpm_powergate_samu(adev, false);
1390 kv_dpm_powergate_vce(adev, false); 1388 if (pi->caps_vce_pg) /* power on the VCE block */
1391 kv_dpm_powergate_uvd(adev, false); 1389 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1390 if (pi->caps_uvd_pg) /* power on the UVD block */
1391 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
1392 1392
1393 kv_enable_smc_cac(adev, false); 1393 kv_enable_smc_cac(adev, false);
1394 kv_enable_didt(adev, false); 1394 kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
1551 int ret; 1551 int ret;
1552 1552
1553 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { 1553 if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
1554 kv_dpm_powergate_vce(adev, false);
1555 if (pi->caps_stable_p_state) 1554 if (pi->caps_stable_p_state)
1556 pi->vce_boot_level = table->count - 1; 1555 pi->vce_boot_level = table->count - 1;
1557 else 1556 else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
1573 kv_enable_vce_dpm(adev, true); 1572 kv_enable_vce_dpm(adev, true);
1574 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { 1573 } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
1575 kv_enable_vce_dpm(adev, false); 1574 kv_enable_vce_dpm(adev, false);
1576 kv_dpm_powergate_vce(adev, true);
1577 } 1575 }
1578 1576
1579 return 0; 1577 return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
1702 } 1700 }
1703} 1701}
1704 1702
1705static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) 1703static void kv_dpm_powergate_vce(void *handle, bool gate)
1706{ 1704{
1705 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1707 struct kv_power_info *pi = kv_get_pi(adev); 1706 struct kv_power_info *pi = kv_get_pi(adev);
1708 1707 int ret;
1709 if (pi->vce_power_gated == gate)
1710 return;
1711 1708
1712 pi->vce_power_gated = gate; 1709 pi->vce_power_gated = gate;
1713 1710
1714 if (!pi->caps_vce_pg) 1711 if (gate) {
1715 return; 1712 /* stop the VCE block */
1716 1713 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1717 if (gate) 1714 AMD_PG_STATE_GATE);
1718 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); 1715 kv_enable_vce_dpm(adev, false);
1719 else 1716 if (pi->caps_vce_pg) /* power off the VCE block */
1720 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); 1717 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
1718 } else {
1719 if (pi->caps_vce_pg) /* power on the VCE block */
1720 amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
1721 kv_enable_vce_dpm(adev, true);
1722 /* re-init the VCE block */
1723 ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
1724 AMD_PG_STATE_UNGATE);
1725 }
1721} 1726}
1722 1727
1728
1723static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) 1729static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
1724{ 1730{
1725 struct kv_power_info *pi = kv_get_pi(adev); 1731 struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
3061 else 3067 else
3062 adev->pm.dpm_enabled = true; 3068 adev->pm.dpm_enabled = true;
3063 mutex_unlock(&adev->pm.mutex); 3069 mutex_unlock(&adev->pm.mutex);
3064 3070 amdgpu_pm_compute_clocks(adev);
3065 return ret; 3071 return ret;
3066} 3072}
3067 3073
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
3313 case AMD_IP_BLOCK_TYPE_UVD: 3319 case AMD_IP_BLOCK_TYPE_UVD:
3314 kv_dpm_powergate_uvd(handle, gate); 3320 kv_dpm_powergate_uvd(handle, gate);
3315 break; 3321 break;
3322 case AMD_IP_BLOCK_TYPE_VCE:
3323 kv_dpm_powergate_vce(handle, gate);
3324 break;
3316 default: 3325 default:
3317 break; 3326 break;
3318 } 3327 }
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e7ca4623cfb9..7c3b634d8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 70 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 71 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 72 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
73 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
73 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), 74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
74 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
75 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 76 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) 85 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
86 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
85}; 87};
86 88
87static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 89static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
109 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
110 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100), 112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
111 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 113 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
112 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) 114 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
115 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
113}; 116};
114 117
115static const struct soc15_reg_golden golden_settings_sdma_4_2[] = 118static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index db327b412562..1de96995e690 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
6887 6887
6888 si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 6888 si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
6889 si_thermal_start_thermal_controller(adev); 6889 si_thermal_start_thermal_controller(adev);
6890 ni_update_current_ps(adev, boot_ps);
6891 6890
6892 return 0; 6891 return 0;
6893} 6892}
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
7763 else 7762 else
7764 adev->pm.dpm_enabled = true; 7763 adev->pm.dpm_enabled = true;
7765 mutex_unlock(&adev->pm.mutex); 7764 mutex_unlock(&adev->pm.mutex);
7766 7765 amdgpu_pm_compute_clocks(adev);
7767 return ret; 7766 return ret;
7768} 7767}
7769 7768
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1b048715ab8a..29ac74f40dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
457 457
458 if (kfd->kfd2kgd->init_gtt_mem_allocation( 458 if (kfd->kfd2kgd->init_gtt_mem_allocation(
459 kfd->kgd, size, &kfd->gtt_mem, 459 kfd->kgd, size, &kfd->gtt_mem,
460 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ 460 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
461 false)) {
461 dev_err(kfd_device, "Could not allocate %d bytes\n", size); 462 dev_err(kfd_device, "Could not allocate %d bytes\n", size);
462 goto out; 463 goto out;
463 } 464 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7a61f38c09e6..01494752c36a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
62 struct amd_iommu_device_info iommu_info; 62 struct amd_iommu_device_info iommu_info;
63 unsigned int pasid_limit; 63 unsigned int pasid_limit;
64 int err; 64 int err;
65 struct kfd_topology_device *top_dev;
65 66
66 if (!kfd->device_info->needs_iommu_device) 67 top_dev = kfd_topology_device_by_id(kfd->id);
68
69 /*
70 * Overwrite ATS capability according to needs_iommu_device to fix
71 * potential missing corresponding bit in CRAT of BIOS.
72 */
73 if (!kfd->device_info->needs_iommu_device) {
74 top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
67 return 0; 75 return 0;
76 }
77
78 top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
68 79
69 iommu_info.flags = 0; 80 iommu_info.flags = 0;
70 err = amd_iommu_device_info(kfd->pdev, &iommu_info); 81 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f5fc3675f21e..0cedb37cf513 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
88 ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), 88 ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
89 &((*mqd_mem_obj)->gtt_mem), 89 &((*mqd_mem_obj)->gtt_mem),
90 &((*mqd_mem_obj)->gpu_addr), 90 &((*mqd_mem_obj)->gpu_addr),
91 (void *)&((*mqd_mem_obj)->cpu_ptr)); 91 (void *)&((*mqd_mem_obj)->cpu_ptr), true);
92 } else 92 } else
93 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), 93 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
94 mqd_mem_obj); 94 mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f971710f1c91..92b285ca73aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
806int kfd_topology_remove_device(struct kfd_dev *gpu); 806int kfd_topology_remove_device(struct kfd_dev *gpu);
807struct kfd_topology_device *kfd_topology_device_by_proximity_domain( 807struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
808 uint32_t proximity_domain); 808 uint32_t proximity_domain);
809struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
809struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); 810struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
810struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); 811struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
811int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); 812int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bc95d4dfee2e..80f5db4ef75f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
63 return device; 63 return device;
64} 64}
65 65
66struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) 66struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
67{ 67{
68 struct kfd_topology_device *top_dev; 68 struct kfd_topology_device *top_dev = NULL;
69 struct kfd_dev *device = NULL; 69 struct kfd_topology_device *ret = NULL;
70 70
71 down_read(&topology_lock); 71 down_read(&topology_lock);
72 72
73 list_for_each_entry(top_dev, &topology_device_list, list) 73 list_for_each_entry(top_dev, &topology_device_list, list)
74 if (top_dev->gpu_id == gpu_id) { 74 if (top_dev->gpu_id == gpu_id) {
75 device = top_dev->gpu; 75 ret = top_dev;
76 break; 76 break;
77 } 77 }
78 78
79 up_read(&topology_lock); 79 up_read(&topology_lock);
80 80
81 return device; 81 return ret;
82}
83
84struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
85{
86 struct kfd_topology_device *top_dev;
87
88 top_dev = kfd_topology_device_by_id(gpu_id);
89 if (!top_dev)
90 return NULL;
91
92 return top_dev->gpu;
82} 93}
83 94
84struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) 95struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 800f481a6995..96875950845a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
641 return NULL; 641 return NULL;
642} 642}
643 643
644static void emulated_link_detect(struct dc_link *link)
645{
646 struct dc_sink_init_data sink_init_data = { 0 };
647 struct display_sink_capability sink_caps = { 0 };
648 enum dc_edid_status edid_status;
649 struct dc_context *dc_ctx = link->ctx;
650 struct dc_sink *sink = NULL;
651 struct dc_sink *prev_sink = NULL;
652
653 link->type = dc_connection_none;
654 prev_sink = link->local_sink;
655
656 if (prev_sink != NULL)
657 dc_sink_retain(prev_sink);
658
659 switch (link->connector_signal) {
660 case SIGNAL_TYPE_HDMI_TYPE_A: {
661 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
662 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
663 break;
664 }
665
666 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
667 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
668 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
669 break;
670 }
671
672 case SIGNAL_TYPE_DVI_DUAL_LINK: {
673 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
674 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
675 break;
676 }
677
678 case SIGNAL_TYPE_LVDS: {
679 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
680 sink_caps.signal = SIGNAL_TYPE_LVDS;
681 break;
682 }
683
684 case SIGNAL_TYPE_EDP: {
685 sink_caps.transaction_type =
686 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
687 sink_caps.signal = SIGNAL_TYPE_EDP;
688 break;
689 }
690
691 case SIGNAL_TYPE_DISPLAY_PORT: {
692 sink_caps.transaction_type =
693 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
694 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
695 break;
696 }
697
698 default:
699 DC_ERROR("Invalid connector type! signal:%d\n",
700 link->connector_signal);
701 return;
702 }
703
704 sink_init_data.link = link;
705 sink_init_data.sink_signal = sink_caps.signal;
706
707 sink = dc_sink_create(&sink_init_data);
708 if (!sink) {
709 DC_ERROR("Failed to create sink!\n");
710 return;
711 }
712
713 link->local_sink = sink;
714
715 edid_status = dm_helpers_read_local_edid(
716 link->ctx,
717 link,
718 sink);
719
720 if (edid_status != EDID_OK)
721 DC_ERROR("Failed to read EDID");
722
723}
724
644static int dm_resume(void *handle) 725static int dm_resume(void *handle)
645{ 726{
646 struct amdgpu_device *adev = handle; 727 struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
654 struct drm_plane *plane; 735 struct drm_plane *plane;
655 struct drm_plane_state *new_plane_state; 736 struct drm_plane_state *new_plane_state;
656 struct dm_plane_state *dm_new_plane_state; 737 struct dm_plane_state *dm_new_plane_state;
738 enum dc_connection_type new_connection_type = dc_connection_none;
657 int ret; 739 int ret;
658 int i; 740 int i;
659 741
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
684 continue; 766 continue;
685 767
686 mutex_lock(&aconnector->hpd_lock); 768 mutex_lock(&aconnector->hpd_lock);
687 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 769 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
770 DRM_ERROR("KMS: Failed to detect connector\n");
771
772 if (aconnector->base.force && new_connection_type == dc_connection_none)
773 emulated_link_detect(aconnector->dc_link);
774 else
775 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
688 776
689 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 777 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
690 aconnector->fake_enable = false; 778 aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
922 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 1010 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
923 struct drm_connector *connector = &aconnector->base; 1011 struct drm_connector *connector = &aconnector->base;
924 struct drm_device *dev = connector->dev; 1012 struct drm_device *dev = connector->dev;
1013 enum dc_connection_type new_connection_type = dc_connection_none;
925 1014
926 /* In case of failure or MST no need to update connector status or notify the OS 1015 /* In case of failure or MST no need to update connector status or notify the OS
927 * since (for MST case) MST does this in it's own context. 1016 * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
931 if (aconnector->fake_enable) 1020 if (aconnector->fake_enable)
932 aconnector->fake_enable = false; 1021 aconnector->fake_enable = false;
933 1022
934 if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { 1023 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1024 DRM_ERROR("KMS: Failed to detect connector\n");
1025
1026 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1027 emulated_link_detect(aconnector->dc_link);
1028
1029
1030 drm_modeset_lock_all(dev);
1031 dm_restore_drm_connector_state(dev, connector);
1032 drm_modeset_unlock_all(dev);
1033
1034 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1035 drm_kms_helper_hotplug_event(dev);
1036
1037 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
935 amdgpu_dm_update_connector_after_detect(aconnector); 1038 amdgpu_dm_update_connector_after_detect(aconnector);
936 1039
937 1040
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
1031 struct drm_device *dev = connector->dev; 1134 struct drm_device *dev = connector->dev;
1032 struct dc_link *dc_link = aconnector->dc_link; 1135 struct dc_link *dc_link = aconnector->dc_link;
1033 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 1136 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1137 enum dc_connection_type new_connection_type = dc_connection_none;
1034 1138
1035 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio 1139 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1036 * conflict, after implement i2c helper, this mutex should be 1140 * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
1042 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && 1146 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1043 !is_mst_root_connector) { 1147 !is_mst_root_connector) {
1044 /* Downstream Port status changed. */ 1148 /* Downstream Port status changed. */
1045 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 1149 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1150 DRM_ERROR("KMS: Failed to detect connector\n");
1151
1152 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1153 emulated_link_detect(dc_link);
1154
1155 if (aconnector->fake_enable)
1156 aconnector->fake_enable = false;
1157
1158 amdgpu_dm_update_connector_after_detect(aconnector);
1159
1160
1161 drm_modeset_lock_all(dev);
1162 dm_restore_drm_connector_state(dev, connector);
1163 drm_modeset_unlock_all(dev);
1164
1165 drm_kms_helper_hotplug_event(dev);
1166 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1046 1167
1047 if (aconnector->fake_enable) 1168 if (aconnector->fake_enable)
1048 aconnector->fake_enable = false; 1169 aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1433 struct amdgpu_mode_info *mode_info = &adev->mode_info; 1554 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1434 uint32_t link_cnt; 1555 uint32_t link_cnt;
1435 int32_t total_overlay_planes, total_primary_planes; 1556 int32_t total_overlay_planes, total_primary_planes;
1557 enum dc_connection_type new_connection_type = dc_connection_none;
1436 1558
1437 link_cnt = dm->dc->caps.max_links; 1559 link_cnt = dm->dc->caps.max_links;
1438 if (amdgpu_dm_mode_config_init(dm->adev)) { 1560 if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1499 1621
1500 link = dc_get_link_at_index(dm->dc, i); 1622 link = dc_get_link_at_index(dm->dc, i);
1501 1623
1502 if (dc_link_detect(link, DETECT_REASON_BOOT)) { 1624 if (!dc_link_detect_sink(link, &new_connection_type))
1625 DRM_ERROR("KMS: Failed to detect connector\n");
1626
1627 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1628 emulated_link_detect(link);
1629 amdgpu_dm_update_connector_after_detect(aconnector);
1630
1631 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1503 amdgpu_dm_update_connector_after_detect(aconnector); 1632 amdgpu_dm_update_connector_after_detect(aconnector);
1504 register_backlight_device(dm, link); 1633 register_backlight_device(dm, link);
1505 } 1634 }
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2494 if (dm_state && dm_state->freesync_capable) 2623 if (dm_state && dm_state->freesync_capable)
2495 stream->ignore_msa_timing_param = true; 2624 stream->ignore_msa_timing_param = true;
2496finish: 2625finish:
2497 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) 2626 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2498 dc_sink_release(sink); 2627 dc_sink_release(sink);
2499 2628
2500 return stream; 2629 return stream;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index fbe878ae1e8c..4ba0003a9d32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
480{ 480{
481 struct dc_context *ctx = pp->ctx; 481 struct dc_context *ctx = pp->ctx;
482 struct amdgpu_device *adev = ctx->driver_context; 482 struct amdgpu_device *adev = ctx->driver_context;
483 void *pp_handle = adev->powerplay.pp_handle;
483 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 484 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
485 struct pp_display_clock_request clock = {0};
484 486
485 if (!pp_funcs || !pp_funcs->display_configuration_changed) 487 if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
486 return; 488 return;
487 489
488 amdgpu_dpm_display_configuration_changed(adev); 490 clock.clock_type = amd_pp_dcf_clock;
491 clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
492 pp_funcs->display_clock_voltage_request(pp_handle, &clock);
493
494 clock.clock_type = amd_pp_f_clock;
495 clock.clock_freq_in_khz = req->hard_min_fclk_khz;
496 pp_funcs->display_clock_voltage_request(pp_handle, &clock);
489} 497}
490 498
491void pp_rv_set_wm_ranges(struct pp_smu *pp, 499void pp_rv_set_wm_ranges(struct pp_smu *pp,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 567867915d32..fced3c1c2ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
195 return result; 195 return result;
196} 196}
197 197
198static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) 198bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
199{ 199{
200 uint32_t is_hpd_high = 0; 200 uint32_t is_hpd_high = 0;
201 struct gpio *hpd_pin; 201 struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
604 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) 604 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
605 return false; 605 return false;
606 606
607 if (false == detect_sink(link, &new_connection_type)) { 607 if (false == dc_link_detect_sink(link, &new_connection_type)) {
608 BREAK_TO_DEBUGGER(); 608 BREAK_TO_DEBUGGER();
609 return false; 609 return false;
610 } 610 }
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
754 * fail-safe mode 754 * fail-safe mode
755 */ 755 */
756 if (dc_is_hdmi_signal(link->connector_signal) || 756 if (dc_is_hdmi_signal(link->connector_signal) ||
757 dc_is_dvi_signal(link->connector_signal)) 757 dc_is_dvi_signal(link->connector_signal)) {
758 if (prev_sink != NULL)
759 dc_sink_release(prev_sink);
760
758 return false; 761 return false;
762 }
759 default: 763 default:
760 break; 764 break;
761 } 765 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d43cefbc43d3..1b48ab9aea89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
215 215
216bool dc_link_is_dp_sink_present(struct dc_link *link); 216bool dc_link_is_dp_sink_present(struct dc_link *link);
217 217
218bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
218/* 219/*
219 * DPCD access interfaces 220 * DPCD access interfaces
220 */ 221 */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 14384d9675a8..b2f308766a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
2560 dc->prev_display_config = *pp_display_cfg; 2560 dc->prev_display_config = *pp_display_cfg;
2561} 2561}
2562 2562
2563void dce110_set_bandwidth( 2563static void dce110_set_bandwidth(
2564 struct dc *dc, 2564 struct dc *dc,
2565 struct dc_state *context, 2565 struct dc_state *context,
2566 bool decrease_allowed) 2566 bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
68 const struct dc_state *context, 68 const struct dc_state *context,
69 struct dm_pp_display_configuration *pp_display_cfg); 69 struct dm_pp_display_configuration *pp_display_cfg);
70 70
71void dce110_set_bandwidth(
72 struct dc *dc,
73 struct dc_state *context,
74 bool decrease_allowed);
75
76uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); 71uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
77 72
78void dp_receiver_power_ctrl(struct dc_link *link, bool on); 73void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
244 dh_data->dchub_info_valid = false; 244 dh_data->dchub_info_valid = false;
245} 245}
246 246
247static void dce120_set_bandwidth(
248 struct dc *dc,
249 struct dc_state *context,
250 bool decrease_allowed)
251{
252 if (context->stream_count <= 0)
253 return;
254
255 dce110_set_bandwidth(dc, context, decrease_allowed);
256}
257
258void dce120_hw_sequencer_construct(struct dc *dc) 247void dce120_hw_sequencer_construct(struct dc *dc)
259{ 248{
260 /* All registers used by dce11.2 match those in dce11 in offset and 249 /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
263 dce110_hw_sequencer_construct(dc); 252 dce110_hw_sequencer_construct(dc);
264 dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; 253 dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
265 dc->hwss.update_dchub = dce120_update_dchub; 254 dc->hwss.update_dchub = dce120_update_dchub;
266 dc->hwss.set_bandwidth = dce120_set_bandwidth;
267} 255}
268 256
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 14391b06080c..43b82e14007e 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -292,7 +292,7 @@ struct tile_config {
292struct kfd2kgd_calls { 292struct kfd2kgd_calls {
293 int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, 293 int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
294 void **mem_obj, uint64_t *gpu_addr, 294 void **mem_obj, uint64_t *gpu_addr,
295 void **cpu_ptr); 295 void **cpu_ptr, bool mqd_gfx9);
296 296
297 void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); 297 void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
298 298
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 08b5bb219816..94d6dabec2dc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
754 drm->irq_enabled = true; 754 drm->irq_enabled = true;
755 755
756 ret = drm_vblank_init(drm, drm->mode_config.num_crtc); 756 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
757 drm_crtc_vblank_reset(&malidp->crtc);
757 if (ret < 0) { 758 if (ret < 0) {
758 DRM_ERROR("failed to initialise vblank\n"); 759 DRM_ERROR("failed to initialise vblank\n");
759 goto vblank_fail; 760 goto vblank_fail;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..2781e462c1ed 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
384 384
385static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, 385static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
386 dma_addr_t *addrs, s32 *pitches, 386 dma_addr_t *addrs, s32 *pitches,
387 int num_planes, u16 w, u16 h, u32 fmt_id) 387 int num_planes, u16 w, u16 h, u32 fmt_id,
388 const s16 *rgb2yuv_coeffs)
388{ 389{
389 u32 base = MALIDP500_SE_MEMWRITE_BASE; 390 u32 base = MALIDP500_SE_MEMWRITE_BASE;
390 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); 391 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
416 417
417 malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), 418 malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
418 MALIDP500_SE_MEMWRITE_OUT_SIZE); 419 MALIDP500_SE_MEMWRITE_OUT_SIZE);
420
421 if (rgb2yuv_coeffs) {
422 int i;
423
424 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
425 malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
426 MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
427 }
428 }
429
419 malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); 430 malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
420 431
421 return 0; 432 return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
658 669
659static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, 670static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
660 dma_addr_t *addrs, s32 *pitches, 671 dma_addr_t *addrs, s32 *pitches,
661 int num_planes, u16 w, u16 h, u32 fmt_id) 672 int num_planes, u16 w, u16 h, u32 fmt_id,
673 const s16 *rgb2yuv_coeffs)
662{ 674{
663 u32 base = MALIDP550_SE_MEMWRITE_BASE; 675 u32 base = MALIDP550_SE_MEMWRITE_BASE;
664 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); 676 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
689 malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, 701 malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
690 MALIDP550_SE_CONTROL); 702 MALIDP550_SE_CONTROL);
691 703
704 if (rgb2yuv_coeffs) {
705 int i;
706
707 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
708 malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
709 MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
710 }
711 }
712
692 return 0; 713 return 0;
693} 714}
694 715
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index ad2e96915d44..9fc94c08190f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -191,7 +191,8 @@ struct malidp_hw {
191 * @param fmt_id - internal format ID of output buffer 191 * @param fmt_id - internal format ID of output buffer
192 */ 192 */
193 int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, 193 int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
194 s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id); 194 s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
195 const s16 *rgb2yuv_coeffs);
195 196
196 /* 197 /*
197 * Disable the writing to memory of the next frame's content. 198 * Disable the writing to memory of the next frame's content.
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index ba6ae66387c9..91472e5e0c8b 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
26 s32 pitches[2]; 26 s32 pitches[2];
27 u8 format; 27 u8 format;
28 u8 n_planes; 28 u8 n_planes;
29 bool rgb2yuv_initialized;
30 const s16 *rgb2yuv_coeffs;
29}; 31};
30 32
31static int malidp_mw_connector_get_modes(struct drm_connector *connector) 33static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
84static struct drm_connector_state * 86static struct drm_connector_state *
85malidp_mw_connector_duplicate_state(struct drm_connector *connector) 87malidp_mw_connector_duplicate_state(struct drm_connector *connector)
86{ 88{
87 struct malidp_mw_connector_state *mw_state; 89 struct malidp_mw_connector_state *mw_state, *mw_current_state;
88 90
89 if (WARN_ON(!connector->state)) 91 if (WARN_ON(!connector->state))
90 return NULL; 92 return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
93 if (!mw_state) 95 if (!mw_state)
94 return NULL; 96 return NULL;
95 97
96 /* No need to preserve any of our driver-local data */ 98 mw_current_state = to_mw_state(connector->state);
99 mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
100 mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
101
97 __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); 102 __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
98 103
99 return &mw_state->base; 104 return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
108 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 113 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
109}; 114};
110 115
116static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
117 47, 157, 16,
118 -26, -87, 112,
119 112, -102, -10,
120 16, 128, 128
121};
122
111static int 123static int
112malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, 124malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
113 struct drm_crtc_state *crtc_state, 125 struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
157 } 169 }
158 mw_state->n_planes = n_planes; 170 mw_state->n_planes = n_planes;
159 171
172 if (fb->format->is_yuv)
173 mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
174
160 return 0; 175 return 0;
161} 176}
162 177
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
239 254
240 drm_writeback_queue_job(mw_conn, conn_state->writeback_job); 255 drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
241 conn_state->writeback_job = NULL; 256 conn_state->writeback_job = NULL;
242
243 hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, 257 hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
244 mw_state->pitches, mw_state->n_planes, 258 mw_state->pitches, mw_state->n_planes,
245 fb->width, fb->height, mw_state->format); 259 fb->width, fb->height, mw_state->format,
260 !mw_state->rgb2yuv_initialized ?
261 mw_state->rgb2yuv_coeffs : NULL);
262 mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
246 } else { 263 } else {
247 DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); 264 DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
248 hwdev->hw->disable_memwrite(hwdev); 265 hwdev->hw->disable_memwrite(hwdev);
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 3579d36b2a71..6ffe849774f2 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -205,6 +205,7 @@
205#define MALIDP500_SE_BASE 0x00c00 205#define MALIDP500_SE_BASE 0x00c00
206#define MALIDP500_SE_CONTROL 0x00c0c 206#define MALIDP500_SE_CONTROL 0x00c0c
207#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c 207#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
208#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
208#define MALIDP500_SE_MEMWRITE_BASE 0x00e00 209#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
209#define MALIDP500_DC_IRQ_BASE 0x00f00 210#define MALIDP500_DC_IRQ_BASE 0x00f00
210#define MALIDP500_CONFIG_VALID 0x00f00 211#define MALIDP500_CONFIG_VALID 0x00f00
@@ -238,6 +239,7 @@
238#define MALIDP550_SE_CONTROL 0x08010 239#define MALIDP550_SE_CONTROL 0x08010
239#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) 240#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
240#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 241#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
242#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
241#define MALIDP550_SE_MEMWRITE_BASE 0x08100 243#define MALIDP550_SE_MEMWRITE_BASE 0x08100
242#define MALIDP550_DC_BASE 0x0c000 244#define MALIDP550_DC_BASE 0x0c000
243#define MALIDP550_DC_CONTROL 0x0c010 245#define MALIDP550_DC_CONTROL 0x0c010
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..018fcdb353d2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
2067 struct drm_connector *connector; 2067 struct drm_connector *connector;
2068 struct drm_connector_list_iter conn_iter; 2068 struct drm_connector_list_iter conn_iter;
2069 2069
2070 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2070 if (!drm_drv_uses_atomic_modeset(dev))
2071 return; 2071 return;
2072 2072
2073 list_for_each_entry(plane, &config->plane_list, head) { 2073 list_for_each_entry(plane, &config->plane_list, head) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6f28fe58f169..373bd4c2b698 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
151 return ret; 151 return ret;
152 } 152 }
153 153
154 if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { 154 if (drm_drv_uses_atomic_modeset(dev)) {
155 ret = drm_atomic_debugfs_init(minor); 155 ret = drm_atomic_debugfs_init(minor);
156 if (ret) { 156 if (ret) {
157 DRM_ERROR("Failed to create atomic debugfs files\n"); 157 DRM_ERROR("Failed to create atomic debugfs files\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..16ec93b75dbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2370{ 2370{
2371 int c, o; 2371 int c, o;
2372 struct drm_connector *connector; 2372 struct drm_connector *connector;
2373 const struct drm_connector_helper_funcs *connector_funcs;
2374 int my_score, best_score, score; 2373 int my_score, best_score, score;
2375 struct drm_fb_helper_crtc **crtcs, *crtc; 2374 struct drm_fb_helper_crtc **crtcs, *crtc;
2376 struct drm_fb_helper_connector *fb_helper_conn; 2375 struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2399 if (drm_has_preferred_mode(fb_helper_conn, width, height)) 2398 if (drm_has_preferred_mode(fb_helper_conn, width, height))
2400 my_score++; 2399 my_score++;
2401 2400
2402 connector_funcs = connector->helper_private;
2403
2404 /* 2401 /*
2405 * select a crtc for this connector and then attempt to configure 2402 * select a crtc for this connector and then attempt to configure
2406 * remaining connectors 2403 * remaining connectors
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..1d9a9d2fe0e0 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,7 +24,6 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include <drm/drm_device.h>
28#include <drm/drm_crtc.h> 27#include <drm/drm_crtc.h>
29#include <drm/drm_panel.h> 28#include <drm/drm_panel.h>
30 29
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
105 if (panel->connector) 104 if (panel->connector)
106 return -EBUSY; 105 return -EBUSY;
107 106
108 panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
109 if (!panel->link) {
110 dev_err(panel->dev, "failed to link panel to %s\n",
111 dev_name(connector->dev->dev));
112 return -EINVAL;
113 }
114
115 panel->connector = connector; 107 panel->connector = connector;
116 panel->drm = connector->dev; 108 panel->drm = connector->dev;
117 109
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
133 */ 125 */
134int drm_panel_detach(struct drm_panel *panel) 126int drm_panel_detach(struct drm_panel *panel)
135{ 127{
136 device_link_del(panel->link);
137
138 panel->connector = NULL; 128 panel->connector = NULL;
139 panel->drm = NULL; 129 panel->drm = NULL;
140 130
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..759278fef35a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
97{ 97{
98 int ret; 98 int ret;
99 99
100 WARN_ON(*fence);
101
100 *fence = drm_syncobj_fence_get(syncobj); 102 *fence = drm_syncobj_fence_get(syncobj);
101 if (*fence) 103 if (*fence)
102 return 1; 104 return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
743 745
744 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 746 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
745 for (i = 0; i < count; ++i) { 747 for (i = 0; i < count; ++i) {
748 if (entries[i].fence)
749 continue;
750
746 drm_syncobj_fence_get_or_add_callback(syncobjs[i], 751 drm_syncobj_fence_get_or_add_callback(syncobjs[i],
747 &entries[i].fence, 752 &entries[i].fence,
748 &entries[i].syncobj_cb, 753 &entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571..83c1f46670bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
592 struct device *dev = &pdev->dev; 592 struct device *dev = &pdev->dev;
593 struct component_match *match = NULL; 593 struct component_match *match = NULL;
594 594
595 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
596
597 if (!dev->platform_data) { 595 if (!dev->platform_data) {
598 struct device_node *core_node; 596 struct device_node *core_node;
599 597
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
655 for_each_compatible_node(np, NULL, "vivante,gc") { 653 for_each_compatible_node(np, NULL, "vivante,gc") {
656 if (!of_device_is_available(np)) 654 if (!of_device_is_available(np))
657 continue; 655 continue;
658 pdev = platform_device_register_simple("etnaviv", -1, 656
659 NULL, 0); 657 pdev = platform_device_alloc("etnaviv", -1);
660 if (IS_ERR(pdev)) { 658 if (!pdev) {
661 ret = PTR_ERR(pdev); 659 ret = -ENOMEM;
660 of_node_put(np);
661 goto unregister_platform_driver;
662 }
663 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
664 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
665
666 /*
667 * Apply the same DMA configuration to the virtual etnaviv
668 * device as the GPU we found. This assumes that all Vivante
669 * GPUs in the system share the same DMA constraints.
670 */
671 of_dma_configure(&pdev->dev, np, true);
672
673 ret = platform_device_add(pdev);
674 if (ret) {
675 platform_device_put(pdev);
662 of_node_put(np); 676 of_node_put(np);
663 goto unregister_platform_driver; 677 goto unregister_platform_driver;
664 } 678 }
679
665 etnaviv_drm = pdev; 680 etnaviv_drm = pdev;
666 of_node_put(np); 681 of_node_put(np);
667 break; 682 break;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 87f6b5672e11..797d9ee5f15a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
55static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv, 55static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
56 unsigned long start, unsigned long size) 56 unsigned long start, unsigned long size)
57{ 57{
58 struct iommu_domain *domain; 58 priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
59 int ret;
60
61 domain = iommu_domain_alloc(priv->dma_dev->bus);
62 if (!domain)
63 return -ENOMEM;
64
65 ret = iommu_get_dma_cookie(domain);
66 if (ret)
67 goto free_domain;
68
69 ret = iommu_dma_init_domain(domain, start, size, NULL);
70 if (ret)
71 goto put_cookie;
72
73 priv->mapping = domain;
74 return 0; 59 return 0;
75
76put_cookie:
77 iommu_put_dma_cookie(domain);
78free_domain:
79 iommu_domain_free(domain);
80 return ret;
81} 60}
82 61
83static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv) 62static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
84{ 63{
85 struct iommu_domain *domain = priv->mapping;
86
87 iommu_put_dma_cookie(domain);
88 iommu_domain_free(domain);
89 priv->mapping = NULL; 64 priv->mapping = NULL;
90} 65}
91 66
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
94{ 69{
95 struct iommu_domain *domain = priv->mapping; 70 struct iommu_domain *domain = priv->mapping;
96 71
97 return iommu_attach_device(domain, dev); 72 if (dev != priv->dma_dev)
73 return iommu_attach_device(domain, dev);
74 return 0;
98} 75}
99 76
100static inline void __exynos_iommu_detach(struct exynos_drm_private *priv, 77static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
102{ 79{
103 struct iommu_domain *domain = priv->mapping; 80 struct iommu_domain *domain = priv->mapping;
104 81
105 iommu_detach_device(domain, dev); 82 if (dev != priv->dma_dev)
83 iommu_detach_device(domain, dev);
106} 84}
107#else 85#else
108#error Unsupported architecture and IOMMU/DMA-mapping glue code 86#error Unsupported architecture and IOMMU/DMA-mapping glue code
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5d2f0d548469..250b5e02a314 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
191 break; 191 break;
192 } 192 }
193 /* TDA9950 executes all retries for us */ 193 /* TDA9950 executes all retries for us */
194 tx_status |= CEC_TX_STATUS_MAX_RETRIES; 194 if (tx_status != CEC_TX_STATUS_OK)
195 tx_status |= CEC_TX_STATUS_MAX_RETRIES;
195 cec_transmit_done(priv->adap, tx_status, arb_lost_cnt, 196 cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
196 nack_cnt, 0, err_cnt); 197 nack_cnt, 0, err_cnt);
197 break; 198 break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
310 /* Wait up to .5s for it to signal non-busy */ 311 /* Wait up to .5s for it to signal non-busy */
311 do { 312 do {
312 csr = tda9950_read(client, REG_CSR); 313 csr = tda9950_read(client, REG_CSR);
313 if (!(csr & CSR_BUSY) || --timeout) 314 if (!(csr & CSR_BUSY) || !--timeout)
314 break; 315 break;
315 msleep(10); 316 msleep(10);
316 } while (1); 317 } while (1);
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6e3f56684f4e..51ed99a37803 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
170 unsigned int tiling_mode = 0; 170 unsigned int tiling_mode = 0;
171 unsigned int stride = 0; 171 unsigned int stride = 0;
172 172
173 switch (info->drm_format_mod << 10) { 173 switch (info->drm_format_mod) {
174 case PLANE_CTL_TILED_LINEAR: 174 case DRM_FORMAT_MOD_LINEAR:
175 tiling_mode = I915_TILING_NONE; 175 tiling_mode = I915_TILING_NONE;
176 break; 176 break;
177 case PLANE_CTL_TILED_X: 177 case I915_FORMAT_MOD_X_TILED:
178 tiling_mode = I915_TILING_X; 178 tiling_mode = I915_TILING_X;
179 stride = info->stride; 179 stride = info->stride;
180 break; 180 break;
181 case PLANE_CTL_TILED_Y: 181 case I915_FORMAT_MOD_Y_TILED:
182 case I915_FORMAT_MOD_Yf_TILED:
182 tiling_mode = I915_TILING_Y; 183 tiling_mode = I915_TILING_Y;
183 stride = info->stride; 184 stride = info->stride;
184 break; 185 break;
185 default: 186 default:
186 gvt_dbg_core("not supported tiling mode\n"); 187 gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
188 info->drm_format_mod);
187 } 189 }
188 obj->tiling_and_stride = tiling_mode | stride; 190 obj->tiling_and_stride = tiling_mode | stride;
189 } else { 191 } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
222 info->height = p.height; 224 info->height = p.height;
223 info->stride = p.stride; 225 info->stride = p.stride;
224 info->drm_format = p.drm_format; 226 info->drm_format = p.drm_format;
225 info->drm_format_mod = p.tiled; 227
228 switch (p.tiled) {
229 case PLANE_CTL_TILED_LINEAR:
230 info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
231 break;
232 case PLANE_CTL_TILED_X:
233 info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
234 break;
235 case PLANE_CTL_TILED_Y:
236 info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
237 break;
238 case PLANE_CTL_TILED_YF:
239 info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
240 break;
241 default:
242 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
243 }
244
226 info->size = (((p.stride * p.height * p.bpp) / 8) + 245 info->size = (((p.stride * p.height * p.bpp) / 8) +
227 (PAGE_SIZE - 1)) >> PAGE_SHIFT; 246 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
228 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 247 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
229 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 248 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
230 if (ret) 249 if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index face664be3e8..481896fb712a 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
220 if (IS_SKYLAKE(dev_priv) 220 if (IS_SKYLAKE(dev_priv)
221 || IS_KABYLAKE(dev_priv) 221 || IS_KABYLAKE(dev_priv)
222 || IS_BROXTON(dev_priv)) { 222 || IS_BROXTON(dev_priv)) {
223 plane->tiled = (val & PLANE_CTL_TILED_MASK) >> 223 plane->tiled = val & PLANE_CTL_TILED_MASK;
224 _PLANE_CTL_TILED_SHIFT;
225 fmt = skl_format_to_drm( 224 fmt = skl_format_to_drm(
226 val & PLANE_CTL_FORMAT_MASK, 225 val & PLANE_CTL_FORMAT_MASK,
227 val & PLANE_CTL_ORDER_RGBX, 226 val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
260 return -EINVAL; 259 return -EINVAL;
261 } 260 }
262 261
263 plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), 262 plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
264 (IS_SKYLAKE(dev_priv) 263 (IS_SKYLAKE(dev_priv)
265 || IS_KABYLAKE(dev_priv) 264 || IS_KABYLAKE(dev_priv)
266 || IS_BROXTON(dev_priv)) ? 265 || IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index cb055f3c81a2..60c155085029 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
101/* color space conversion and gamma correction are not included */ 101/* color space conversion and gamma correction are not included */
102struct intel_vgpu_primary_plane_format { 102struct intel_vgpu_primary_plane_format {
103 u8 enabled; /* plane is enabled */ 103 u8 enabled; /* plane is enabled */
104 u8 tiled; /* X-tiled */ 104 u32 tiled; /* tiling mode: linear, X-tiled, Y tiled, etc */
105 u8 bpp; /* bits per pixel */ 105 u8 bpp; /* bits per pixel */
106 u32 hw_format; /* format field in the PRI_CTL register */ 106 u32 hw_format; /* format field in the PRI_CTL register */
107 u32 drm_format; /* format in DRM definition */ 107 u32 drm_format; /* format in DRM definition */
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca555197..94c1089ecf59 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
1296 return 0; 1296 return 0;
1297} 1297}
1298 1298
1299static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
1300 unsigned int offset, void *p_data, unsigned int bytes)
1301{
1302 write_vreg(vgpu, offset, p_data, bytes);
1303
1304 if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
1305 vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
1306 else
1307 vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
1308
1309 return 0;
1310}
1311
1299static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, 1312static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
1300 unsigned int offset, void *p_data, unsigned int bytes) 1313 unsigned int offset, void *p_data, unsigned int bytes)
1301{ 1314{
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
1525 u32 v = *(u32 *)p_data; 1538 u32 v = *(u32 *)p_data;
1526 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; 1539 u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
1527 1540
1528 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; 1541 switch (offset) {
1529 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; 1542 case _PHY_CTL_FAMILY_EDP:
1530 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; 1543 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
1544 break;
1545 case _PHY_CTL_FAMILY_DDI:
1546 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
1547 vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
1548 break;
1549 }
1531 1550
1532 vgpu_vreg(vgpu, offset) = v; 1551 vgpu_vreg(vgpu, offset) = v;
1533 1552
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2812 MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL, 2831 MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
2813 skl_power_well_ctl_write); 2832 skl_power_well_ctl_write);
2814 2833
2834 MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
2835
2815 MMIO_D(_MMIO(0xa210), D_SKL_PLUS); 2836 MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
2816 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2837 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
2817 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2838 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2987 NULL, gen9_trtte_write); 3008 NULL, gen9_trtte_write);
2988 MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); 3009 MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
2989 3010
2990 MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
2991
2992 MMIO_D(_MMIO(0x46430), D_SKL_PLUS); 3011 MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
2993 3012
2994 MMIO_D(_MMIO(0x46520), D_SKL_PLUS); 3013 MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
3025 MMIO_D(_MMIO(0x44500), D_SKL_PLUS); 3044 MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
3026 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 3045 MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
3027 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, 3046 MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3028 NULL, NULL); 3047 NULL, NULL);
3048 MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
3049 NULL, NULL);
3029 3050
3030 MMIO_D(_MMIO(0x4ab8), D_KBL); 3051 MMIO_D(_MMIO(0x4ab8), D_KBL);
3031 MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); 3052 MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
@@ -3189,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
3189 MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); 3210 MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
3190 3211
3191 MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); 3212 MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
3213 MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
3192 3214
3193 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); 3215 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
3194 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); 3216 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..9ad89e38f6c0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/mmu_context.h> 34#include <linux/mmu_context.h>
35#include <linux/sched/mm.h>
35#include <linux/types.h> 36#include <linux/types.h>
36#include <linux/list.h> 37#include <linux/list.h>
37#include <linux/rbtree.h> 38#include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1792 info = (struct kvmgt_guest_info *)handle; 1793 info = (struct kvmgt_guest_info *)handle;
1793 kvm = info->kvm; 1794 kvm = info->kvm;
1794 1795
1795 if (kthread) 1796 if (kthread) {
1797 if (!mmget_not_zero(kvm->mm))
1798 return -EFAULT;
1796 use_mm(kvm->mm); 1799 use_mm(kvm->mm);
1800 }
1797 1801
1798 idx = srcu_read_lock(&kvm->srcu); 1802 idx = srcu_read_lock(&kvm->srcu);
1799 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1803 ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1800 kvm_read_guest(kvm, gpa, buf, len); 1804 kvm_read_guest(kvm, gpa, buf, len);
1801 srcu_read_unlock(&kvm->srcu, idx); 1805 srcu_read_unlock(&kvm->srcu, idx);
1802 1806
1803 if (kthread) 1807 if (kthread) {
1804 unuse_mm(kvm->mm); 1808 unuse_mm(kvm->mm);
1809 mmput(kvm->mm);
1810 }
1805 1811
1806 return ret; 1812 return ret;
1807} 1813}
@@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1827{ 1833{
1828 struct kvmgt_guest_info *info; 1834 struct kvmgt_guest_info *info;
1829 struct kvm *kvm; 1835 struct kvm *kvm;
1836 int idx;
1837 bool ret;
1830 1838
1831 if (!handle_valid(handle)) 1839 if (!handle_valid(handle))
1832 return false; 1840 return false;
@@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1834 info = (struct kvmgt_guest_info *)handle; 1842 info = (struct kvmgt_guest_info *)handle;
1835 kvm = info->kvm; 1843 kvm = info->kvm;
1836 1844
1837 return kvm_is_visible_gfn(kvm, gfn); 1845 idx = srcu_read_lock(&kvm->srcu);
1846 ret = kvm_is_visible_gfn(kvm, gfn);
1847 srcu_read_unlock(&kvm->srcu, idx);
1838 1848
1849 return ret;
1839} 1850}
1840 1851
1841struct intel_gvt_mpt kvmgt_mpt = { 1852struct intel_gvt_mpt kvmgt_mpt = {
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..9bb9a85c992c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
244 244
245 /* set the bit 0:2(Core C-State ) to C0 */ 245 /* set the bit 0:2(Core C-State ) to C0 */
246 vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; 246 vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
247
248 if (IS_BROXTON(vgpu->gvt->dev_priv)) {
249 vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
250 ~(BIT(0) | BIT(1));
251 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
252 ~PHY_POWER_GOOD;
253 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
254 ~PHY_POWER_GOOD;
255 vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
256 ~BIT(30);
257 vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
258 ~BIT(30);
259 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
260 ~BXT_PHY_LANE_ENABLED;
261 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
262 BXT_PHY_CMNLANE_POWERDOWN_ACK |
263 BXT_PHY_LANE_POWERDOWN_ACK;
264 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
265 ~BXT_PHY_LANE_ENABLED;
266 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
267 BXT_PHY_CMNLANE_POWERDOWN_ACK |
268 BXT_PHY_LANE_POWERDOWN_ACK;
269 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
270 ~BXT_PHY_LANE_ENABLED;
271 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
272 BXT_PHY_CMNLANE_POWERDOWN_ACK |
273 BXT_PHY_LANE_POWERDOWN_ACK;
274 }
247 } else { 275 } else {
248#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) 276#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
249 /* only reset the engine related, so starting with 0x44200 277 /* only reset the engine related, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2c..e872f4847fbe 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
562 * performace for batch mmio read/write, so we need 562 * performace for batch mmio read/write, so we need
563 * handle forcewake mannually. 563 * handle forcewake mannually.
564 */ 564 */
565 intel_runtime_pm_get(dev_priv);
566 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 565 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
567 switch_mmio(pre, next, ring_id); 566 switch_mmio(pre, next, ring_id);
568 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 567 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
569 intel_runtime_pm_put(dev_priv);
570} 568}
571 569
572/** 570/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
42#define DEVICE_TYPE_EFP3 0x20 42#define DEVICE_TYPE_EFP3 0x20
43#define DEVICE_TYPE_EFP4 0x10 43#define DEVICE_TYPE_EFP4 0x10
44 44
45#define DEV_SIZE 38
46
47struct opregion_header { 45struct opregion_header {
48 u8 signature[16]; 46 u8 signature[16];
49 u32 size; 47 u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
63 u16 size; /* data size */ 61 u16 size; /* data size */
64} __packed; 62} __packed;
65 63
64/* For supporting windows guest with opregion, here hardcode the emulated
65 * bdb header version as '186', and the corresponding child_device_config
66 * length should be '33' but not '38'.
67 */
66struct efp_child_device_config { 68struct efp_child_device_config {
67 u16 handle; 69 u16 handle;
68 u16 device_type; 70 u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
109 u8 mipi_bridge_type; /* 171 */ 111 u8 mipi_bridge_type; /* 171 */
110 u16 device_class_ext; 112 u16 device_class_ext;
111 u8 dvo_function; 113 u8 dvo_function;
112 u8 dp_usb_type_c:1; /* 195 */
113 u8 skip6:7;
114 u8 dp_usb_type_c_2x_gpio_index; /* 195 */
115 u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
116 u8 iboost_dp:4; /* 196 */
117 u8 iboost_hdmi:4; /* 196 */
118} __packed; 114} __packed;
119 115
120struct vbt { 116struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
155 v->header.bdb_offset = offsetof(struct vbt, bdb_header); 151 v->header.bdb_offset = offsetof(struct vbt, bdb_header);
156 152
157 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK"); 153 strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
158 v->bdb_header.version = 186; /* child_dev_size = 38 */ 154 v->bdb_header.version = 186; /* child_dev_size = 33 */
159 v->bdb_header.header_size = sizeof(v->bdb_header); 155 v->bdb_header.header_size = sizeof(v->bdb_header);
160 156
161 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header) 157 v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
169 165
170 /* child device */ 166 /* child device */
171 num_child = 4; /* each port has one child */ 167 num_child = 4; /* each port has one child */
168 v->general_definitions.child_dev_size =
169 sizeof(struct efp_child_device_config);
172 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS; 170 v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
173 /* size will include child devices */ 171 /* size will include child devices */
174 v->general_definitions_header.size = 172 v->general_definitions_header.size =
175 sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE; 173 sizeof(struct bdb_general_definitions) +
176 v->general_definitions.child_dev_size = DEV_SIZE; 174 num_child * v->general_definitions.child_dev_size;
177 175
178 /* portA */ 176 /* portA */
179 v->child0.handle = DEVICE_TYPE_EFP1; 177 v->child0.handle = DEVICE_TYPE_EFP1;
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff..c32e7d5e8629 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
47 return false; 47 return false;
48} 48}
49 49
50/* We give 2 seconds higher prio for vGPU during start */
51#define GVT_SCHED_VGPU_PRI_TIME 2
52
50struct vgpu_sched_data { 53struct vgpu_sched_data {
51 struct list_head lru_list; 54 struct list_head lru_list;
52 struct intel_vgpu *vgpu; 55 struct intel_vgpu *vgpu;
53 bool active; 56 bool active;
54 57 bool pri_sched;
58 ktime_t pri_time;
55 ktime_t sched_in_time; 59 ktime_t sched_in_time;
56 ktime_t sched_time; 60 ktime_t sched_time;
57 ktime_t left_ts; 61 ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
183 if (!vgpu_has_pending_workload(vgpu_data->vgpu)) 187 if (!vgpu_has_pending_workload(vgpu_data->vgpu))
184 continue; 188 continue;
185 189
190 if (vgpu_data->pri_sched) {
191 if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
192 vgpu = vgpu_data->vgpu;
193 break;
194 } else
195 vgpu_data->pri_sched = false;
196 }
197
186 /* Return the vGPU only if it has time slice left */ 198 /* Return the vGPU only if it has time slice left */
187 if (vgpu_data->left_ts > 0) { 199 if (vgpu_data->left_ts > 0) {
188 vgpu = vgpu_data->vgpu; 200 vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
202 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 214 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
203 struct vgpu_sched_data *vgpu_data; 215 struct vgpu_sched_data *vgpu_data;
204 struct intel_vgpu *vgpu = NULL; 216 struct intel_vgpu *vgpu = NULL;
217
205 /* no active vgpu or has already had a target */ 218 /* no active vgpu or has already had a target */
206 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) 219 if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
207 goto out; 220 goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
209 vgpu = find_busy_vgpu(sched_data); 222 vgpu = find_busy_vgpu(sched_data);
210 if (vgpu) { 223 if (vgpu) {
211 scheduler->next_vgpu = vgpu; 224 scheduler->next_vgpu = vgpu;
212
213 /* Move the last used vGPU to the tail of lru_list */
214 vgpu_data = vgpu->sched_data; 225 vgpu_data = vgpu->sched_data;
215 list_del_init(&vgpu_data->lru_list); 226 if (!vgpu_data->pri_sched) {
216 list_add_tail(&vgpu_data->lru_list, 227 /* Move the last used vGPU to the tail of lru_list */
217 &sched_data->lru_runq_head); 228 list_del_init(&vgpu_data->lru_list);
229 list_add_tail(&vgpu_data->lru_list,
230 &sched_data->lru_runq_head);
231 }
218 } else { 232 } else {
219 scheduler->next_vgpu = gvt->idle_vgpu; 233 scheduler->next_vgpu = gvt->idle_vgpu;
220 } 234 }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
328{ 342{
329 struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; 343 struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
330 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 344 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
345 ktime_t now;
331 346
332 if (!list_empty(&vgpu_data->lru_list)) 347 if (!list_empty(&vgpu_data->lru_list))
333 return; 348 return;
334 349
335 list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); 350 now = ktime_get();
351 vgpu_data->pri_time = ktime_add(now,
352 ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
353 vgpu_data->pri_sched = true;
354
355 list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
336 356
337 if (!hrtimer_active(&sched_data->timer)) 357 if (!hrtimer_active(&sched_data->timer))
338 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), 358 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
426 &vgpu->gvt->scheduler; 446 &vgpu->gvt->scheduler;
427 int ring_id; 447 int ring_id;
428 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 448 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
449 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
429 450
430 if (!vgpu_data->active) 451 if (!vgpu_data->active)
431 return; 452 return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
444 scheduler->current_vgpu = NULL; 465 scheduler->current_vgpu = NULL;
445 } 466 }
446 467
468 intel_runtime_pm_get(dev_priv);
447 spin_lock_bh(&scheduler->mmio_context_lock); 469 spin_lock_bh(&scheduler->mmio_context_lock);
448 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { 470 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
449 if (scheduler->engine_owner[ring_id] == vgpu) { 471 if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
452 } 474 }
453 } 475 }
454 spin_unlock_bh(&scheduler->mmio_context_lock); 476 spin_unlock_bh(&scheduler->mmio_context_lock);
477 intel_runtime_pm_put(dev_priv);
455 mutex_unlock(&vgpu->gvt->sched_lock); 478 mutex_unlock(&vgpu->gvt->sched_lock);
456} 479}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a4e8e3cf74fd..c628be05fbfe 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
281 intel_vgpu_clean_submission(vgpu); 281 intel_vgpu_clean_submission(vgpu);
282 intel_vgpu_clean_display(vgpu); 282 intel_vgpu_clean_display(vgpu);
283 intel_vgpu_clean_opregion(vgpu); 283 intel_vgpu_clean_opregion(vgpu);
284 intel_vgpu_reset_ggtt(vgpu, true);
284 intel_vgpu_clean_gtt(vgpu); 285 intel_vgpu_clean_gtt(vgpu);
285 intel_gvt_hypervisor_detach_vgpu(vgpu); 286 intel_gvt_hypervisor_detach_vgpu(vgpu);
286 intel_vgpu_free_resource(vgpu); 287 intel_vgpu_free_resource(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..a262a64f5625 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
232 return true; 232 return true;
233} 233}
234 234
235static void *compress_next_page(struct drm_i915_error_object *dst)
236{
237 unsigned long page;
238
239 if (dst->page_count >= dst->num_pages)
240 return ERR_PTR(-ENOSPC);
241
242 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
243 if (!page)
244 return ERR_PTR(-ENOMEM);
245
246 return dst->pages[dst->page_count++] = (void *)page;
247}
248
235static int compress_page(struct compress *c, 249static int compress_page(struct compress *c,
236 void *src, 250 void *src,
237 struct drm_i915_error_object *dst) 251 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
245 259
246 do { 260 do {
247 if (zstream->avail_out == 0) { 261 if (zstream->avail_out == 0) {
248 unsigned long page; 262 zstream->next_out = compress_next_page(dst);
249 263 if (IS_ERR(zstream->next_out))
250 page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); 264 return PTR_ERR(zstream->next_out);
251 if (!page)
252 return -ENOMEM;
253 265
254 dst->pages[dst->page_count++] = (void *)page;
255
256 zstream->next_out = (void *)page;
257 zstream->avail_out = PAGE_SIZE; 266 zstream->avail_out = PAGE_SIZE;
258 } 267 }
259 268
260 if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK) 269 if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
261 return -EIO; 270 return -EIO;
262 } while (zstream->avail_in); 271 } while (zstream->avail_in);
263 272
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
268 return 0; 277 return 0;
269} 278}
270 279
271static void compress_fini(struct compress *c, 280static int compress_flush(struct compress *c,
272 struct drm_i915_error_object *dst) 281 struct drm_i915_error_object *dst)
273{ 282{
274 struct z_stream_s *zstream = &c->zstream; 283 struct z_stream_s *zstream = &c->zstream;
275 284
276 if (dst) { 285 do {
277 zlib_deflate(zstream, Z_FINISH); 286 switch (zlib_deflate(zstream, Z_FINISH)) {
278 dst->unused = zstream->avail_out; 287 case Z_OK: /* more space requested */
279 } 288 zstream->next_out = compress_next_page(dst);
289 if (IS_ERR(zstream->next_out))
290 return PTR_ERR(zstream->next_out);
291
292 zstream->avail_out = PAGE_SIZE;
293 break;
294
295 case Z_STREAM_END:
296 goto end;
297
298 default: /* any error */
299 return -EIO;
300 }
301 } while (1);
302
303end:
304 memset(zstream->next_out, 0, zstream->avail_out);
305 dst->unused = zstream->avail_out;
306 return 0;
307}
308
309static void compress_fini(struct compress *c,
310 struct drm_i915_error_object *dst)
311{
312 struct z_stream_s *zstream = &c->zstream;
280 313
281 zlib_deflateEnd(zstream); 314 zlib_deflateEnd(zstream);
282 kfree(zstream->workspace); 315 kfree(zstream->workspace);
283
284 if (c->tmp) 316 if (c->tmp)
285 free_page((unsigned long)c->tmp); 317 free_page((unsigned long)c->tmp);
286} 318}
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
319 return 0; 351 return 0;
320} 352}
321 353
354static int compress_flush(struct compress *c,
355 struct drm_i915_error_object *dst)
356{
357 return 0;
358}
359
322static void compress_fini(struct compress *c, 360static void compress_fini(struct compress *c,
323 struct drm_i915_error_object *dst) 361 struct drm_i915_error_object *dst)
324{ 362{
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
917 unsigned long num_pages; 955 unsigned long num_pages;
918 struct sgt_iter iter; 956 struct sgt_iter iter;
919 dma_addr_t dma; 957 dma_addr_t dma;
958 int ret;
920 959
921 if (!vma) 960 if (!vma)
922 return NULL; 961 return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
930 969
931 dst->gtt_offset = vma->node.start; 970 dst->gtt_offset = vma->node.start;
932 dst->gtt_size = vma->node.size; 971 dst->gtt_size = vma->node.size;
972 dst->num_pages = num_pages;
933 dst->page_count = 0; 973 dst->page_count = 0;
934 dst->unused = 0; 974 dst->unused = 0;
935 975
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
938 return NULL; 978 return NULL;
939 } 979 }
940 980
981 ret = -EINVAL;
941 for_each_sgt_dma(dma, iter, vma->pages) { 982 for_each_sgt_dma(dma, iter, vma->pages) {
942 void __iomem *s; 983 void __iomem *s;
943 int ret;
944 984
945 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); 985 ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
946 986
947 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); 987 s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
948 ret = compress_page(&compress, (void __force *)s, dst); 988 ret = compress_page(&compress, (void __force *)s, dst);
949 io_mapping_unmap_atomic(s); 989 io_mapping_unmap_atomic(s);
950
951 if (ret) 990 if (ret)
952 goto unwind; 991 break;
953 } 992 }
954 goto out;
955 993
956unwind: 994 if (ret || compress_flush(&compress, dst)) {
957 while (dst->page_count--) 995 while (dst->page_count--)
958 free_page((unsigned long)dst->pages[dst->page_count]); 996 free_page((unsigned long)dst->pages[dst->page_count]);
959 kfree(dst); 997 kfree(dst);
960 dst = NULL; 998 dst = NULL;
999 }
961 1000
962out:
963 compress_fini(&compress, dst); 1001 compress_fini(&compress, dst);
964 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); 1002 ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
965 return dst; 1003 return dst;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index f893a4e8b783..8710fb18ed74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -135,6 +135,7 @@ struct i915_gpu_state {
135 struct drm_i915_error_object { 135 struct drm_i915_error_object {
136 u64 gtt_offset; 136 u64 gtt_offset;
137 u64 gtt_size; 137 u64 gtt_size;
138 int num_pages;
138 int page_count; 139 int page_count;
139 int unused; 140 int unused;
140 u32 *pages[0]; 141 u32 *pages[0];
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90628a47ae17..29877969310d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
3091 spin_unlock(&i915->irq_lock); 3091 spin_unlock(&i915->irq_lock);
3092} 3092}
3093 3093
3094static void 3094static u32
3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, 3095gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
3096 u32 *iir)
3097{ 3096{
3098 void __iomem * const regs = dev_priv->regs; 3097 void __iomem * const regs = dev_priv->regs;
3098 u32 iir;
3099 3099
3100 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3100 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3101 return; 3101 return 0;
3102
3103 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
3104 if (likely(iir))
3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
3102 3106
3103 *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3107 return iir;
3104 if (likely(*iir))
3105 raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
3106} 3108}
3107 3109
3108static void 3110static void
3109gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, 3111gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
3110 const u32 master_ctl, const u32 iir)
3111{ 3112{
3112 if (!(master_ctl & GEN11_GU_MISC_IRQ))
3113 return;
3114
3115 if (unlikely(!iir)) {
3116 DRM_ERROR("GU_MISC iir blank!\n");
3117 return;
3118 }
3119
3120 if (iir & GEN11_GU_MISC_GSE) 3113 if (iir & GEN11_GU_MISC_GSE)
3121 intel_opregion_asle_intr(dev_priv); 3114 intel_opregion_asle_intr(dev_priv);
3122 else
3123 DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
3124} 3115}
3125 3116
3126static irqreturn_t gen11_irq_handler(int irq, void *arg) 3117static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
3157 enable_rpm_wakeref_asserts(i915); 3148 enable_rpm_wakeref_asserts(i915);
3158 } 3149 }
3159 3150
3160 gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); 3151 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
3161 3152
3162 /* Acknowledge and enable interrupts. */ 3153 /* Acknowledge and enable interrupts. */
3163 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3154 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
3164 3155
3165 gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); 3156 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
3166 3157
3167 return IRQ_HANDLED; 3158 return IRQ_HANDLED;
3168} 3159}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6a4d1388ad2d..1df3ce134cd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
592 GEN10_FEATURES, \ 592 GEN10_FEATURES, \
593 GEN(11), \ 593 GEN(11), \
594 .ddb_size = 2048, \ 594 .ddb_size = 2048, \
595 .has_csr = 0, \
596 .has_logical_ring_elsq = 1 595 .has_logical_ring_elsq = 1
597 596
598static const struct intel_device_info intel_icelake_11_info = { 597static const struct intel_device_info intel_icelake_11_info = {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 08ec7446282e..9e63cd47b60f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
10422 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10422 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
10423 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) 10423 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
10424#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10424#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10425 _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 10425 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
10426 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) 10426 _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
10427#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) 10427#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
10428#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) 10428#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
10437 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ 10437 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
10438 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) 10438 _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
10439#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 10439#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
10440 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \ 10440 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
10441 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) 10441 _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
10442#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) 10442#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
10443#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) 10443#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 11d834f94220..98358b4b36de 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
199 vma->flags |= I915_VMA_GGTT; 199 vma->flags |= I915_VMA_GGTT;
200 list_add(&vma->obj_link, &obj->vma_list); 200 list_add(&vma->obj_link, &obj->vma_list);
201 } else { 201 } else {
202 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
203 list_add_tail(&vma->obj_link, &obj->vma_list); 202 list_add_tail(&vma->obj_link, &obj->vma_list);
204 } 203 }
205 204
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
807 if (vma->obj) 806 if (vma->obj)
808 rb_erase(&vma->obj_node, &vma->obj->vma_tree); 807 rb_erase(&vma->obj_node, &vma->obj->vma_tree);
809 808
810 if (!i915_vma_is_ggtt(vma))
811 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
812
813 rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { 809 rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
814 GEM_BUG_ON(i915_gem_active_isset(&iter->base)); 810 GEM_BUG_ON(i915_gem_active_isset(&iter->base));
815 kfree(iter); 811 kfree(iter);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b725835b47ef..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
962{ 962{
963 int ret; 963 int ret;
964 964
965 if (INTEL_INFO(dev_priv)->num_pipes == 0)
966 return;
967
968 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); 965 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
969 if (ret < 0) { 966 if (ret < 0) {
970 DRM_ERROR("failed to add audio component (%d)\n", ret); 967 DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 8761513f3532..c9af34861d9e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
2708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) 2708 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
2709 intel_dp_stop_link_train(intel_dp); 2709 intel_dp_stop_link_train(intel_dp);
2710 2710
2711 intel_ddi_enable_pipe_clock(crtc_state); 2711 if (!is_mst)
2712 intel_ddi_enable_pipe_clock(crtc_state);
2712} 2713}
2713 2714
2714static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, 2715static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
2810 bool is_mst = intel_crtc_has_type(old_crtc_state, 2811 bool is_mst = intel_crtc_has_type(old_crtc_state,
2811 INTEL_OUTPUT_DP_MST); 2812 INTEL_OUTPUT_DP_MST);
2812 2813
2813 intel_ddi_disable_pipe_clock(old_crtc_state); 2814 if (!is_mst) {
2814 2815 intel_ddi_disable_pipe_clock(old_crtc_state);
2815 /* 2816 /*
2816 * Power down sink before disabling the port, otherwise we end 2817 * Power down sink before disabling the port, otherwise we end
2817 * up getting interrupts from the sink on detecting link loss. 2818 * up getting interrupts from the sink on detecting link loss.
2818 */ 2819 */
2819 if (!is_mst)
2820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2821 }
2821 2822
2822 intel_disable_ddi_buf(encoder); 2823 intel_disable_ddi_buf(encoder);
2823 2824
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ed3fa1c8a983..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
2988 int w = drm_rect_width(&plane_state->base.src) >> 16; 2988 int w = drm_rect_width(&plane_state->base.src) >> 16;
2989 int h = drm_rect_height(&plane_state->base.src) >> 16; 2989 int h = drm_rect_height(&plane_state->base.src) >> 16;
2990 int dst_x = plane_state->base.dst.x1; 2990 int dst_x = plane_state->base.dst.x1;
2991 int dst_w = drm_rect_width(&plane_state->base.dst);
2991 int pipe_src_w = crtc_state->pipe_src_w; 2992 int pipe_src_w = crtc_state->pipe_src_w;
2992 int max_width = skl_max_plane_width(fb, 0, rotation); 2993 int max_width = skl_max_plane_width(fb, 0, rotation);
2993 int max_height = 4096; 2994 int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
3009 * screen may cause FIFO underflow and display corruption. 3010 * screen may cause FIFO underflow and display corruption.
3010 */ 3011 */
3011 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 3012 if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
3012 (dst_x + w < 4 || dst_x > pipe_src_w - 4)) { 3013 (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
3013 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", 3014 DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
3014 dst_x + w < 4 ? "end" : "start", 3015 dst_x + dst_w < 4 ? "end" : "start",
3015 dst_x + w < 4 ? dst_x + w : dst_x, 3016 dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
3016 4, pipe_src_w - 4); 3017 4, pipe_src_w - 4);
3017 return -ERANGE; 3018 return -ERANGE;
3018 } 3019 }
@@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
5078 mutex_lock(&dev_priv->pcu_lock); 5079 mutex_lock(&dev_priv->pcu_lock);
5079 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 5080 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
5080 mutex_unlock(&dev_priv->pcu_lock); 5081 mutex_unlock(&dev_priv->pcu_lock);
5081 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 5082 /*
5083 * Wait for PCODE to finish disabling IPS. The BSpec specified
5084 * 42ms timeout value leads to occasional timeouts so use 100ms
5085 * instead.
5086 */
5082 if (intel_wait_for_register(dev_priv, 5087 if (intel_wait_for_register(dev_priv,
5083 IPS_CTL, IPS_ENABLE, 0, 5088 IPS_CTL, IPS_ENABLE, 0,
5084 42)) 5089 100))
5085 DRM_ERROR("Timed out waiting for IPS disable\n"); 5090 DRM_ERROR("Timed out waiting for IPS disable\n");
5086 } else { 5091 } else {
5087 I915_WRITE(IPS_CTL, 0); 5092 I915_WRITE(IPS_CTL, 0);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cd0f649b57a5..1193202766a2 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
4160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 4160 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
4161} 4161}
4162 4162
4163/*
4164 * If display is now connected check links status,
4165 * there has been known issues of link loss triggering
4166 * long pulse.
4167 *
4168 * Some sinks (eg. ASUS PB287Q) seem to perform some
4169 * weird HPD ping pong during modesets. So we can apparently
4170 * end up with HPD going low during a modeset, and then
4171 * going back up soon after. And once that happens we must
4172 * retrain the link to get a picture. That's in case no
4173 * userspace component reacted to intermittent HPD dip.
4174 */
4175int intel_dp_retrain_link(struct intel_encoder *encoder, 4163int intel_dp_retrain_link(struct intel_encoder *encoder,
4176 struct drm_modeset_acquire_ctx *ctx) 4164 struct drm_modeset_acquire_ctx *ctx)
4177{ 4165{
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
4661} 4649}
4662 4650
4663static int 4651static int
4664intel_dp_long_pulse(struct intel_connector *connector) 4652intel_dp_long_pulse(struct intel_connector *connector,
4653 struct drm_modeset_acquire_ctx *ctx)
4665{ 4654{
4666 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 4655 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
4667 struct intel_dp *intel_dp = intel_attached_dp(&connector->base); 4656 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
4720 */ 4709 */
4721 status = connector_status_disconnected; 4710 status = connector_status_disconnected;
4722 goto out; 4711 goto out;
4712 } else {
4713 /*
4714 * If display is now connected check links status,
4715 * there has been known issues of link loss triggering
4716 * long pulse.
4717 *
4718 * Some sinks (eg. ASUS PB287Q) seem to perform some
4719 * weird HPD ping pong during modesets. So we can apparently
4720 * end up with HPD going low during a modeset, and then
4721 * going back up soon after. And once that happens we must
4722 * retrain the link to get a picture. That's in case no
4723 * userspace component reacted to intermittent HPD dip.
4724 */
4725 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4726
4727 intel_dp_retrain_link(encoder, ctx);
4723 } 4728 }
4724 4729
4725 /* 4730 /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
4781 return ret; 4786 return ret;
4782 } 4787 }
4783 4788
4784 status = intel_dp_long_pulse(intel_dp->attached_connector); 4789 status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
4785 } 4790 }
4786 4791
4787 intel_dp->detect_done = false; 4792 intel_dp->detect_done = false;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7e3e01607643..4ecd65375603 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
166 struct intel_connector *connector = 166 struct intel_connector *connector =
167 to_intel_connector(old_conn_state->connector); 167 to_intel_connector(old_conn_state->connector);
168 168
169 intel_ddi_disable_pipe_clock(old_crtc_state);
170
169 /* this can fail */ 171 /* this can fail */
170 drm_dp_check_act_status(&intel_dp->mst_mgr); 172 drm_dp_check_act_status(&intel_dp->mst_mgr);
171 /* and this can also fail */ 173 /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
252 I915_WRITE(DP_TP_STATUS(port), temp); 254 I915_WRITE(DP_TP_STATUS(port), temp);
253 255
254 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); 256 ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
257
258 intel_ddi_enable_pipe_clock(pipe_config);
255} 259}
256 260
257static void intel_mst_enable_dp(struct intel_encoder *encoder, 261static void intel_mst_enable_dp(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a9076402dcb0..192972a7d287 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
943 943
944 ret = i2c_transfer(adapter, &msg, 1); 944 ret = i2c_transfer(adapter, &msg, 1);
945 if (ret == 1) 945 if (ret == 1)
946 return 0; 946 ret = 0;
947 return ret >= 0 ? -EIO : ret; 947 else if (ret >= 0)
948 ret = -EIO;
949
950 kfree(write_buf);
951 return ret;
948} 952}
949 953
950static 954static
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5dae16ccd9f1..3e085c5f2b81 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
74 DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", 74 DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
75 lspcon_mode_name(mode)); 75 lspcon_mode_name(mode));
76 76
77 wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100); 77 wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
78 if (current_mode != mode) 78 if (current_mode != mode)
79 DRM_ERROR("LSPCON mode hasn't settled\n"); 79 DRM_ERROR("LSPCON mode hasn't settled\n");
80 80
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
181 u32 brightness, contrast, saturation; 181 u32 brightness, contrast, saturation;
182 u32 old_xscale, old_yscale; 182 u32 old_xscale, old_yscale;
183 /* register access */ 183 /* register access */
184 u32 flip_addr;
185 struct drm_i915_gem_object *reg_bo; 184 struct drm_i915_gem_object *reg_bo;
185 struct overlay_registers __iomem *regs;
186 u32 flip_addr;
186 /* flip handling */ 187 /* flip handling */
187 struct i915_gem_active last_flip; 188 struct i915_gem_active last_flip;
188}; 189};
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
210 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val); 211 PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
211} 212}
212 213
213static struct overlay_registers __iomem *
214intel_overlay_map_regs(struct intel_overlay *overlay)
215{
216 struct drm_i915_private *dev_priv = overlay->i915;
217 struct overlay_registers __iomem *regs;
218
219 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
220 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
221 else
222 regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
223 overlay->flip_addr,
224 PAGE_SIZE);
225
226 return regs;
227}
228
229static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
230 struct overlay_registers __iomem *regs)
231{
232 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
233 io_mapping_unmap(regs);
234}
235
236static void intel_overlay_submit_request(struct intel_overlay *overlay, 214static void intel_overlay_submit_request(struct intel_overlay *overlay,
237 struct i915_request *rq, 215 struct i915_request *rq,
238 i915_gem_retire_fn retire) 216 i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
784 struct drm_i915_gem_object *new_bo, 762 struct drm_i915_gem_object *new_bo,
785 struct put_image_params *params) 763 struct put_image_params *params)
786{ 764{
787 int ret, tmp_width; 765 struct overlay_registers __iomem *regs = overlay->regs;
788 struct overlay_registers __iomem *regs;
789 bool scale_changed = false;
790 struct drm_i915_private *dev_priv = overlay->i915; 766 struct drm_i915_private *dev_priv = overlay->i915;
791 u32 swidth, swidthsw, sheight, ostride; 767 u32 swidth, swidthsw, sheight, ostride;
792 enum pipe pipe = overlay->crtc->pipe; 768 enum pipe pipe = overlay->crtc->pipe;
769 bool scale_changed = false;
793 struct i915_vma *vma; 770 struct i915_vma *vma;
771 int ret, tmp_width;
794 772
795 lockdep_assert_held(&dev_priv->drm.struct_mutex); 773 lockdep_assert_held(&dev_priv->drm.struct_mutex);
796 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 774 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
815 793
816 if (!overlay->active) { 794 if (!overlay->active) {
817 u32 oconfig; 795 u32 oconfig;
818 regs = intel_overlay_map_regs(overlay); 796
819 if (!regs) {
820 ret = -ENOMEM;
821 goto out_unpin;
822 }
823 oconfig = OCONF_CC_OUT_8BIT; 797 oconfig = OCONF_CC_OUT_8BIT;
824 if (IS_GEN4(dev_priv)) 798 if (IS_GEN4(dev_priv))
825 oconfig |= OCONF_CSC_MODE_BT709; 799 oconfig |= OCONF_CSC_MODE_BT709;
826 oconfig |= pipe == 0 ? 800 oconfig |= pipe == 0 ?
827 OCONF_PIPE_A : OCONF_PIPE_B; 801 OCONF_PIPE_A : OCONF_PIPE_B;
828 iowrite32(oconfig, &regs->OCONFIG); 802 iowrite32(oconfig, &regs->OCONFIG);
829 intel_overlay_unmap_regs(overlay, regs);
830 803
831 ret = intel_overlay_on(overlay); 804 ret = intel_overlay_on(overlay);
832 if (ret != 0) 805 if (ret != 0)
833 goto out_unpin; 806 goto out_unpin;
834 } 807 }
835 808
836 regs = intel_overlay_map_regs(overlay);
837 if (!regs) {
838 ret = -ENOMEM;
839 goto out_unpin;
840 }
841
842 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS); 809 iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
843 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ); 810 iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
844 811
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
882 849
883 iowrite32(overlay_cmd_reg(params), &regs->OCMD); 850 iowrite32(overlay_cmd_reg(params), &regs->OCMD);
884 851
885 intel_overlay_unmap_regs(overlay, regs);
886
887 ret = intel_overlay_continue(overlay, vma, scale_changed); 852 ret = intel_overlay_continue(overlay, vma, scale_changed);
888 if (ret) 853 if (ret)
889 goto out_unpin; 854 goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
901int intel_overlay_switch_off(struct intel_overlay *overlay) 866int intel_overlay_switch_off(struct intel_overlay *overlay)
902{ 867{
903 struct drm_i915_private *dev_priv = overlay->i915; 868 struct drm_i915_private *dev_priv = overlay->i915;
904 struct overlay_registers __iomem *regs;
905 int ret; 869 int ret;
906 870
907 lockdep_assert_held(&dev_priv->drm.struct_mutex); 871 lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
918 if (ret != 0) 882 if (ret != 0)
919 return ret; 883 return ret;
920 884
921 regs = intel_overlay_map_regs(overlay); 885 iowrite32(0, &overlay->regs->OCMD);
922 iowrite32(0, &regs->OCMD);
923 intel_overlay_unmap_regs(overlay, regs);
924 886
925 return intel_overlay_off(overlay); 887 return intel_overlay_off(overlay);
926} 888}
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1305 struct drm_intel_overlay_attrs *attrs = data; 1267 struct drm_intel_overlay_attrs *attrs = data;
1306 struct drm_i915_private *dev_priv = to_i915(dev); 1268 struct drm_i915_private *dev_priv = to_i915(dev);
1307 struct intel_overlay *overlay; 1269 struct intel_overlay *overlay;
1308 struct overlay_registers __iomem *regs;
1309 int ret; 1270 int ret;
1310 1271
1311 overlay = dev_priv->overlay; 1272 overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1345 overlay->contrast = attrs->contrast; 1306 overlay->contrast = attrs->contrast;
1346 overlay->saturation = attrs->saturation; 1307 overlay->saturation = attrs->saturation;
1347 1308
1348 regs = intel_overlay_map_regs(overlay); 1309 update_reg_attrs(overlay, overlay->regs);
1349 if (!regs) {
1350 ret = -ENOMEM;
1351 goto out_unlock;
1352 }
1353
1354 update_reg_attrs(overlay, regs);
1355
1356 intel_overlay_unmap_regs(overlay, regs);
1357 1310
1358 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1311 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1359 if (IS_GEN2(dev_priv)) 1312 if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
1386 return ret; 1339 return ret;
1387} 1340}
1388 1341
1342static int get_registers(struct intel_overlay *overlay, bool use_phys)
1343{
1344 struct drm_i915_gem_object *obj;
1345 struct i915_vma *vma;
1346 int err;
1347
1348 obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
1349 if (obj == NULL)
1350 obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
1351 if (IS_ERR(obj))
1352 return PTR_ERR(obj);
1353
1354 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
1355 if (IS_ERR(vma)) {
1356 err = PTR_ERR(vma);
1357 goto err_put_bo;
1358 }
1359
1360 if (use_phys)
1361 overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
1362 else
1363 overlay->flip_addr = i915_ggtt_offset(vma);
1364 overlay->regs = i915_vma_pin_iomap(vma);
1365 i915_vma_unpin(vma);
1366
1367 if (IS_ERR(overlay->regs)) {
1368 err = PTR_ERR(overlay->regs);
1369 goto err_put_bo;
1370 }
1371
1372 overlay->reg_bo = obj;
1373 return 0;
1374
1375err_put_bo:
1376 i915_gem_object_put(obj);
1377 return err;
1378}
1379
1389void intel_setup_overlay(struct drm_i915_private *dev_priv) 1380void intel_setup_overlay(struct drm_i915_private *dev_priv)
1390{ 1381{
1391 struct intel_overlay *overlay; 1382 struct intel_overlay *overlay;
1392 struct drm_i915_gem_object *reg_bo;
1393 struct overlay_registers __iomem *regs;
1394 struct i915_vma *vma = NULL;
1395 int ret; 1383 int ret;
1396 1384
1397 if (!HAS_OVERLAY(dev_priv)) 1385 if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1401 if (!overlay) 1389 if (!overlay)
1402 return; 1390 return;
1403 1391
1404 mutex_lock(&dev_priv->drm.struct_mutex);
1405 if (WARN_ON(dev_priv->overlay))
1406 goto out_free;
1407
1408 overlay->i915 = dev_priv; 1392 overlay->i915 = dev_priv;
1409 1393
1410 reg_bo = NULL;
1411 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1412 reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
1413 if (reg_bo == NULL)
1414 reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
1415 if (IS_ERR(reg_bo))
1416 goto out_free;
1417 overlay->reg_bo = reg_bo;
1418
1419 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1420 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1421 if (ret) {
1422 DRM_ERROR("failed to attach phys overlay regs\n");
1423 goto out_free_bo;
1424 }
1425 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1426 } else {
1427 vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
1428 0, PAGE_SIZE, PIN_MAPPABLE);
1429 if (IS_ERR(vma)) {
1430 DRM_ERROR("failed to pin overlay register bo\n");
1431 ret = PTR_ERR(vma);
1432 goto out_free_bo;
1433 }
1434 overlay->flip_addr = i915_ggtt_offset(vma);
1435
1436 ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
1437 if (ret) {
1438 DRM_ERROR("failed to move overlay register bo into the GTT\n");
1439 goto out_unpin_bo;
1440 }
1441 }
1442
1443 /* init all values */
1444 overlay->color_key = 0x0101fe; 1394 overlay->color_key = 0x0101fe;
1445 overlay->color_key_enabled = true; 1395 overlay->color_key_enabled = true;
1446 overlay->brightness = -19; 1396 overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
1449 1399
1450 init_request_active(&overlay->last_flip, NULL); 1400 init_request_active(&overlay->last_flip, NULL);
1451 1401
1452 regs = intel_overlay_map_regs(overlay); 1402 mutex_lock(&dev_priv->drm.struct_mutex);
1453 if (!regs) 1403
1454 goto out_unpin_bo; 1404 ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
1405 if (ret)
1406 goto out_free;
1407
1408 ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
1409 if (ret)
1410 goto out_reg_bo;
1455 1411
1456 memset_io(regs, 0, sizeof(struct overlay_registers)); 1412 mutex_unlock(&dev_priv->drm.struct_mutex);
1457 update_polyphase_filter(regs);
1458 update_reg_attrs(overlay, regs);
1459 1413
1460 intel_overlay_unmap_regs(overlay, regs); 1414 memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
1415 update_polyphase_filter(overlay->regs);
1416 update_reg_attrs(overlay, overlay->regs);
1461 1417
1462 dev_priv->overlay = overlay; 1418 dev_priv->overlay = overlay;
1463 mutex_unlock(&dev_priv->drm.struct_mutex); 1419 DRM_INFO("Initialized overlay support.\n");
1464 DRM_INFO("initialized overlay support\n");
1465 return; 1420 return;
1466 1421
1467out_unpin_bo: 1422out_reg_bo:
1468 if (vma) 1423 i915_gem_object_put(overlay->reg_bo);
1469 i915_vma_unpin(vma);
1470out_free_bo:
1471 i915_gem_object_put(reg_bo);
1472out_free: 1424out_free:
1473 mutex_unlock(&dev_priv->drm.struct_mutex); 1425 mutex_unlock(&dev_priv->drm.struct_mutex);
1474 kfree(overlay); 1426 kfree(overlay);
1475 return;
1476} 1427}
1477 1428
1478void intel_cleanup_overlay(struct drm_i915_private *dev_priv) 1429void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1479{ 1430{
1480 if (!dev_priv->overlay) 1431 struct intel_overlay *overlay;
1432
1433 overlay = fetch_and_zero(&dev_priv->overlay);
1434 if (!overlay)
1481 return; 1435 return;
1482 1436
1483 /* The bo's should be free'd by the generic code already. 1437 /*
1438 * The bo's should be free'd by the generic code already.
1484 * Furthermore modesetting teardown happens beforehand so the 1439 * Furthermore modesetting teardown happens beforehand so the
1485 * hardware should be off already */ 1440 * hardware should be off already.
1486 WARN_ON(dev_priv->overlay->active); 1441 */
1442 WARN_ON(overlay->active);
1443
1444 i915_gem_object_put(overlay->reg_bo);
1487 1445
1488 i915_gem_object_put(dev_priv->overlay->reg_bo); 1446 kfree(overlay);
1489 kfree(dev_priv->overlay);
1490} 1447}
1491 1448
1492#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1449#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
1498 u32 isr; 1455 u32 isr;
1499}; 1456};
1500 1457
1501static struct overlay_registers __iomem *
1502intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1503{
1504 struct drm_i915_private *dev_priv = overlay->i915;
1505 struct overlay_registers __iomem *regs;
1506
1507 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1508 /* Cast to make sparse happy, but it's wc memory anyway, so
1509 * equivalent to the wc io mapping on X86. */
1510 regs = (struct overlay_registers __iomem *)
1511 overlay->reg_bo->phys_handle->vaddr;
1512 else
1513 regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
1514 overlay->flip_addr);
1515
1516 return regs;
1517}
1518
1519static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1520 struct overlay_registers __iomem *regs)
1521{
1522 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1523 io_mapping_unmap_atomic(regs);
1524}
1525
1526struct intel_overlay_error_state * 1458struct intel_overlay_error_state *
1527intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) 1459intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1528{ 1460{
1529 struct intel_overlay *overlay = dev_priv->overlay; 1461 struct intel_overlay *overlay = dev_priv->overlay;
1530 struct intel_overlay_error_state *error; 1462 struct intel_overlay_error_state *error;
1531 struct overlay_registers __iomem *regs;
1532 1463
1533 if (!overlay || !overlay->active) 1464 if (!overlay || !overlay->active)
1534 return NULL; 1465 return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1541 error->isr = I915_READ(ISR); 1472 error->isr = I915_READ(ISR);
1542 error->base = overlay->flip_addr; 1473 error->base = overlay->flip_addr;
1543 1474
1544 regs = intel_overlay_map_regs_atomic(overlay); 1475 memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
1545 if (!regs)
1546 goto err;
1547
1548 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1549 intel_overlay_unmap_regs_atomic(overlay, regs);
1550 1476
1551 return error; 1477 return error;
1552
1553err:
1554 kfree(error);
1555 return NULL;
1556} 1478}
1557 1479
1558void 1480void
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
132 writel(0x0, comp->regs + DISP_REG_OVL_RST); 132 writel(0x0, comp->regs + DISP_REG_OVL_RST);
133} 133}
134 134
135static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
136{
137 return 4;
138}
139
135static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) 140static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
136{ 141{
137 unsigned int reg; 142 unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
157 162
158static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) 163static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
159{ 164{
165 /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
166 * is defined in mediatek HW data sheet.
167 * The alphabet order in XXX is no relation to data
168 * arrangement in memory.
169 */
160 switch (fmt) { 170 switch (fmt) {
161 default: 171 default:
162 case DRM_FORMAT_RGB565: 172 case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
221 .stop = mtk_ovl_stop, 231 .stop = mtk_ovl_stop,
222 .enable_vblank = mtk_ovl_enable_vblank, 232 .enable_vblank = mtk_ovl_enable_vblank,
223 .disable_vblank = mtk_ovl_disable_vblank, 233 .disable_vblank = mtk_ovl_disable_vblank,
234 .layer_nr = mtk_ovl_layer_nr,
224 .layer_on = mtk_ovl_layer_on, 235 .layer_on = mtk_ovl_layer_on,
225 .layer_off = mtk_ovl_layer_off, 236 .layer_off = mtk_ovl_layer_off,
226 .layer_config = mtk_ovl_layer_config, 237 .layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
31#define RDMA_REG_UPDATE_INT BIT(0) 31#define RDMA_REG_UPDATE_INT BIT(0)
32#define DISP_REG_RDMA_GLOBAL_CON 0x0010 32#define DISP_REG_RDMA_GLOBAL_CON 0x0010
33#define RDMA_ENGINE_EN BIT(0) 33#define RDMA_ENGINE_EN BIT(0)
34#define RDMA_MODE_MEMORY BIT(1)
34#define DISP_REG_RDMA_SIZE_CON_0 0x0014 35#define DISP_REG_RDMA_SIZE_CON_0 0x0014
36#define RDMA_MATRIX_ENABLE BIT(17)
37#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
38#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
35#define DISP_REG_RDMA_SIZE_CON_1 0x0018 39#define DISP_REG_RDMA_SIZE_CON_1 0x0018
36#define DISP_REG_RDMA_TARGET_LINE 0x001c 40#define DISP_REG_RDMA_TARGET_LINE 0x001c
41#define DISP_RDMA_MEM_CON 0x0024
42#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
43#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
44#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
45#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
46#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
47#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
48#define MEM_MODE_INPUT_SWAP BIT(8)
49#define DISP_RDMA_MEM_SRC_PITCH 0x002c
50#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
37#define DISP_REG_RDMA_FIFO_CON 0x0040 51#define DISP_REG_RDMA_FIFO_CON 0x0040
38#define RDMA_FIFO_UNDERFLOW_EN BIT(31) 52#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
39#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) 53#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
40#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) 54#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
41#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) 55#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
56#define DISP_RDMA_MEM_START_ADDR 0x0f00
57
58#define RDMA_MEM_GMC 0x40402020
42 59
43struct mtk_disp_rdma_data { 60struct mtk_disp_rdma_data {
44 unsigned int fifo_size; 61 unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
138 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); 155 writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
139} 156}
140 157
158static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
159 unsigned int fmt)
160{
161 /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
162 * is defined in mediatek HW data sheet.
163 * The alphabet order in XXX is no relation to data
164 * arrangement in memory.
165 */
166 switch (fmt) {
167 default:
168 case DRM_FORMAT_RGB565:
169 return MEM_MODE_INPUT_FORMAT_RGB565;
170 case DRM_FORMAT_BGR565:
171 return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
172 case DRM_FORMAT_RGB888:
173 return MEM_MODE_INPUT_FORMAT_RGB888;
174 case DRM_FORMAT_BGR888:
175 return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
176 case DRM_FORMAT_RGBX8888:
177 case DRM_FORMAT_RGBA8888:
178 return MEM_MODE_INPUT_FORMAT_ARGB8888;
179 case DRM_FORMAT_BGRX8888:
180 case DRM_FORMAT_BGRA8888:
181 return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
182 case DRM_FORMAT_XRGB8888:
183 case DRM_FORMAT_ARGB8888:
184 return MEM_MODE_INPUT_FORMAT_RGBA8888;
185 case DRM_FORMAT_XBGR8888:
186 case DRM_FORMAT_ABGR8888:
187 return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
188 case DRM_FORMAT_UYVY:
189 return MEM_MODE_INPUT_FORMAT_UYVY;
190 case DRM_FORMAT_YUYV:
191 return MEM_MODE_INPUT_FORMAT_YUYV;
192 }
193}
194
195static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
196{
197 return 1;
198}
199
200static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
201 struct mtk_plane_state *state)
202{
203 struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
204 struct mtk_plane_pending_state *pending = &state->pending;
205 unsigned int addr = pending->addr;
206 unsigned int pitch = pending->pitch & 0xffff;
207 unsigned int fmt = pending->format;
208 unsigned int con;
209
210 con = rdma_fmt_convert(rdma, fmt);
211 writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
212
213 if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
214 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
215 RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
216 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
217 RDMA_MATRIX_INT_MTX_SEL,
218 RDMA_MATRIX_INT_MTX_BT601_to_RGB);
219 } else {
220 rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
221 RDMA_MATRIX_ENABLE, 0);
222 }
223
224 writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
225 writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
226 writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
227 rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
228 RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
229}
230
141static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = { 231static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
142 .config = mtk_rdma_config, 232 .config = mtk_rdma_config,
143 .start = mtk_rdma_start, 233 .start = mtk_rdma_start,
144 .stop = mtk_rdma_stop, 234 .stop = mtk_rdma_stop,
145 .enable_vblank = mtk_rdma_enable_vblank, 235 .enable_vblank = mtk_rdma_enable_vblank,
146 .disable_vblank = mtk_rdma_disable_vblank, 236 .disable_vblank = mtk_rdma_disable_vblank,
237 .layer_nr = mtk_rdma_layer_nr,
238 .layer_config = mtk_rdma_layer_config,
147}; 239};
148 240
149static int mtk_disp_rdma_bind(struct device *dev, struct device *master, 241static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
45 bool pending_needs_vblank; 45 bool pending_needs_vblank;
46 struct drm_pending_vblank_event *event; 46 struct drm_pending_vblank_event *event;
47 47
48 struct drm_plane planes[OVL_LAYER_NR]; 48 struct drm_plane *planes;
49 unsigned int layer_nr;
49 bool pending_planes; 50 bool pending_planes;
50 51
51 void __iomem *config_regs; 52 void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
171static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc) 172static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
172{ 173{
173 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 174 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
174 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 175 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
175 176
176 mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base); 177 mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
177 178
178 return 0; 179 return 0;
179} 180}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
181static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc) 182static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
182{ 183{
183 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 184 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
184 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 185 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
185 186
186 mtk_ddp_comp_disable_vblank(ovl); 187 mtk_ddp_comp_disable_vblank(comp);
187} 188}
188 189
189static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc) 190static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
286 } 287 }
287 288
288 /* Initially configure all planes */ 289 /* Initially configure all planes */
289 for (i = 0; i < OVL_LAYER_NR; i++) { 290 for (i = 0; i < mtk_crtc->layer_nr; i++) {
290 struct drm_plane *plane = &mtk_crtc->planes[i]; 291 struct drm_plane *plane = &mtk_crtc->planes[i];
291 struct mtk_plane_state *plane_state; 292 struct mtk_plane_state *plane_state;
292 293
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
334{ 335{
335 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 336 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
336 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); 337 struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
337 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 338 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
338 unsigned int i; 339 unsigned int i;
339 340
340 /* 341 /*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
343 * queue update module registers on vblank. 344 * queue update module registers on vblank.
344 */ 345 */
345 if (state->pending_config) { 346 if (state->pending_config) {
346 mtk_ddp_comp_config(ovl, state->pending_width, 347 mtk_ddp_comp_config(comp, state->pending_width,
347 state->pending_height, 348 state->pending_height,
348 state->pending_vrefresh, 0); 349 state->pending_vrefresh, 0);
349 350
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
351 } 352 }
352 353
353 if (mtk_crtc->pending_planes) { 354 if (mtk_crtc->pending_planes) {
354 for (i = 0; i < OVL_LAYER_NR; i++) { 355 for (i = 0; i < mtk_crtc->layer_nr; i++) {
355 struct drm_plane *plane = &mtk_crtc->planes[i]; 356 struct drm_plane *plane = &mtk_crtc->planes[i];
356 struct mtk_plane_state *plane_state; 357 struct mtk_plane_state *plane_state;
357 358
358 plane_state = to_mtk_plane_state(plane->state); 359 plane_state = to_mtk_plane_state(plane->state);
359 360
360 if (plane_state->pending.config) { 361 if (plane_state->pending.config) {
361 mtk_ddp_comp_layer_config(ovl, i, plane_state); 362 mtk_ddp_comp_layer_config(comp, i, plane_state);
362 plane_state->pending.config = false; 363 plane_state->pending.config = false;
363 } 364 }
364 } 365 }
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
370 struct drm_crtc_state *old_state) 371 struct drm_crtc_state *old_state)
371{ 372{
372 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 373 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
373 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 374 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
374 int ret; 375 int ret;
375 376
376 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); 377 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
377 378
378 ret = mtk_smi_larb_get(ovl->larb_dev); 379 ret = mtk_smi_larb_get(comp->larb_dev);
379 if (ret) { 380 if (ret) {
380 DRM_ERROR("Failed to get larb: %d\n", ret); 381 DRM_ERROR("Failed to get larb: %d\n", ret);
381 return; 382 return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
383 384
384 ret = mtk_crtc_ddp_hw_init(mtk_crtc); 385 ret = mtk_crtc_ddp_hw_init(mtk_crtc);
385 if (ret) { 386 if (ret) {
386 mtk_smi_larb_put(ovl->larb_dev); 387 mtk_smi_larb_put(comp->larb_dev);
387 return; 388 return;
388 } 389 }
389 390
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
395 struct drm_crtc_state *old_state) 396 struct drm_crtc_state *old_state)
396{ 397{
397 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 398 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
398 struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; 399 struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
399 int i; 400 int i;
400 401
401 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id); 402 DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
403 return; 404 return;
404 405
405 /* Set all pending plane state to disabled */ 406 /* Set all pending plane state to disabled */
406 for (i = 0; i < OVL_LAYER_NR; i++) { 407 for (i = 0; i < mtk_crtc->layer_nr; i++) {
407 struct drm_plane *plane = &mtk_crtc->planes[i]; 408 struct drm_plane *plane = &mtk_crtc->planes[i];
408 struct mtk_plane_state *plane_state; 409 struct mtk_plane_state *plane_state;
409 410
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
418 419
419 drm_crtc_vblank_off(crtc); 420 drm_crtc_vblank_off(crtc);
420 mtk_crtc_ddp_hw_fini(mtk_crtc); 421 mtk_crtc_ddp_hw_fini(mtk_crtc);
421 mtk_smi_larb_put(ovl->larb_dev); 422 mtk_smi_larb_put(comp->larb_dev);
422 423
423 mtk_crtc->enabled = false; 424 mtk_crtc->enabled = false;
424} 425}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
450 451
451 if (mtk_crtc->event) 452 if (mtk_crtc->event)
452 mtk_crtc->pending_needs_vblank = true; 453 mtk_crtc->pending_needs_vblank = true;
453 for (i = 0; i < OVL_LAYER_NR; i++) { 454 for (i = 0; i < mtk_crtc->layer_nr; i++) {
454 struct drm_plane *plane = &mtk_crtc->planes[i]; 455 struct drm_plane *plane = &mtk_crtc->planes[i];
455 struct mtk_plane_state *plane_state; 456 struct mtk_plane_state *plane_state;
456 457
@@ -516,7 +517,7 @@ err_cleanup_crtc:
516 return ret; 517 return ret;
517} 518}
518 519
519void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) 520void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
520{ 521{
521 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 522 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
522 struct mtk_drm_private *priv = crtc->dev->dev_private; 523 struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
598 mtk_crtc->ddp_comp[i] = comp; 599 mtk_crtc->ddp_comp[i] = comp;
599 } 600 }
600 601
601 for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) { 602 mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
603 mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
604 sizeof(struct drm_plane),
605 GFP_KERNEL);
606
607 for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
602 type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : 608 type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
603 (zpos == 1) ? DRM_PLANE_TYPE_CURSOR : 609 (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
604 DRM_PLANE_TYPE_OVERLAY; 610 DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
609 } 615 }
610 616
611 ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0], 617 ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
612 &mtk_crtc->planes[1], pipe); 618 mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
619 NULL, pipe);
613 if (ret < 0) 620 if (ret < 0)
614 goto unprepare; 621 goto unprepare;
615 drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE); 622 drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
18#include "mtk_drm_ddp_comp.h" 18#include "mtk_drm_ddp_comp.h"
19#include "mtk_drm_plane.h" 19#include "mtk_drm_plane.h"
20 20
21#define OVL_LAYER_NR 4
22#define MTK_LUT_SIZE 512 21#define MTK_LUT_SIZE 512
23#define MTK_MAX_BPC 10 22#define MTK_MAX_BPC 10
24#define MTK_MIN_BPC 3 23#define MTK_MIN_BPC 3
25 24
26void mtk_drm_crtc_commit(struct drm_crtc *crtc); 25void mtk_drm_crtc_commit(struct drm_crtc *crtc);
27void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl); 26void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
28int mtk_drm_crtc_create(struct drm_device *drm_dev, 27int mtk_drm_crtc_create(struct drm_device *drm_dev,
29 const enum mtk_ddp_comp_id *path, 28 const enum mtk_ddp_comp_id *path,
30 unsigned int path_len); 29 unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
106#define OVL1_MOUT_EN_COLOR1 0x1 106#define OVL1_MOUT_EN_COLOR1 0x1
107#define GAMMA_MOUT_EN_RDMA1 0x1 107#define GAMMA_MOUT_EN_RDMA1 0x1
108#define RDMA0_SOUT_DPI0 0x2 108#define RDMA0_SOUT_DPI0 0x2
109#define RDMA0_SOUT_DPI1 0x3
110#define RDMA0_SOUT_DSI1 0x1
109#define RDMA0_SOUT_DSI2 0x4 111#define RDMA0_SOUT_DSI2 0x4
110#define RDMA0_SOUT_DSI3 0x5 112#define RDMA0_SOUT_DSI3 0x5
111#define RDMA1_SOUT_DPI0 0x2 113#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
122#define DPI0_SEL_IN_RDMA2 0x3 124#define DPI0_SEL_IN_RDMA2 0x3
123#define DPI1_SEL_IN_RDMA1 (0x1 << 8) 125#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
124#define DPI1_SEL_IN_RDMA2 (0x3 << 8) 126#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
127#define DSI0_SEL_IN_RDMA1 0x1
128#define DSI0_SEL_IN_RDMA2 0x4
125#define DSI1_SEL_IN_RDMA1 0x1 129#define DSI1_SEL_IN_RDMA1 0x1
126#define DSI1_SEL_IN_RDMA2 0x4 130#define DSI1_SEL_IN_RDMA2 0x4
127#define DSI2_SEL_IN_RDMA1 (0x1 << 16) 131#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
224 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) { 228 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
225 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; 229 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
226 value = RDMA0_SOUT_DPI0; 230 value = RDMA0_SOUT_DPI0;
231 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
232 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
233 value = RDMA0_SOUT_DPI1;
234 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
235 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
236 value = RDMA0_SOUT_DSI1;
227 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) { 237 } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
228 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN; 238 *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
229 value = RDMA0_SOUT_DSI2; 239 value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
282 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) { 292 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
283 *addr = DISP_REG_CONFIG_DPI_SEL_IN; 293 *addr = DISP_REG_CONFIG_DPI_SEL_IN;
284 value = DPI1_SEL_IN_RDMA1; 294 value = DPI1_SEL_IN_RDMA1;
295 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
296 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
297 value = DSI0_SEL_IN_RDMA1;
285 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) { 298 } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
286 *addr = DISP_REG_CONFIG_DSIO_SEL_IN; 299 *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
287 value = DSI1_SEL_IN_RDMA1; 300 value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
297 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) { 310 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
298 *addr = DISP_REG_CONFIG_DPI_SEL_IN; 311 *addr = DISP_REG_CONFIG_DPI_SEL_IN;
299 value = DPI1_SEL_IN_RDMA2; 312 value = DPI1_SEL_IN_RDMA2;
300 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) { 313 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
301 *addr = DISP_REG_CONFIG_DSIE_SEL_IN; 314 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
315 value = DSI0_SEL_IN_RDMA2;
316 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
317 *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
302 value = DSI1_SEL_IN_RDMA2; 318 value = DSI1_SEL_IN_RDMA2;
303 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) { 319 } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
304 *addr = DISP_REG_CONFIG_DSIE_SEL_IN; 320 *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
78 void (*stop)(struct mtk_ddp_comp *comp); 78 void (*stop)(struct mtk_ddp_comp *comp);
79 void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); 79 void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
80 void (*disable_vblank)(struct mtk_ddp_comp *comp); 80 void (*disable_vblank)(struct mtk_ddp_comp *comp);
81 unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
81 void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); 82 void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
82 void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); 83 void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
83 void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, 84 void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
128 comp->funcs->disable_vblank(comp); 129 comp->funcs->disable_vblank(comp);
129} 130}
130 131
132static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
133{
134 if (comp->funcs && comp->funcs->layer_nr)
135 return comp->funcs->layer_nr(comp);
136
137 return 0;
138}
139
131static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp, 140static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
132 unsigned int idx) 141 unsigned int idx)
133{ 142{
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
381err_deinit: 381err_deinit:
382 mtk_drm_kms_deinit(drm); 382 mtk_drm_kms_deinit(drm);
383err_free: 383err_free:
384 drm_dev_unref(drm); 384 drm_dev_put(drm);
385 return ret; 385 return ret;
386} 386}
387 387
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
390 struct mtk_drm_private *private = dev_get_drvdata(dev); 390 struct mtk_drm_private *private = dev_get_drvdata(dev);
391 391
392 drm_dev_unregister(private->drm); 392 drm_dev_unregister(private->drm);
393 drm_dev_unref(private->drm); 393 drm_dev_put(private->drm);
394 private->drm = NULL; 394 private->drm = NULL;
395} 395}
396 396
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
564 564
565 drm_dev_unregister(drm); 565 drm_dev_unregister(drm);
566 mtk_drm_kms_deinit(drm); 566 mtk_drm_kms_deinit(drm);
567 drm_dev_unref(drm); 567 drm_dev_put(drm);
568 568
569 component_master_del(&pdev->dev, &mtk_drm_ops); 569 component_master_del(&pdev->dev, &mtk_drm_ops);
570 pm_runtime_disable(&pdev->dev); 570 pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
580{ 580{
581 struct mtk_drm_private *private = dev_get_drvdata(dev); 581 struct mtk_drm_private *private = dev_get_drvdata(dev);
582 struct drm_device *drm = private->drm; 582 struct drm_device *drm = private->drm;
583 int ret;
583 584
584 drm_kms_helper_poll_disable(drm); 585 ret = drm_mode_config_helper_suspend(drm);
585
586 private->suspend_state = drm_atomic_helper_suspend(drm);
587 if (IS_ERR(private->suspend_state)) {
588 drm_kms_helper_poll_enable(drm);
589 return PTR_ERR(private->suspend_state);
590 }
591
592 DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); 586 DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
593 return 0; 587
588 return ret;
594} 589}
595 590
596static int mtk_drm_sys_resume(struct device *dev) 591static int mtk_drm_sys_resume(struct device *dev)
597{ 592{
598 struct mtk_drm_private *private = dev_get_drvdata(dev); 593 struct mtk_drm_private *private = dev_get_drvdata(dev);
599 struct drm_device *drm = private->drm; 594 struct drm_device *drm = private->drm;
595 int ret;
600 596
601 drm_atomic_helper_resume(drm, private->suspend_state); 597 ret = drm_mode_config_helper_resume(drm);
602 drm_kms_helper_poll_enable(drm);
603
604 DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); 598 DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
605 return 0; 599
600 return ret;
606} 601}
607#endif 602#endif
608 603
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..5691dfa1db6f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
1123 int ret; 1123 int ret;
1124 1124
1125 if (dpcd >= 0x12) { 1125 if (dpcd >= 0x12) {
1126 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd); 1126 /* Even if we're enabling MST, start with disabling the
1127 * branching unit to clear any sink-side MST topology state
1128 * that wasn't set by us
1129 */
1130 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
1127 if (ret < 0) 1131 if (ret < 0)
1128 return ret; 1132 return ret;
1129 1133
1130 dpcd &= ~DP_MST_EN; 1134 if (state) {
1131 if (state) 1135 /* Now, start initializing */
1132 dpcd |= DP_MST_EN; 1136 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
1133 1137 DP_MST_EN);
1134 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd); 1138 if (ret < 0)
1135 if (ret < 0) 1139 return ret;
1136 return ret; 1140 }
1137 } 1141 }
1138 1142
1139 return nvif_mthd(disp, 0, &args, sizeof(args)); 1143 return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
1142int 1146int
1143nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) 1147nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
1144{ 1148{
1145 int ret, state = 0; 1149 struct drm_dp_aux *aux;
1150 int ret;
1151 bool old_state, new_state;
1152 u8 mstm_ctrl;
1146 1153
1147 if (!mstm) 1154 if (!mstm)
1148 return 0; 1155 return 0;
1149 1156
1150 if (dpcd[0] >= 0x12) { 1157 mutex_lock(&mstm->mgr.lock);
1151 ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]); 1158
1159 old_state = mstm->mgr.mst_state;
1160 new_state = old_state;
1161 aux = mstm->mgr.aux;
1162
1163 if (old_state) {
1164 /* Just check that the MST hub is still as we expect it */
1165 ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
1166 if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
1167 DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
1168 new_state = false;
1169 }
1170 } else if (dpcd[0] >= 0x12) {
1171 ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
1152 if (ret < 0) 1172 if (ret < 0)
1153 return ret; 1173 goto probe_error;
1154 1174
1155 if (!(dpcd[1] & DP_MST_CAP)) 1175 if (!(dpcd[1] & DP_MST_CAP))
1156 dpcd[0] = 0x11; 1176 dpcd[0] = 0x11;
1157 else 1177 else
1158 state = allow; 1178 new_state = allow;
1179 }
1180
1181 if (new_state == old_state) {
1182 mutex_unlock(&mstm->mgr.lock);
1183 return new_state;
1159 } 1184 }
1160 1185
1161 ret = nv50_mstm_enable(mstm, dpcd[0], state); 1186 ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
1162 if (ret) 1187 if (ret)
1163 return ret; 1188 goto probe_error;
1189
1190 mutex_unlock(&mstm->mgr.lock);
1164 1191
1165 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state); 1192 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
1166 if (ret) 1193 if (ret)
1167 return nv50_mstm_enable(mstm, dpcd[0], 0); 1194 return nv50_mstm_enable(mstm, dpcd[0], 0);
1168 1195
1169 return mstm->mgr.mst_state; 1196 return new_state;
1197
1198probe_error:
1199 mutex_unlock(&mstm->mgr.lock);
1200 return ret;
1170} 1201}
1171 1202
1172static void 1203static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
2074static const struct drm_mode_config_funcs 2105static const struct drm_mode_config_funcs
2075nv50_disp_func = { 2106nv50_disp_func = {
2076 .fb_create = nouveau_user_framebuffer_create, 2107 .fb_create = nouveau_user_framebuffer_create,
2077 .output_poll_changed = drm_fb_helper_output_poll_changed, 2108 .output_poll_changed = nouveau_fbcon_output_poll_changed,
2078 .atomic_check = nv50_disp_atomic_check, 2109 .atomic_check = nv50_disp_atomic_check,
2079 .atomic_commit = nv50_disp_atomic_commit, 2110 .atomic_commit = nv50_disp_atomic_commit,
2080 .atomic_state_alloc = nv50_disp_atomic_state_alloc, 2111 .atomic_state_alloc = nv50_disp_atomic_state_alloc,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 51932c72334e..247f72cc4d10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
409nouveau_connector_ddc_detect(struct drm_connector *connector) 409nouveau_connector_ddc_detect(struct drm_connector *connector)
410{ 410{
411 struct drm_device *dev = connector->dev; 411 struct drm_device *dev = connector->dev;
412 struct nouveau_connector *nv_connector = nouveau_connector(connector); 412 struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
413 struct nouveau_drm *drm = nouveau_drm(dev);
414 struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
415 struct nouveau_encoder *nv_encoder = NULL;
416 struct drm_encoder *encoder; 413 struct drm_encoder *encoder;
417 int i, panel = -ENODEV; 414 int i, ret;
418 415 bool switcheroo_ddc = false;
419 /* eDP panels need powering on by us (if the VBIOS doesn't default it
420 * to on) before doing any AUX channel transactions. LVDS panel power
421 * is handled by the SOR itself, and not required for LVDS DDC.
422 */
423 if (nv_connector->type == DCB_CONNECTOR_eDP) {
424 panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
425 if (panel == 0) {
426 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
427 msleep(300);
428 }
429 }
430 416
431 drm_connector_for_each_possible_encoder(connector, encoder, i) { 417 drm_connector_for_each_possible_encoder(connector, encoder, i) {
432 nv_encoder = nouveau_encoder(encoder); 418 nv_encoder = nouveau_encoder(encoder);
433 419
434 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 420 switch (nv_encoder->dcb->type) {
435 int ret = nouveau_dp_detect(nv_encoder); 421 case DCB_OUTPUT_DP:
422 ret = nouveau_dp_detect(nv_encoder);
436 if (ret == NOUVEAU_DP_MST) 423 if (ret == NOUVEAU_DP_MST)
437 return NULL; 424 return NULL;
438 if (ret == NOUVEAU_DP_SST) 425 else if (ret == NOUVEAU_DP_SST)
439 break; 426 found = nv_encoder;
440 } else 427
441 if ((vga_switcheroo_handler_flags() & 428 break;
442 VGA_SWITCHEROO_CAN_SWITCH_DDC) && 429 case DCB_OUTPUT_LVDS:
443 nv_encoder->dcb->type == DCB_OUTPUT_LVDS && 430 switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
444 nv_encoder->i2c) { 431 VGA_SWITCHEROO_CAN_SWITCH_DDC);
445 int ret; 432 /* fall-through */
446 vga_switcheroo_lock_ddc(dev->pdev); 433 default:
447 ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50); 434 if (!nv_encoder->i2c)
448 vga_switcheroo_unlock_ddc(dev->pdev);
449 if (ret)
450 break; 435 break;
451 } else 436
452 if (nv_encoder->i2c) { 437 if (switcheroo_ddc)
438 vga_switcheroo_lock_ddc(dev->pdev);
453 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) 439 if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
454 break; 440 found = nv_encoder;
441 if (switcheroo_ddc)
442 vga_switcheroo_unlock_ddc(dev->pdev);
443
444 break;
455 } 445 }
446 if (found)
447 break;
456 } 448 }
457 449
458 /* eDP panel not detected, restore panel power GPIO to previous 450 return found;
459 * state to avoid confusing the SOR for other output types.
460 */
461 if (!nv_encoder && panel == 0)
462 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
463
464 return nv_encoder;
465} 451}
466 452
467static struct nouveau_encoder * 453static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
555 nv_connector->edid = NULL; 541 nv_connector->edid = NULL;
556 } 542 }
557 543
558 /* Outputs are only polled while runtime active, so acquiring a 544 /* Outputs are only polled while runtime active, so resuming the
559 * runtime PM ref here is unnecessary (and would deadlock upon 545 * device here is unnecessary (and would deadlock upon runtime suspend
560 * runtime suspend because it waits for polling to finish). 546 * because it waits for polling to finish). We do however, want to
547 * prevent the autosuspend timer from elapsing during this operation
548 * if possible.
561 */ 549 */
562 if (!drm_kms_helper_is_poll_worker()) { 550 if (drm_kms_helper_is_poll_worker()) {
563 ret = pm_runtime_get_sync(connector->dev->dev); 551 pm_runtime_get_noresume(dev->dev);
552 } else {
553 ret = pm_runtime_get_sync(dev->dev);
564 if (ret < 0 && ret != -EACCES) 554 if (ret < 0 && ret != -EACCES)
565 return conn_status; 555 return conn_status;
566 } 556 }
@@ -638,10 +628,8 @@ detect_analog:
638 628
639 out: 629 out:
640 630
641 if (!drm_kms_helper_is_poll_worker()) { 631 pm_runtime_mark_last_busy(dev->dev);
642 pm_runtime_mark_last_busy(connector->dev->dev); 632 pm_runtime_put_autosuspend(dev->dev);
643 pm_runtime_put_autosuspend(connector->dev->dev);
644 }
645 633
646 return conn_status; 634 return conn_status;
647} 635}
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
1105 const struct nvif_notify_conn_rep_v0 *rep = notify->data; 1093 const struct nvif_notify_conn_rep_v0 *rep = notify->data;
1106 const char *name = connector->name; 1094 const char *name = connector->name;
1107 struct nouveau_encoder *nv_encoder; 1095 struct nouveau_encoder *nv_encoder;
1096 int ret;
1097
1098 ret = pm_runtime_get(drm->dev->dev);
1099 if (ret == 0) {
1100 /* We can't block here if there's a pending PM request
1101 * running, as we'll deadlock nouveau_display_fini() when it
1102 * calls nvif_put() on our nvif_notify struct. So, simply
1103 * defer the hotplug event until the device finishes resuming
1104 */
1105 NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
1106 name);
1107 schedule_work(&drm->hpd_work);
1108
1109 pm_runtime_put_noidle(drm->dev->dev);
1110 return NVIF_NOTIFY_KEEP;
1111 } else if (ret != 1 && ret != -EACCES) {
1112 NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
1113 name, ret);
1114 return NVIF_NOTIFY_DROP;
1115 }
1108 1116
1109 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { 1117 if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
1110 NV_DEBUG(drm, "service %s\n", name); 1118 NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
1122 drm_helper_hpd_irq_event(connector->dev); 1130 drm_helper_hpd_irq_event(connector->dev);
1123 } 1131 }
1124 1132
1133 pm_runtime_mark_last_busy(drm->dev->dev);
1134 pm_runtime_put_autosuspend(drm->dev->dev);
1125 return NVIF_NOTIFY_KEEP; 1135 return NVIF_NOTIFY_KEEP;
1126} 1136}
1127 1137
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 139368b31916..540c0cbbfcee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
293 293
294static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 294static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
295 .fb_create = nouveau_user_framebuffer_create, 295 .fb_create = nouveau_user_framebuffer_create,
296 .output_poll_changed = drm_fb_helper_output_poll_changed, 296 .output_poll_changed = nouveau_fbcon_output_poll_changed,
297}; 297};
298 298
299 299
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
355 pm_runtime_get_sync(drm->dev->dev); 355 pm_runtime_get_sync(drm->dev->dev);
356 356
357 drm_helper_hpd_irq_event(drm->dev); 357 drm_helper_hpd_irq_event(drm->dev);
358 /* enable polling for external displays */
359 drm_kms_helper_poll_enable(drm->dev);
360 358
361 pm_runtime_mark_last_busy(drm->dev->dev); 359 pm_runtime_mark_last_busy(drm->dev->dev);
362 pm_runtime_put_sync(drm->dev->dev); 360 pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
379{ 377{
380 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); 378 struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
381 struct acpi_bus_event *info = data; 379 struct acpi_bus_event *info = data;
380 int ret;
382 381
383 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { 382 if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
384 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { 383 if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
385 /* 384 ret = pm_runtime_get(drm->dev->dev);
386 * This may be the only indication we receive of a 385 if (ret == 1 || ret == -EACCES) {
387 * connector hotplug on a runtime suspended GPU, 386 /* If the GPU is already awake, or in a state
388 * schedule hpd_work to check. 387 * where we can't wake it up, it can handle
389 */ 388 * it's own hotplug events.
390 schedule_work(&drm->hpd_work); 389 */
390 pm_runtime_put_autosuspend(drm->dev->dev);
391 } else if (ret == 0) {
392 /* This may be the only indication we receive
393 * of a connector hotplug on a runtime
394 * suspended GPU, schedule hpd_work to check.
395 */
396 NV_DEBUG(drm, "ACPI requested connector reprobe\n");
397 schedule_work(&drm->hpd_work);
398 pm_runtime_put_noidle(drm->dev->dev);
399 } else {
400 NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
401 ret);
402 }
391 403
392 /* acpi-video should not generate keypresses for this */ 404 /* acpi-video should not generate keypresses for this */
393 return NOTIFY_BAD; 405 return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
411 if (ret) 423 if (ret)
412 return ret; 424 return ret;
413 425
426 /* enable connector detection and polling for connectors without HPD
427 * support
428 */
429 drm_kms_helper_poll_enable(dev);
430
414 /* enable hotplug interrupts */ 431 /* enable hotplug interrupts */
415 drm_connector_list_iter_begin(dev, &conn_iter); 432 drm_connector_list_iter_begin(dev, &conn_iter);
416 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { 433 nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
425} 442}
426 443
427void 444void
428nouveau_display_fini(struct drm_device *dev, bool suspend) 445nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
429{ 446{
430 struct nouveau_display *disp = nouveau_display(dev); 447 struct nouveau_display *disp = nouveau_display(dev);
431 struct nouveau_drm *drm = nouveau_drm(dev); 448 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
450 } 467 }
451 drm_connector_list_iter_end(&conn_iter); 468 drm_connector_list_iter_end(&conn_iter);
452 469
470 if (!runtime)
471 cancel_work_sync(&drm->hpd_work);
472
453 drm_kms_helper_poll_disable(dev); 473 drm_kms_helper_poll_disable(dev);
454 disp->fini(dev); 474 disp->fini(dev);
455} 475}
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
618 } 638 }
619 } 639 }
620 640
621 nouveau_display_fini(dev, true); 641 nouveau_display_fini(dev, true, runtime);
622 return 0; 642 return 0;
623 } 643 }
624 644
625 nouveau_display_fini(dev, true); 645 nouveau_display_fini(dev, true, runtime);
626 646
627 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 647 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
628 struct nouveau_framebuffer *nouveau_fb; 648 struct nouveau_framebuffer *nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 54aa7c3fa42d..ff92b54ce448 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
62int nouveau_display_create(struct drm_device *dev); 62int nouveau_display_create(struct drm_device *dev);
63void nouveau_display_destroy(struct drm_device *dev); 63void nouveau_display_destroy(struct drm_device *dev);
64int nouveau_display_init(struct drm_device *dev); 64int nouveau_display_init(struct drm_device *dev);
65void nouveau_display_fini(struct drm_device *dev, bool suspend); 65void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
66int nouveau_display_suspend(struct drm_device *dev, bool runtime); 66int nouveau_display_suspend(struct drm_device *dev, bool runtime);
67void nouveau_display_resume(struct drm_device *dev, bool runtime); 67void nouveau_display_resume(struct drm_device *dev, bool runtime);
68int nouveau_display_vblank_enable(struct drm_device *, unsigned int); 68int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c7ec86d6c3c9..74d2283f2c28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
230 mutex_unlock(&drm->master.lock); 230 mutex_unlock(&drm->master.lock);
231 } 231 }
232 if (ret) { 232 if (ret) {
233 NV_ERROR(drm, "Client allocation failed: %d\n", ret); 233 NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
234 goto done; 234 goto done;
235 } 235 }
236 236
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
240 }, sizeof(struct nv_device_v0), 240 }, sizeof(struct nv_device_v0),
241 &cli->device); 241 &cli->device);
242 if (ret) { 242 if (ret) {
243 NV_ERROR(drm, "Device allocation failed: %d\n", ret); 243 NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
244 goto done; 244 goto done;
245 } 245 }
246 246
247 ret = nvif_mclass(&cli->device.object, mmus); 247 ret = nvif_mclass(&cli->device.object, mmus);
248 if (ret < 0) { 248 if (ret < 0) {
249 NV_ERROR(drm, "No supported MMU class\n"); 249 NV_PRINTK(err, cli, "No supported MMU class\n");
250 goto done; 250 goto done;
251 } 251 }
252 252
253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu); 253 ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
254 if (ret) { 254 if (ret) {
255 NV_ERROR(drm, "MMU allocation failed: %d\n", ret); 255 NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
256 goto done; 256 goto done;
257 } 257 }
258 258
259 ret = nvif_mclass(&cli->mmu.object, vmms); 259 ret = nvif_mclass(&cli->mmu.object, vmms);
260 if (ret < 0) { 260 if (ret < 0) {
261 NV_ERROR(drm, "No supported VMM class\n"); 261 NV_PRINTK(err, cli, "No supported VMM class\n");
262 goto done; 262 goto done;
263 } 263 }
264 264
265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm); 265 ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
266 if (ret) { 266 if (ret) {
267 NV_ERROR(drm, "VMM allocation failed: %d\n", ret); 267 NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
268 goto done; 268 goto done;
269 } 269 }
270 270
271 ret = nvif_mclass(&cli->mmu.object, mems); 271 ret = nvif_mclass(&cli->mmu.object, mems);
272 if (ret < 0) { 272 if (ret < 0) {
273 NV_ERROR(drm, "No supported MEM class\n"); 273 NV_PRINTK(err, cli, "No supported MEM class\n");
274 goto done; 274 goto done;
275 } 275 }
276 276
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
592 pm_runtime_allow(dev->dev); 592 pm_runtime_allow(dev->dev);
593 pm_runtime_mark_last_busy(dev->dev); 593 pm_runtime_mark_last_busy(dev->dev);
594 pm_runtime_put(dev->dev); 594 pm_runtime_put(dev->dev);
595 } else {
596 /* enable polling for external displays */
597 drm_kms_helper_poll_enable(dev);
598 } 595 }
596
599 return 0; 597 return 0;
600 598
601fail_dispinit: 599fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
629 nouveau_debugfs_fini(drm); 627 nouveau_debugfs_fini(drm);
630 628
631 if (dev->mode_config.num_crtc) 629 if (dev->mode_config.num_crtc)
632 nouveau_display_fini(dev, false); 630 nouveau_display_fini(dev, false, false);
633 nouveau_display_destroy(dev); 631 nouveau_display_destroy(dev);
634 632
635 nouveau_bios_takedown(dev); 633 nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
835 return -EBUSY; 833 return -EBUSY;
836 } 834 }
837 835
838 drm_kms_helper_poll_disable(drm_dev);
839 nouveau_switcheroo_optimus_dsm(); 836 nouveau_switcheroo_optimus_dsm();
840 ret = nouveau_do_suspend(drm_dev, true); 837 ret = nouveau_do_suspend(drm_dev, true);
841 pci_save_state(pdev); 838 pci_save_state(pdev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..0f64c0a1d4b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
466 console_unlock(); 466 console_unlock();
467 467
468 if (state == FBINFO_STATE_RUNNING) { 468 if (state == FBINFO_STATE_RUNNING) {
469 nouveau_fbcon_hotplug_resume(drm->fbcon);
469 pm_runtime_mark_last_busy(drm->dev->dev); 470 pm_runtime_mark_last_busy(drm->dev->dev);
470 pm_runtime_put_sync(drm->dev->dev); 471 pm_runtime_put_sync(drm->dev->dev);
471 } 472 }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
487 schedule_work(&drm->fbcon_work); 488 schedule_work(&drm->fbcon_work);
488} 489}
489 490
491void
492nouveau_fbcon_output_poll_changed(struct drm_device *dev)
493{
494 struct nouveau_drm *drm = nouveau_drm(dev);
495 struct nouveau_fbdev *fbcon = drm->fbcon;
496 int ret;
497
498 if (!fbcon)
499 return;
500
501 mutex_lock(&fbcon->hotplug_lock);
502
503 ret = pm_runtime_get(dev->dev);
504 if (ret == 1 || ret == -EACCES) {
505 drm_fb_helper_hotplug_event(&fbcon->helper);
506
507 pm_runtime_mark_last_busy(dev->dev);
508 pm_runtime_put_autosuspend(dev->dev);
509 } else if (ret == 0) {
510 /* If the GPU was already in the process of suspending before
511 * this event happened, then we can't block here as we'll
512 * deadlock the runtime pmops since they wait for us to
513 * finish. So, just defer this event for when we runtime
514 * resume again. It will be handled by fbcon_work.
515 */
516 NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
517 fbcon->hotplug_waiting = true;
518 pm_runtime_put_noidle(drm->dev->dev);
519 } else {
520 DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
521 ret);
522 }
523
524 mutex_unlock(&fbcon->hotplug_lock);
525}
526
527void
528nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
529{
530 struct nouveau_drm *drm;
531
532 if (!fbcon)
533 return;
534 drm = nouveau_drm(fbcon->helper.dev);
535
536 mutex_lock(&fbcon->hotplug_lock);
537 if (fbcon->hotplug_waiting) {
538 fbcon->hotplug_waiting = false;
539
540 NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
541 drm_fb_helper_hotplug_event(&fbcon->helper);
542 }
543 mutex_unlock(&fbcon->hotplug_lock);
544}
545
490int 546int
491nouveau_fbcon_init(struct drm_device *dev) 547nouveau_fbcon_init(struct drm_device *dev)
492{ 548{
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
505 561
506 drm->fbcon = fbcon; 562 drm->fbcon = fbcon;
507 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); 563 INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
564 mutex_init(&fbcon->hotplug_lock);
508 565
509 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 566 drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
510 567
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index a6f192ea3fa6..db9d52047ef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
41 struct nvif_object gdi; 41 struct nvif_object gdi;
42 struct nvif_object blit; 42 struct nvif_object blit;
43 struct nvif_object twod; 43 struct nvif_object twod;
44
45 struct mutex hotplug_lock;
46 bool hotplug_waiting;
44}; 47};
45 48
46void nouveau_fbcon_restore(void); 49void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
68void nouveau_fbcon_accel_save_disable(struct drm_device *dev); 71void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
69void nouveau_fbcon_accel_restore(struct drm_device *dev); 72void nouveau_fbcon_accel_restore(struct drm_device *dev);
70 73
74void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
75void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
71extern int nouveau_nofbaccel; 76extern int nouveau_nofbaccel;
72 77
73#endif /* __NV50_FBCON_H__ */ 78#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 3da5a4305aa4..8f1ce4833230 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
46 pr_err("VGA switcheroo: switched nouveau on\n"); 46 pr_err("VGA switcheroo: switched nouveau on\n");
47 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 47 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
48 nouveau_pmops_resume(&pdev->dev); 48 nouveau_pmops_resume(&pdev->dev);
49 drm_kms_helper_poll_enable(dev);
50 dev->switch_power_state = DRM_SWITCH_POWER_ON; 49 dev->switch_power_state = DRM_SWITCH_POWER_ON;
51 } else { 50 } else {
52 pr_err("VGA switcheroo: switched nouveau off\n"); 51 pr_err("VGA switcheroo: switched nouveau off\n");
53 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 52 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
54 drm_kms_helper_poll_disable(dev);
55 nouveau_switcheroo_optimus_dsm(); 53 nouveau_switcheroo_optimus_dsm();
56 nouveau_pmops_suspend(&pdev->dev); 54 nouveau_pmops_suspend(&pdev->dev);
57 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 55 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 32fa94a9773f..cbd33e87b799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
275 struct nvkm_outp *outp, *outt, *pair; 275 struct nvkm_outp *outp, *outt, *pair;
276 struct nvkm_conn *conn; 276 struct nvkm_conn *conn;
277 struct nvkm_head *head; 277 struct nvkm_head *head;
278 struct nvkm_ior *ior;
278 struct nvbios_connE connE; 279 struct nvbios_connE connE;
279 struct dcb_output dcbE; 280 struct dcb_output dcbE;
280 u8 hpd = 0, ver, hdr; 281 u8 hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
399 return ret; 400 return ret;
400 } 401 }
401 402
403 /* Enforce identity-mapped SOR assignment for panels, which have
404 * certain bits (ie. backlight controls) wired to a specific SOR.
405 */
406 list_for_each_entry(outp, &disp->outp, head) {
407 if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
408 outp->conn->info.type == DCB_CONNECTOR_eDP) {
409 ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
410 if (!WARN_ON(!ior))
411 ior->identity = true;
412 outp->identity = true;
413 }
414 }
415
402 i = 0; 416 i = 0;
403 list_for_each_entry(head, &disp->head, head) 417 list_for_each_entry(head, &disp->head, head)
404 i = max(i, head->id + 1); 418 i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 7c5bed29ffef..5f301e632599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -28,6 +28,7 @@
28 28
29#include <subdev/bios.h> 29#include <subdev/bios.h>
30#include <subdev/bios/init.h> 30#include <subdev/bios/init.h>
31#include <subdev/gpio.h>
31#include <subdev/i2c.h> 32#include <subdev/i2c.h>
32 33
33#include <nvif/event.h> 34#include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
412} 413}
413 414
414static void 415static void
415nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior) 416nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
416{ 417{
417 struct nvkm_dp *dp = nvkm_dp(outp); 418 struct nvkm_dp *dp = nvkm_dp(outp);
418 419
419 /* Prevent link from being retrained if sink sends an IRQ. */
420 atomic_set(&dp->lt.done, 0);
421 ior->dp.nr = 0;
422
423 /* Execute DisableLT script from DP Info Table. */ 420 /* Execute DisableLT script from DP Info Table. */
424 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4], 421 nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
425 init.outp = &dp->outp.info; 422 init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
428 ); 425 );
429} 426}
430 427
428static void
429nvkm_dp_release(struct nvkm_outp *outp)
430{
431 struct nvkm_dp *dp = nvkm_dp(outp);
432
433 /* Prevent link from being retrained if sink sends an IRQ. */
434 atomic_set(&dp->lt.done, 0);
435 dp->outp.ior->dp.nr = 0;
436}
437
431static int 438static int
432nvkm_dp_acquire(struct nvkm_outp *outp) 439nvkm_dp_acquire(struct nvkm_outp *outp)
433{ 440{
@@ -491,7 +498,7 @@ done:
491 return ret; 498 return ret;
492} 499}
493 500
494static void 501static bool
495nvkm_dp_enable(struct nvkm_dp *dp, bool enable) 502nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
496{ 503{
497 struct nvkm_i2c_aux *aux = dp->aux; 504 struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
505 512
506 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, 513 if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
507 sizeof(dp->dpcd))) 514 sizeof(dp->dpcd)))
508 return; 515 return true;
509 } 516 }
510 517
511 if (dp->present) { 518 if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
515 } 522 }
516 523
517 atomic_set(&dp->lt.done, 0); 524 atomic_set(&dp->lt.done, 0);
525 return false;
518} 526}
519 527
520static int 528static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
555static void 563static void
556nvkm_dp_init(struct nvkm_outp *outp) 564nvkm_dp_init(struct nvkm_outp *outp)
557{ 565{
566 struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
558 struct nvkm_dp *dp = nvkm_dp(outp); 567 struct nvkm_dp *dp = nvkm_dp(outp);
568
559 nvkm_notify_put(&dp->outp.conn->hpd); 569 nvkm_notify_put(&dp->outp.conn->hpd);
560 nvkm_dp_enable(dp, true); 570
571 /* eDP panels need powering on by us (if the VBIOS doesn't default it
572 * to on) before doing any AUX channel transactions. LVDS panel power
573 * is handled by the SOR itself, and not required for LVDS DDC.
574 */
575 if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
576 int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
577 if (power == 0)
578 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
579
580 /* We delay here unconditionally, even if already powered,
581 * because some laptop panels having a significant resume
582 * delay before the panel begins responding.
583 *
584 * This is likely a bit of a hack, but no better idea for
585 * handling this at the moment.
586 */
587 msleep(300);
588
589 /* If the eDP panel can't be detected, we need to restore
590 * the panel power GPIO to avoid breaking another output.
591 */
592 if (!nvkm_dp_enable(dp, true) && power == 0)
593 nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
594 } else {
595 nvkm_dp_enable(dp, true);
596 }
597
561 nvkm_notify_get(&dp->hpd); 598 nvkm_notify_get(&dp->hpd);
562} 599}
563 600
@@ -576,6 +613,7 @@ nvkm_dp_func = {
576 .fini = nvkm_dp_fini, 613 .fini = nvkm_dp_fini,
577 .acquire = nvkm_dp_acquire, 614 .acquire = nvkm_dp_acquire,
578 .release = nvkm_dp_release, 615 .release = nvkm_dp_release,
616 .disable = nvkm_dp_disable,
579}; 617};
580 618
581static int 619static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index e0b4e0c5704e..19911211a12a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -16,6 +16,7 @@ struct nvkm_ior {
16 char name[8]; 16 char name[8];
17 17
18 struct list_head head; 18 struct list_head head;
19 bool identity;
19 20
20 struct nvkm_ior_state { 21 struct nvkm_ior_state {
21 struct nvkm_outp *outp; 22 struct nvkm_outp *outp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index f89c7b977aa5..def005dd5fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
501 nv50_disp_super_ied_off(head, ior, 2); 501 nv50_disp_super_ied_off(head, ior, 2);
502 502
503 /* If we're shutting down the OR's only active head, execute 503 /* If we're shutting down the OR's only active head, execute
504 * the output path's release function. 504 * the output path's disable function.
505 */ 505 */
506 if (ior->arm.head == (1 << head->id)) { 506 if (ior->arm.head == (1 << head->id)) {
507 if ((outp = ior->arm.outp) && outp->func->release) 507 if ((outp = ior->arm.outp) && outp->func->disable)
508 outp->func->release(outp, ior); 508 outp->func->disable(outp, ior);
509 } 509 }
510} 510}
511 511
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index be9e7f8c3b23..c62030c96fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
93 if (ior) { 93 if (ior) {
94 outp->acquired &= ~user; 94 outp->acquired &= ~user;
95 if (!outp->acquired) { 95 if (!outp->acquired) {
96 if (outp->func->release && outp->ior)
97 outp->func->release(outp);
96 outp->ior->asy.outp = NULL; 98 outp->ior->asy.outp = NULL;
97 outp->ior = NULL; 99 outp->ior = NULL;
98 } 100 }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
127 if (proto == UNKNOWN) 129 if (proto == UNKNOWN)
128 return -ENOSYS; 130 return -ENOSYS;
129 131
132 /* Deal with panels requiring identity-mapped SOR assignment. */
133 if (outp->identity) {
134 ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
135 if (WARN_ON(!ior))
136 return -ENOSPC;
137 return nvkm_outp_acquire_ior(outp, user, ior);
138 }
139
130 /* First preference is to reuse the OR that is currently armed 140 /* First preference is to reuse the OR that is currently armed
131 * on HW, if any, in order to prevent unnecessary switching. 141 * on HW, if any, in order to prevent unnecessary switching.
132 */ 142 */
133 list_for_each_entry(ior, &outp->disp->ior, head) { 143 list_for_each_entry(ior, &outp->disp->ior, head) {
134 if (!ior->asy.outp && ior->arm.outp == outp) 144 if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
135 return nvkm_outp_acquire_ior(outp, user, ior); 145 return nvkm_outp_acquire_ior(outp, user, ior);
136 } 146 }
137 147
138 /* Failing that, a completely unused OR is the next best thing. */ 148 /* Failing that, a completely unused OR is the next best thing. */
139 list_for_each_entry(ior, &outp->disp->ior, head) { 149 list_for_each_entry(ior, &outp->disp->ior, head) {
140 if (!ior->asy.outp && ior->type == type && !ior->arm.outp && 150 if (!ior->identity &&
151 !ior->asy.outp && ior->type == type && !ior->arm.outp &&
141 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 152 (ior->func->route.set || ior->id == __ffs(outp->info.or)))
142 return nvkm_outp_acquire_ior(outp, user, ior); 153 return nvkm_outp_acquire_ior(outp, user, ior);
143 } 154 }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
146 * but will be released during the next modeset. 157 * but will be released during the next modeset.
147 */ 158 */
148 list_for_each_entry(ior, &outp->disp->ior, head) { 159 list_for_each_entry(ior, &outp->disp->ior, head) {
149 if (!ior->asy.outp && ior->type == type && 160 if (!ior->identity && !ior->asy.outp && ior->type == type &&
150 (ior->func->route.set || ior->id == __ffs(outp->info.or))) 161 (ior->func->route.set || ior->id == __ffs(outp->info.or)))
151 return nvkm_outp_acquire_ior(outp, user, ior); 162 return nvkm_outp_acquire_ior(outp, user, ior);
152 } 163 }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
245 outp->index = index; 256 outp->index = index;
246 outp->info = *dcbE; 257 outp->info = *dcbE;
247 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index); 258 outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
248 outp->or = ffs(outp->info.or) - 1;
249 259
250 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x " 260 OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
251 "edid %x bus %d head %x", 261 "edid %x bus %d head %x",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index ea84d7d5741a..6c8aa5cfed9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -13,10 +13,10 @@ struct nvkm_outp {
13 struct dcb_output info; 13 struct dcb_output info;
14 14
15 struct nvkm_i2c_bus *i2c; 15 struct nvkm_i2c_bus *i2c;
16 int or;
17 16
18 struct list_head head; 17 struct list_head head;
19 struct nvkm_conn *conn; 18 struct nvkm_conn *conn;
19 bool identity;
20 20
21 /* Assembly state. */ 21 /* Assembly state. */
22#define NVKM_OUTP_PRIV 1 22#define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
41 void (*init)(struct nvkm_outp *); 41 void (*init)(struct nvkm_outp *);
42 void (*fini)(struct nvkm_outp *); 42 void (*fini)(struct nvkm_outp *);
43 int (*acquire)(struct nvkm_outp *); 43 int (*acquire)(struct nvkm_outp *);
44 void (*release)(struct nvkm_outp *, struct nvkm_ior *); 44 void (*release)(struct nvkm_outp *);
45 void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
45}; 46};
46 47
47#define OUTP_MSG(o,l,f,a...) do { \ 48#define OUTP_MSG(o,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index b80618e35491..17235e940ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
86 struct nvkm_bios *bios = subdev->device->bios; 86 struct nvkm_bios *bios = subdev->device->bios;
87 struct nvbios_pmuR pmu; 87 struct nvbios_pmuR pmu;
88 88
89 if (!nvbios_pmuRm(bios, type, &pmu)) { 89 if (!nvbios_pmuRm(bios, type, &pmu))
90 nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
91 return -EINVAL; 90 return -EINVAL;
92 }
93 91
94 if (!post) 92 if (!post)
95 return 0; 93 return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
124 return -EINVAL; 122 return -EINVAL;
125 } 123 }
126 124
125 /* Upload DEVINIT application from VBIOS onto PMU. */
127 ret = pmu_load(init, 0x04, post, &exec, &args); 126 ret = pmu_load(init, 0x04, post, &exec, &args);
128 if (ret) 127 if (ret) {
128 nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
129 return ret; 129 return ret;
130 }
130 131
131 /* upload first chunk of init data */ 132 /* Upload tables required by opcodes in boot scripts. */
132 if (post) { 133 if (post) {
133 // devinit tables
134 u32 pmu = pmu_args(init, args + 0x08, 0x08); 134 u32 pmu = pmu_args(init, args + 0x08, 0x08);
135 u32 img = nvbios_rd16(bios, bit_I.offset + 0x14); 135 u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
136 u32 len = nvbios_rd16(bios, bit_I.offset + 0x16); 136 u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
137 pmu_data(init, pmu, img, len); 137 pmu_data(init, pmu, img, len);
138 } 138 }
139 139
140 /* upload second chunk of init data */ 140 /* Upload boot scripts. */
141 if (post) { 141 if (post) {
142 // devinit boot scripts
143 u32 pmu = pmu_args(init, args + 0x08, 0x10); 142 u32 pmu = pmu_args(init, args + 0x08, 0x10);
144 u32 img = nvbios_rd16(bios, bit_I.offset + 0x18); 143 u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
145 u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a); 144 u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
146 pmu_data(init, pmu, img, len); 145 pmu_data(init, pmu, img, len);
147 } 146 }
148 147
149 /* execute init tables */ 148 /* Execute DEVINIT. */
150 if (post) { 149 if (post) {
151 nvkm_wr32(device, 0x10a040, 0x00005000); 150 nvkm_wr32(device, 0x10a040, 0x00005000);
152 pmu_exec(init, exec); 151 pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
157 return -ETIMEDOUT; 156 return -ETIMEDOUT;
158 } 157 }
159 158
160 /* load and execute some other ucode image (bios therm?) */ 159 /* Optional: Execute PRE_OS application on PMU, which should at
161 return pmu_load(init, 0x01, post, NULL, NULL); 160 * least take care of fans until a full PMU has been loaded.
161 */
162 pmu_load(init, 0x01, post, NULL, NULL);
163 return 0;
162} 164}
163 165
164static const struct nvkm_devinit_func 166static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index de269eb482dd..7459def78d50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1423void 1423void
1424nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) 1424nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1425{ 1425{
1426 if (vmm->func->part && inst) { 1426 if (inst && vmm->func->part) {
1427 mutex_lock(&vmm->mutex); 1427 mutex_lock(&vmm->mutex);
1428 vmm->func->part(vmm, inst); 1428 vmm->func->part(vmm, inst);
1429 mutex_unlock(&vmm->mutex); 1429 mutex_unlock(&vmm->mutex);
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index a534b225e31b..5fa0441bb6df 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
111} 111}
112 112
113static const struct of_device_id vexpress_muxfpga_match[] = { 113static const struct of_device_id vexpress_muxfpga_match[] = {
114 { .compatible = "arm,vexpress-muxfpga", } 114 { .compatible = "arm,vexpress-muxfpga", },
115 {}
115}; 116};
116 117
117static struct platform_driver vexpress_muxfpga_driver = { 118static struct platform_driver vexpress_muxfpga_driver = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..8b0cd08034e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
418 { .compatible = "allwinner,sun8i-a33-display-engine" }, 418 { .compatible = "allwinner,sun8i-a33-display-engine" },
419 { .compatible = "allwinner,sun8i-a83t-display-engine" }, 419 { .compatible = "allwinner,sun8i-a83t-display-engine" },
420 { .compatible = "allwinner,sun8i-h3-display-engine" }, 420 { .compatible = "allwinner,sun8i-h3-display-engine" },
421 { .compatible = "allwinner,sun8i-r40-display-engine" },
422 { .compatible = "allwinner,sun8i-v3s-display-engine" }, 421 { .compatible = "allwinner,sun8i-v3s-display-engine" },
423 { .compatible = "allwinner,sun9i-a80-display-engine" }, 422 { .compatible = "allwinner,sun9i-a80-display-engine" },
424 { } 423 { }
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..a564b5dfe082 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
398 398
399static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { 399static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
400 .has_phy_clk = true, 400 .has_phy_clk = true,
401 .has_second_pll = true,
402 .phy_init = &sun8i_hdmi_phy_init_h3, 401 .phy_init = &sun8i_hdmi_phy_init_h3,
403 .phy_disable = &sun8i_hdmi_phy_disable_h3, 402 .phy_disable = &sun8i_hdmi_phy_disable_h3,
404 .phy_config = &sun8i_hdmi_phy_config_h3, 403 .phy_config = &sun8i_hdmi_phy_config_h3,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..cb65b0ed53fd 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
545 .vi_num = 1, 545 .vi_num = 1,
546}; 546};
547 547
548static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
549 .ccsc = 0,
550 .mod_rate = 297000000,
551 .scaler_mask = 0xf,
552 .ui_num = 3,
553 .vi_num = 1,
554};
555
556static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
557 .ccsc = 1,
558 .mod_rate = 297000000,
559 .scaler_mask = 0x3,
560 .ui_num = 1,
561 .vi_num = 1,
562};
563
564static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { 548static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
565 .vi_num = 2, 549 .vi_num = 2,
566 .ui_num = 1, 550 .ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
583 .data = &sun8i_h3_mixer0_cfg, 567 .data = &sun8i_h3_mixer0_cfg,
584 }, 568 },
585 { 569 {
586 .compatible = "allwinner,sun8i-r40-de2-mixer-0",
587 .data = &sun8i_r40_mixer0_cfg,
588 },
589 {
590 .compatible = "allwinner,sun8i-r40-de2-mixer-1",
591 .data = &sun8i_r40_mixer1_cfg,
592 },
593 {
594 .compatible = "allwinner,sun8i-v3s-de2-mixer", 570 .compatible = "allwinner,sun8i-v3s-de2-mixer",
595 .data = &sun8i_v3s_mixer_cfg, 571 .data = &sun8i_v3s_mixer_cfg,
596 }, 572 },
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..d5240b777a8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
253 253
254/* sun4i_drv uses this list to check if a device node is a TCON TOP */ 254/* sun4i_drv uses this list to check if a device node is a TCON TOP */
255const struct of_device_id sun8i_tcon_top_of_table[] = { 255const struct of_device_id sun8i_tcon_top_of_table[] = {
256 { .compatible = "allwinner,sun8i-r40-tcon-top" },
257 { /* sentinel */ } 256 { /* sentinel */ }
258}; 257};
259MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); 258MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbb62f6eb48a..dd9ffded223b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
432{ 432{
433 drm_fb_helper_unregister_fbi(&ufbdev->helper); 433 drm_fb_helper_unregister_fbi(&ufbdev->helper);
434 drm_fb_helper_fini(&ufbdev->helper); 434 drm_fb_helper_fini(&ufbdev->helper);
435 drm_framebuffer_unregister_private(&ufbdev->ufb.base); 435 if (ufbdev->ufb.obj) {
436 drm_framebuffer_cleanup(&ufbdev->ufb.base); 436 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
437 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base); 437 drm_framebuffer_cleanup(&ufbdev->ufb.base);
438 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
439 }
438} 440}
439 441
440int udl_fbdev_init(struct drm_device *dev) 442int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..a3275fa66b7b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
297 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], 297 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
298 vc4_state->crtc_h); 298 vc4_state->crtc_h);
299 299
300 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
301 vc4_state->y_scaling[0] == VC4_SCALING_NONE);
302
300 if (num_planes > 1) { 303 if (num_planes > 1) {
301 vc4_state->is_yuv = true; 304 vc4_state->is_yuv = true;
302 305
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
312 vc4_get_scaling_mode(vc4_state->src_h[1], 315 vc4_get_scaling_mode(vc4_state->src_h[1],
313 vc4_state->crtc_h); 316 vc4_state->crtc_h);
314 317
315 /* YUV conversion requires that scaling be enabled, 318 /* YUV conversion requires that horizontal scaling be enabled,
316 * even on a plane that's otherwise 1:1. Choose TPZ 319 * even on a plane that's otherwise 1:1. Looks like only PPF
317 * for simplicity. 320 * works in that case, so let's pick that one.
318 */ 321 */
319 if (vc4_state->x_scaling[0] == VC4_SCALING_NONE) 322 if (vc4_state->is_unity)
320 vc4_state->x_scaling[0] = VC4_SCALING_TPZ; 323 vc4_state->x_scaling[0] = VC4_SCALING_PPF;
321 if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
322 vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
323 } else { 324 } else {
324 vc4_state->x_scaling[1] = VC4_SCALING_NONE; 325 vc4_state->x_scaling[1] = VC4_SCALING_NONE;
325 vc4_state->y_scaling[1] = VC4_SCALING_NONE; 326 vc4_state->y_scaling[1] = VC4_SCALING_NONE;
326 } 327 }
327 328
328 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
329 vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
330 vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
331 vc4_state->y_scaling[1] == VC4_SCALING_NONE);
332
333 /* No configuring scaling on the cursor plane, since it gets 329 /* No configuring scaling on the cursor plane, since it gets
334 non-vblank-synced updates, and scaling requires requires 330 non-vblank-synced updates, and scaling requires requires
335 LBM changes which have to be vblank-synced. 331 LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
672 vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); 668 vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
673 } 669 }
674 670
675 if (!vc4_state->is_unity) { 671 if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
672 vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
673 vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
674 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
676 /* LBM Base Address. */ 675 /* LBM Base Address. */
677 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || 676 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
678 vc4_state->y_scaling[1] != VC4_SCALING_NONE) { 677 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1f134570b759..f0ab6b2313bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3729{ 3729{
3730 struct vmw_buffer_object *vbo = 3730 struct vmw_buffer_object *vbo =
3731 container_of(bo, struct vmw_buffer_object, base); 3731 container_of(bo, struct vmw_buffer_object, base);
3732 struct ttm_operation_ctx ctx = { interruptible, true }; 3732 struct ttm_operation_ctx ctx = { interruptible, false };
3733 int ret; 3733 int ret;
3734 3734
3735 if (vbo->pin_count > 0) 3735 if (vbo->pin_count > 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..6a712a8d59e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
1512 struct drm_rect *rects) 1512 struct drm_rect *rects)
1513{ 1513{
1514 struct vmw_private *dev_priv = vmw_priv(dev); 1514 struct vmw_private *dev_priv = vmw_priv(dev);
1515 struct drm_mode_config *mode_config = &dev->mode_config;
1516 struct drm_rect bounding_box = {0}; 1515 struct drm_rect bounding_box = {0};
1517 u64 total_pixels = 0, pixel_mem, bb_mem; 1516 u64 total_pixels = 0, pixel_mem, bb_mem;
1518 int i; 1517 int i;
1519 1518
1520 for (i = 0; i < num_rects; i++) { 1519 for (i = 0; i < num_rects; i++) {
1521 /* 1520 /*
1522 * Currently this check is limiting the topology within max 1521 * For STDU only individual screen (screen target) is limited by
1523 * texture/screentarget size. This should change in future when 1522 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1524 * user-space support multiple fb with topology.
1525 */ 1523 */
1526 if (rects[i].x1 < 0 || rects[i].y1 < 0 || 1524 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1527 rects[i].x2 > mode_config->max_width || 1525 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1528 rects[i].y2 > mode_config->max_height) { 1526 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1529 DRM_ERROR("Invalid GUI layout.\n"); 1527 DRM_ERROR("Screen size not supported.\n");
1530 return -EINVAL; 1528 return -EINVAL;
1531 } 1529 }
1532 1530
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
1615 struct drm_connector_state *conn_state; 1613 struct drm_connector_state *conn_state;
1616 struct vmw_connector_state *vmw_conn_state; 1614 struct vmw_connector_state *vmw_conn_state;
1617 1615
1618 if (!new_crtc_state->enable && old_crtc_state->enable) { 1616 if (!new_crtc_state->enable) {
1619 rects[i].x1 = 0; 1617 rects[i].x1 = 0;
1620 rects[i].y1 = 0; 1618 rects[i].y1 = 0;
1621 rects[i].x2 = 0; 1619 rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2216 if (dev_priv->assume_16bpp) 2214 if (dev_priv->assume_16bpp)
2217 assumed_bpp = 2; 2215 assumed_bpp = 2;
2218 2216
2217 max_width = min(max_width, dev_priv->texture_max_width);
2218 max_height = min(max_height, dev_priv->texture_max_height);
2219
2220 /*
2221 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2222 * HEIGHT registers.
2223 */
2219 if (dev_priv->active_display_unit == vmw_du_screen_target) { 2224 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2220 max_width = min(max_width, dev_priv->stdu_max_width); 2225 max_width = min(max_width, dev_priv->stdu_max_width);
2221 max_width = min(max_width, dev_priv->texture_max_width);
2222
2223 max_height = min(max_height, dev_priv->stdu_max_height); 2226 max_height = min(max_height, dev_priv->stdu_max_height);
2224 max_height = min(max_height, dev_priv->texture_max_height);
2225 } 2227 }
2226 2228
2227 /* Add preferred mode */ 2229 /* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2376 struct drm_file *file_priv) 2378 struct drm_file *file_priv)
2377{ 2379{
2378 struct vmw_private *dev_priv = vmw_priv(dev); 2380 struct vmw_private *dev_priv = vmw_priv(dev);
2381 struct drm_mode_config *mode_config = &dev->mode_config;
2379 struct drm_vmw_update_layout_arg *arg = 2382 struct drm_vmw_update_layout_arg *arg =
2380 (struct drm_vmw_update_layout_arg *)data; 2383 (struct drm_vmw_update_layout_arg *)data;
2381 void __user *user_rects; 2384 void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2421 drm_rects[i].y1 = curr_rect.y; 2424 drm_rects[i].y1 = curr_rect.y;
2422 drm_rects[i].x2 = curr_rect.x + curr_rect.w; 2425 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2423 drm_rects[i].y2 = curr_rect.y + curr_rect.h; 2426 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2427
2428 /*
2429 * Currently this check is limiting the topology within
2430 * mode_config->max (which actually is max texture size
2431 * supported by virtual device). This limit is here to address
2432 * window managers that create a big framebuffer for whole
2433 * topology.
2434 */
2435 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2436 drm_rects[i].x2 > mode_config->max_width ||
2437 drm_rects[i].y2 > mode_config->max_height) {
2438 DRM_ERROR("Invalid GUI layout.\n");
2439 ret = -EINVAL;
2440 goto out_free;
2441 }
2424 } 2442 }
2425 2443
2426 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); 2444 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..f30e839f7bfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
1600 1600
1601 dev_priv->active_display_unit = vmw_du_screen_target; 1601 dev_priv->active_display_unit = vmw_du_screen_target;
1602 1602
1603 if (dev_priv->capabilities & SVGA_CAP_3D) {
1604 /*
1605 * For 3D VMs, display (scanout) buffer size is the smaller of
1606 * max texture and max STDU
1607 */
1608 uint32_t max_width, max_height;
1609
1610 max_width = min(dev_priv->texture_max_width,
1611 dev_priv->stdu_max_width);
1612 max_height = min(dev_priv->texture_max_height,
1613 dev_priv->stdu_max_height);
1614
1615 dev->mode_config.max_width = max_width;
1616 dev->mode_config.max_height = max_height;
1617 } else {
1618 /*
1619 * Given various display aspect ratios, there's no way to
1620 * estimate these using prim_bb_mem. So just set these to
1621 * something arbitrarily large and we will reject any layout
1622 * that doesn't fit prim_bb_mem later
1623 */
1624 dev->mode_config.max_width = 8192;
1625 dev->mode_config.max_height = 8192;
1626 }
1627
1628 vmw_kms_create_implicit_placement_property(dev_priv, false); 1603 vmw_kms_create_implicit_placement_property(dev_priv, false);
1629 1604
1630 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { 1605 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..80a01cd4c051 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1404 *srf_out = NULL; 1404 *srf_out = NULL;
1405 1405
1406 if (for_scanout) { 1406 if (for_scanout) {
1407 uint32_t max_width, max_height;
1408
1409 if (!svga3dsurface_is_screen_target_format(format)) { 1407 if (!svga3dsurface_is_screen_target_format(format)) {
1410 DRM_ERROR("Invalid Screen Target surface format."); 1408 DRM_ERROR("Invalid Screen Target surface format.");
1411 return -EINVAL; 1409 return -EINVAL;
1412 } 1410 }
1413 1411
1414 max_width = min(dev_priv->texture_max_width, 1412 if (size.width > dev_priv->texture_max_width ||
1415 dev_priv->stdu_max_width); 1413 size.height > dev_priv->texture_max_height) {
1416 max_height = min(dev_priv->texture_max_height,
1417 dev_priv->stdu_max_height);
1418
1419 if (size.width > max_width || size.height > max_height) {
1420 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", 1414 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
1421 size.width, size.height, 1415 size.width, size.height,
1422 max_width, max_height); 1416 dev_priv->texture_max_width,
1417 dev_priv->texture_max_height);
1423 return -EINVAL; 1418 return -EINVAL;
1424 } 1419 }
1425 } else { 1420 } else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1495 if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) 1490 if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
1496 srf->res.backup_size += sizeof(SVGA3dDXSOState); 1491 srf->res.backup_size += sizeof(SVGA3dDXSOState);
1497 1492
1493 /*
1494 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
1495 * size greater than STDU max width/height. This is really a workaround
1496 * to support creation of big framebuffer requested by some user-space
1497 * for whole topology. That big framebuffer won't really be used for
1498 * binding with screen target as during prepare_fb a separate surface is
1499 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
1500 */
1498 if (dev_priv->active_display_unit == vmw_du_screen_target && 1501 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1499 for_scanout) 1502 for_scanout && size.width <= dev_priv->stdu_max_width &&
1503 size.height <= dev_priv->stdu_max_height)
1500 srf->flags |= SVGA3D_SURFACE_SCREENTARGET; 1504 srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
1501 1505
1502 /* 1506 /*
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a96bf46bc483..cf2a18571d48 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
215 return; 215 return;
216 216
217 client->id = ret | ID_BIT_AUDIO; 217 client->id = ret | ID_BIT_AUDIO;
218 if (client->ops->gpu_bound)
219 client->ops->gpu_bound(client->pdev, ret);
218 } 220 }
219 221
220 vga_switcheroo_debugfs_init(&vgasr_priv); 222 vga_switcheroo_debugfs_init(&vgasr_priv);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 25b7bd56ae11..1cb41992aaa1 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
335 struct hid_field *field, struct hid_usage *usage, 335 struct hid_field *field, struct hid_usage *usage,
336 unsigned long **bit, int *max) 336 unsigned long **bit, int *max)
337{ 337{
338 if (usage->hid == (HID_UP_CUSTOM | 0x0003)) { 338 if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
339 usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
339 /* The fn key on Apple USB keyboards */ 340 /* The fn key on Apple USB keyboards */
340 set_bit(EV_REP, hi->input->evbit); 341 set_bit(EV_REP, hi->input->evbit);
341 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN); 342 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
472 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 473 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI), 474 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
474 .driver_data = APPLE_HAS_FN }, 475 .driver_data = APPLE_HAS_FN },
476 { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
477 .driver_data = APPLE_HAS_FN },
478 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
479 .driver_data = APPLE_HAS_FN },
480 { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
481 .driver_data = APPLE_HAS_FN },
475 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI), 482 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
476 .driver_data = APPLE_HAS_FN }, 483 .driver_data = APPLE_HAS_FN },
477 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO), 484 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3da354af7a0a..44564f61e9cc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
1000 parser = vzalloc(sizeof(struct hid_parser)); 1000 parser = vzalloc(sizeof(struct hid_parser));
1001 if (!parser) { 1001 if (!parser) {
1002 ret = -ENOMEM; 1002 ret = -ENOMEM;
1003 goto err; 1003 goto alloc_err;
1004 } 1004 }
1005 1005
1006 parser->device = device; 1006 parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
1039 hid_err(device, "unbalanced delimiter at end of report description\n"); 1039 hid_err(device, "unbalanced delimiter at end of report description\n");
1040 goto err; 1040 goto err;
1041 } 1041 }
1042 kfree(parser->collection_stack);
1042 vfree(parser); 1043 vfree(parser);
1043 device->status |= HID_STAT_PARSED; 1044 device->status |= HID_STAT_PARSED;
1044 return 0; 1045 return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
1047 1048
1048 hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); 1049 hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
1049err: 1050err:
1051 kfree(parser->collection_stack);
1052alloc_err:
1050 vfree(parser); 1053 vfree(parser);
1051 hid_close_report(device); 1054 hid_close_report(device);
1052 return ret; 1055 return ret;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 79bdf0c7e351..bc49909aba8e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -88,6 +88,7 @@
88#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101 88#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
89 89
90#define USB_VENDOR_ID_APPLE 0x05ac 90#define USB_VENDOR_ID_APPLE 0x05ac
91#define BT_VENDOR_ID_APPLE 0x004c
91#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304 92#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
92#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d 93#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
93#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e 94#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
@@ -157,6 +158,7 @@
157#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 158#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
158#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257 159#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
159#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267 160#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
161#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
160#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 162#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
161#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 163#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
162#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 164#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -528,9 +530,6 @@
528#define I2C_VENDOR_ID_HANTICK 0x0911 530#define I2C_VENDOR_ID_HANTICK 0x0911
529#define I2C_PRODUCT_ID_HANTICK_5288 0x5288 531#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
530 532
531#define I2C_VENDOR_ID_RAYD 0x2386
532#define I2C_PRODUCT_ID_RAYD_3118 0x3118
533
534#define USB_VENDOR_ID_HANWANG 0x0b57 533#define USB_VENDOR_ID_HANWANG 0x0b57
535#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000 534#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
536#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff 535#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@@ -950,6 +949,7 @@
950#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 949#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
951#define USB_DEVICE_ID_SAITEK_PS1000 0x0621 950#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
952#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb 951#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
952#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
953#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 953#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
954#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa 954#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
955#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 955#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
@@ -976,7 +976,6 @@
976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817 976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
977#define USB_DEVICE_ID_SIS_TS 0x1013 977#define USB_DEVICE_ID_SIS_TS 0x1013
978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
979#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
980 979
981#define USB_VENDOR_ID_SKYCABLE 0x1223 980#define USB_VENDOR_ID_SKYCABLE 0x1223
982#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 981#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4e94ea3e280a..a481eaf39e88 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
1582 input_dev->dev.parent = &hid->dev; 1582 input_dev->dev.parent = &hid->dev;
1583 1583
1584 hidinput->input = input_dev; 1584 hidinput->input = input_dev;
1585 hidinput->application = application;
1585 list_add_tail(&hidinput->list, &hid->inputs); 1586 list_add_tail(&hidinput->list, &hid->inputs);
1586 1587
1587 INIT_LIST_HEAD(&hidinput->reports); 1588 INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
1677 struct hid_input *hidinput; 1678 struct hid_input *hidinput;
1678 1679
1679 list_for_each_entry(hidinput, &hid->inputs, list) { 1680 list_for_each_entry(hidinput, &hid->inputs, list) {
1680 if (hidinput->report && 1681 if (hidinput->application == report->application)
1681 hidinput->report->application == report->application)
1682 return hidinput; 1682 return hidinput;
1683 } 1683 }
1684 1684
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
1815 input_unregister_device(hidinput->input); 1815 input_unregister_device(hidinput->input);
1816 else 1816 else
1817 input_free_device(hidinput->input); 1817 input_free_device(hidinput->input);
1818 kfree(hidinput->name);
1818 kfree(hidinput); 1819 kfree(hidinput);
1819 } 1820 }
1820 1821
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 40fbb7c52723..da954f3f4da7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1375 struct hid_usage *usage, 1375 struct hid_usage *usage,
1376 enum latency_mode latency, 1376 enum latency_mode latency,
1377 bool surface_switch, 1377 bool surface_switch,
1378 bool button_switch) 1378 bool button_switch,
1379 bool *inputmode_found)
1379{ 1380{
1380 struct mt_device *td = hid_get_drvdata(hdev); 1381 struct mt_device *td = hid_get_drvdata(hdev);
1381 struct mt_class *cls = &td->mtclass; 1382 struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1387 1388
1388 switch (usage->hid) { 1389 switch (usage->hid) {
1389 case HID_DG_INPUTMODE: 1390 case HID_DG_INPUTMODE:
1391 /*
1392 * Some elan panels wrongly declare 2 input mode features,
1393 * and silently ignore when we set the value in the second
1394 * field. Skip the second feature and hope for the best.
1395 */
1396 if (*inputmode_found)
1397 return false;
1398
1390 if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) { 1399 if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
1391 report_len = hid_report_len(report); 1400 report_len = hid_report_len(report);
1392 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1401 buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1402 } 1411 }
1403 1412
1404 field->value[index] = td->inputmode_value; 1413 field->value[index] = td->inputmode_value;
1414 *inputmode_found = true;
1405 return true; 1415 return true;
1406 1416
1407 case HID_DG_CONTACTMAX: 1417 case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1439 struct hid_usage *usage; 1449 struct hid_usage *usage;
1440 int i, j; 1450 int i, j;
1441 bool update_report; 1451 bool update_report;
1452 bool inputmode_found = false;
1442 1453
1443 rep_enum = &hdev->report_enum[HID_FEATURE_REPORT]; 1454 rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
1444 list_for_each_entry(rep, &rep_enum->report_list, list) { 1455 list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1457 usage, 1468 usage,
1458 latency, 1469 latency,
1459 surface_switch, 1470 surface_switch,
1460 button_switch)) 1471 button_switch,
1472 &inputmode_found))
1461 update_report = true; 1473 update_report = true;
1462 } 1474 }
1463 } 1475 }
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
1685 */ 1697 */
1686 hdev->quirks |= HID_QUIRK_INPUT_PER_APP; 1698 hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
1687 1699
1700 if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
1701 hdev->quirks |= HID_QUIRK_MULTI_INPUT;
1702
1688 timer_setup(&td->release_timer, mt_expired_timeout, 0); 1703 timer_setup(&td->release_timer, mt_expired_timeout, 0);
1689 1704
1690 ret = hid_parse(hdev); 1705 ret = hid_parse(hdev);
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e642686ff0..683861f324e3 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
183 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 183 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
184 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), 184 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
185 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 185 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
186 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
187 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
186 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9), 188 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
187 .driver_data = SAITEK_RELEASE_MODE_RAT7 }, 189 .driver_data = SAITEK_RELEASE_MODE_RAT7 },
188 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), 190 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 50af72baa5ca..2b63487057c2 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
579} 579}
580EXPORT_SYMBOL_GPL(sensor_hub_device_close); 580EXPORT_SYMBOL_GPL(sensor_hub_device_close);
581 581
582static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
583 unsigned int *rsize)
584{
585 /*
586 * Checks if the report descriptor of Thinkpad Helix 2 has a logical
587 * minimum for magnetic flux axis greater than the maximum.
588 */
589 if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
590 *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
591 rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
592 rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
593 rdesc[921] == 0x07 && rdesc[922] == 0x00) {
594 /* Sets negative logical minimum for mag x, y and z */
595 rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
596 rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
597 rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
598 rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
599 }
600
601 return rdesc;
602}
603
582static int sensor_hub_probe(struct hid_device *hdev, 604static int sensor_hub_probe(struct hid_device *hdev,
583 const struct hid_device_id *id) 605 const struct hid_device_id *id)
584{ 606{
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
743 .probe = sensor_hub_probe, 765 .probe = sensor_hub_probe,
744 .remove = sensor_hub_remove, 766 .remove = sensor_hub_remove,
745 .raw_event = sensor_hub_raw_event, 767 .raw_event = sensor_hub_raw_event,
768 .report_fixup = sensor_hub_report_fixup,
746#ifdef CONFIG_PM 769#ifdef CONFIG_PM
747 .suspend = sensor_hub_suspend, 770 .suspend = sensor_hub_suspend,
748 .resume = sensor_hub_resume, 771 .resume = sensor_hub_resume,
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 2ce194a84868..4e3592e7a3f7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,7 +47,7 @@
47/* quirks to control the device */ 47/* quirks to control the device */
48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
50#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2) 50#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
51 51
52/* flags */ 52/* flags */
53#define I2C_HID_STARTED 0 53#define I2C_HID_STARTED 0
@@ -169,11 +169,8 @@ static const struct i2c_hid_quirks {
169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755, 169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET }, 172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
173 { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118, 173 I2C_HID_QUIRK_NO_RUNTIME_PM },
174 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
175 { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
176 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
177 { 0, 0 } 174 { 0, 0 }
178}; 175};
179 176
@@ -1107,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
1107 goto err_mem_free; 1104 goto err_mem_free;
1108 } 1105 }
1109 1106
1110 pm_runtime_put(&client->dev); 1107 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1108 pm_runtime_put(&client->dev);
1109
1111 return 0; 1110 return 0;
1112 1111
1113err_mem_free: 1112err_mem_free:
@@ -1132,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
1132 struct i2c_hid *ihid = i2c_get_clientdata(client); 1131 struct i2c_hid *ihid = i2c_get_clientdata(client);
1133 struct hid_device *hid; 1132 struct hid_device *hid;
1134 1133
1135 pm_runtime_get_sync(&client->dev); 1134 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1135 pm_runtime_get_sync(&client->dev);
1136 pm_runtime_disable(&client->dev); 1136 pm_runtime_disable(&client->dev);
1137 pm_runtime_set_suspended(&client->dev); 1137 pm_runtime_set_suspended(&client->dev);
1138 pm_runtime_put_noidle(&client->dev); 1138 pm_runtime_put_noidle(&client->dev);
@@ -1235,19 +1235,15 @@ static int i2c_hid_resume(struct device *dev)
1235 pm_runtime_enable(dev); 1235 pm_runtime_enable(dev);
1236 1236
1237 enable_irq(client->irq); 1237 enable_irq(client->irq);
1238 ret = i2c_hid_hwreset(client);
1239 if (ret)
1240 return ret;
1241 1238
1242 /* RAYDIUM device (2386:3118) need to re-send report descr cmd 1239 /* Instead of resetting device, simply powers the device on. This
1243 * after resume, after this it will be back normal. 1240 * solves "incomplete reports" on Raydium devices 2386:3118 and
1244 * otherwise it issues too many incomplete reports. 1241 * 2386:4B33 and fixes various SIS touchscreens no longer sending
1242 * data after a suspend/resume.
1245 */ 1243 */
1246 if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) { 1244 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
1247 ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0); 1245 if (ret)
1248 if (ret) 1246 return ret;
1249 return ret;
1250 }
1251 1247
1252 if (hid->driver && hid->driver->reset_resume) { 1248 if (hid->driver && hid->driver->reset_resume) {
1253 ret = hid->driver->reset_resume(hid); 1249 ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 97869b7410eb..08a8327dfd22 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,8 @@
29#define CNL_Ax_DEVICE_ID 0x9DFC 29#define CNL_Ax_DEVICE_ID 0x9DFC
30#define GLK_Ax_DEVICE_ID 0x31A2 30#define GLK_Ax_DEVICE_ID 0x31A2
31#define CNL_H_DEVICE_ID 0xA37C 31#define CNL_H_DEVICE_ID 0xA37C
32#define ICL_MOBILE_DEVICE_ID 0x34FC
33#define SPT_H_DEVICE_ID 0xA135
32 34
33#define REVISION_ID_CHT_A0 0x6 35#define REVISION_ID_CHT_A0 0x6
34#define REVISION_ID_CHT_Ax_SI 0x0 36#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 050f9872f5c0..256b3016116c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, 38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
41 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
42 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
41 {0, } 43 {0, }
42}; 44};
43MODULE_DEVICE_TABLE(pci, ish_pci_tbl); 45MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b1b548a21f91..c71cc857b649 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1291 if (!attribute->show) 1291 if (!attribute->show)
1292 return -EIO; 1292 return -EIO;
1293 1293
1294 if (chan->state != CHANNEL_OPENED_STATE)
1295 return -EINVAL;
1296
1294 return attribute->show(chan, buf); 1297 return attribute->show(chan, buf);
1295} 1298}
1296 1299
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 90837f7c7d0f..f4c7516eb989 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
302 return clamp_val(reg, 0, 1023) & (0xff << 2); 302 return clamp_val(reg, 0, 1023) & (0xff << 2);
303} 303}
304 304
305static u16 adt7475_read_word(struct i2c_client *client, int reg) 305static int adt7475_read_word(struct i2c_client *client, int reg)
306{ 306{
307 u16 val; 307 int val1, val2;
308 308
309 val = i2c_smbus_read_byte_data(client, reg); 309 val1 = i2c_smbus_read_byte_data(client, reg);
310 val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8); 310 if (val1 < 0)
311 return val1;
312 val2 = i2c_smbus_read_byte_data(client, reg + 1);
313 if (val2 < 0)
314 return val2;
311 315
312 return val; 316 return val1 | (val2 << 8);
313} 317}
314 318
315static void adt7475_write_word(struct i2c_client *client, int reg, u16 val) 319static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
962{ 966{
963 struct adt7475_data *data = adt7475_update_device(dev); 967 struct adt7475_data *data = adt7475_update_device(dev);
964 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr); 968 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
965 int i = clamp_val(data->range[sattr->index] & 0xf, 0, 969 int idx;
966 ARRAY_SIZE(pwmfreq_table) - 1);
967 970
968 if (IS_ERR(data)) 971 if (IS_ERR(data))
969 return PTR_ERR(data); 972 return PTR_ERR(data);
973 idx = clamp_val(data->range[sattr->index] & 0xf, 0,
974 ARRAY_SIZE(pwmfreq_table) - 1);
970 975
971 return sprintf(buf, "%d\n", pwmfreq_table[i]); 976 return sprintf(buf, "%d\n", pwmfreq_table[idx]);
972} 977}
973 978
974static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr, 979static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
1004 char *buf) 1009 char *buf)
1005{ 1010{
1006 struct adt7475_data *data = adt7475_update_device(dev); 1011 struct adt7475_data *data = adt7475_update_device(dev);
1012
1013 if (IS_ERR(data))
1014 return PTR_ERR(data);
1015
1007 return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY)); 1016 return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
1008} 1017}
1009 1018
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index e9e6aeabbf84..71d3445ba869 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
17 * Bi-directional Current/Power Monitor with I2C Interface 17 * Bi-directional Current/Power Monitor with I2C Interface
18 * Datasheet: http://www.ti.com/product/ina230 18 * Datasheet: http://www.ti.com/product/ina230
19 * 19 *
20 * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> 20 * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
21 * Thanks to Jan Volkering 21 * Thanks to Jan Volkering
22 * 22 *
23 * This program is free software; you can redistribute it and/or modify 23 * This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
329 return 0; 329 return 0;
330} 330}
331 331
332static ssize_t ina2xx_show_shunt(struct device *dev,
333 struct device_attribute *da,
334 char *buf)
335{
336 struct ina2xx_data *data = dev_get_drvdata(dev);
337
338 return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
339}
340
332static ssize_t ina2xx_store_shunt(struct device *dev, 341static ssize_t ina2xx_store_shunt(struct device *dev,
333 struct device_attribute *da, 342 struct device_attribute *da,
334 const char *buf, size_t count) 343 const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
403 412
404/* shunt resistance */ 413/* shunt resistance */
405static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, 414static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
406 ina2xx_show_value, ina2xx_store_shunt, 415 ina2xx_show_shunt, ina2xx_store_shunt,
407 INA2XX_CALIBRATION); 416 INA2XX_CALIBRATION);
408 417
409/* update interval (ina226 only) */ 418/* update interval (ina226 only) */
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c6bd61e4695a..78603b78cf41 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -63,6 +63,7 @@
63#include <linux/bitops.h> 63#include <linux/bitops.h>
64#include <linux/dmi.h> 64#include <linux/dmi.h>
65#include <linux/io.h> 65#include <linux/io.h>
66#include <linux/nospec.h>
66#include "lm75.h" 67#include "lm75.h"
67 68
68#define USE_ALTERNATE 69#define USE_ALTERNATE
@@ -206,8 +207,6 @@ superio_exit(int ioreg)
206 207
207#define NUM_FAN 7 208#define NUM_FAN 7
208 209
209#define TEMP_SOURCE_VIRTUAL 0x1f
210
211/* Common and NCT6775 specific data */ 210/* Common and NCT6775 specific data */
212 211
213/* Voltage min/max registers for nr=7..14 are in bank 5 */ 212/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -298,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
298 297
299static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; 298static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
300static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; 299static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
301static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 }; 300static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
302static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 }; 301 0x641, 0x642, 0x643, 0x644 };
302static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
303 303
304static const u16 NCT6775_REG_TEMP[] = { 304static const u16 NCT6775_REG_TEMP[] = {
305 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; 305 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -372,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
372}; 372};
373 373
374#define NCT6775_TEMP_MASK 0x001ffffe 374#define NCT6775_TEMP_MASK 0x001ffffe
375#define NCT6775_VIRT_TEMP_MASK 0x00000000
375 376
376static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { 377static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
377 [13] = 0x661, 378 [13] = 0x661,
@@ -424,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
424 425
425static const u16 NCT6776_REG_FAN_MIN[] = { 426static const u16 NCT6776_REG_FAN_MIN[] = {
426 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; 427 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
427static const u16 NCT6776_REG_FAN_PULSES[] = { 428static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
428 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; 429 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
429 430
430static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { 431static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
431 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; 432 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -460,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
460}; 461};
461 462
462#define NCT6776_TEMP_MASK 0x007ffffe 463#define NCT6776_TEMP_MASK 0x007ffffe
464#define NCT6776_VIRT_TEMP_MASK 0x00000000
463 465
464static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { 466static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
465 [14] = 0x401, 467 [14] = 0x401,
@@ -500,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
500 30, 31 }; /* intrusion0, intrusion1 */ 502 30, 31 }; /* intrusion0, intrusion1 */
501 503
502static const u16 NCT6779_REG_FAN[] = { 504static const u16 NCT6779_REG_FAN[] = {
503 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 }; 505 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
504static const u16 NCT6779_REG_FAN_PULSES[] = { 506static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
505 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 }; 507 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
506 508
507static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { 509static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
508 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; 510 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -558,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
558}; 560};
559 561
560#define NCT6779_TEMP_MASK 0x07ffff7e 562#define NCT6779_TEMP_MASK 0x07ffff7e
563#define NCT6779_VIRT_TEMP_MASK 0x00000000
561#define NCT6791_TEMP_MASK 0x87ffff7e 564#define NCT6791_TEMP_MASK 0x87ffff7e
565#define NCT6791_VIRT_TEMP_MASK 0x80000000
562 566
563static const u16 NCT6779_REG_TEMP_ALTERNATE[32] 567static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
564 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, 568 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -637,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
637}; 641};
638 642
639#define NCT6792_TEMP_MASK 0x9fffff7e 643#define NCT6792_TEMP_MASK 0x9fffff7e
644#define NCT6792_VIRT_TEMP_MASK 0x80000000
640 645
641static const char *const nct6793_temp_label[] = { 646static const char *const nct6793_temp_label[] = {
642 "", 647 "",
@@ -674,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
674}; 679};
675 680
676#define NCT6793_TEMP_MASK 0xbfff037e 681#define NCT6793_TEMP_MASK 0xbfff037e
682#define NCT6793_VIRT_TEMP_MASK 0x80000000
677 683
678static const char *const nct6795_temp_label[] = { 684static const char *const nct6795_temp_label[] = {
679 "", 685 "",
@@ -711,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
711}; 717};
712 718
713#define NCT6795_TEMP_MASK 0xbfffff7e 719#define NCT6795_TEMP_MASK 0xbfffff7e
720#define NCT6795_VIRT_TEMP_MASK 0x80000000
714 721
715static const char *const nct6796_temp_label[] = { 722static const char *const nct6796_temp_label[] = {
716 "", 723 "",
@@ -723,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
723 "AUXTIN4", 730 "AUXTIN4",
724 "SMBUSMASTER 0", 731 "SMBUSMASTER 0",
725 "SMBUSMASTER 1", 732 "SMBUSMASTER 1",
726 "", 733 "Virtual_TEMP",
727 "", 734 "Virtual_TEMP",
728 "", 735 "",
729 "", 736 "",
730 "", 737 "",
@@ -747,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
747 "Virtual_TEMP" 754 "Virtual_TEMP"
748}; 755};
749 756
750#define NCT6796_TEMP_MASK 0xbfff03fe 757#define NCT6796_TEMP_MASK 0xbfff0ffe
758#define NCT6796_VIRT_TEMP_MASK 0x80000c00
751 759
752/* NCT6102D/NCT6106D specific data */ 760/* NCT6102D/NCT6106D specific data */
753 761
@@ -778,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
778 786
779static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; 787static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
780static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; 788static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
781static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 }; 789static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
782static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 }; 790static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
783 791
784static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; 792static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
785static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; 793static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -916,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
916 return 1350000U / (reg << divreg); 924 return 1350000U / (reg << divreg);
917} 925}
918 926
927static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
928{
929 return reg;
930}
931
919static u16 fan_to_reg(u32 fan, unsigned int divreg) 932static u16 fan_to_reg(u32 fan, unsigned int divreg)
920{ 933{
921 if (!fan) 934 if (!fan)
@@ -968,6 +981,7 @@ struct nct6775_data {
968 u16 reg_temp_config[NUM_TEMP]; 981 u16 reg_temp_config[NUM_TEMP];
969 const char * const *temp_label; 982 const char * const *temp_label;
970 u32 temp_mask; 983 u32 temp_mask;
984 u32 virt_temp_mask;
971 985
972 u16 REG_CONFIG; 986 u16 REG_CONFIG;
973 u16 REG_VBAT; 987 u16 REG_VBAT;
@@ -1275,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
1275 case nct6795: 1289 case nct6795:
1276 case nct6796: 1290 case nct6796:
1277 return reg == 0x150 || reg == 0x153 || reg == 0x155 || 1291 return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
1278 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) || 1292 (reg & 0xfff0) == 0x4c0 ||
1279 reg == 0x402 || 1293 reg == 0x402 ||
1280 reg == 0x63a || reg == 0x63c || reg == 0x63e || 1294 reg == 0x63a || reg == 0x63c || reg == 0x63e ||
1281 reg == 0x640 || reg == 0x642 || reg == 0x64a || 1295 reg == 0x640 || reg == 0x642 || reg == 0x64a ||
1282 reg == 0x64c || reg == 0x660 || 1296 reg == 0x64c ||
1283 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || 1297 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
1284 reg == 0x7b || reg == 0x7d; 1298 reg == 0x7b || reg == 0x7d;
1285 } 1299 }
@@ -1557,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
1557 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); 1571 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
1558 data->pwm_weight_temp_sel[i] = reg & 0x1f; 1572 data->pwm_weight_temp_sel[i] = reg & 0x1f;
1559 /* If weight is disabled, report weight source as 0 */ 1573 /* If weight is disabled, report weight source as 0 */
1560 if (j == 1 && !(reg & 0x80)) 1574 if (!(reg & 0x80))
1561 data->pwm_weight_temp_sel[i] = 0; 1575 data->pwm_weight_temp_sel[i] = 0;
1562 1576
1563 /* Weight temp data */ 1577 /* Weight temp data */
@@ -1681,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
1681 if (data->has_fan_min & BIT(i)) 1695 if (data->has_fan_min & BIT(i))
1682 data->fan_min[i] = nct6775_read_value(data, 1696 data->fan_min[i] = nct6775_read_value(data,
1683 data->REG_FAN_MIN[i]); 1697 data->REG_FAN_MIN[i]);
1684 data->fan_pulses[i] = 1698
1685 (nct6775_read_value(data, data->REG_FAN_PULSES[i]) 1699 if (data->REG_FAN_PULSES[i]) {
1686 >> data->FAN_PULSE_SHIFT[i]) & 0x03; 1700 data->fan_pulses[i] =
1701 (nct6775_read_value(data,
1702 data->REG_FAN_PULSES[i])
1703 >> data->FAN_PULSE_SHIFT[i]) & 0x03;
1704 }
1687 1705
1688 nct6775_select_fan_div(dev, data, i, reg); 1706 nct6775_select_fan_div(dev, data, i, reg);
1689 } 1707 }
@@ -2689,6 +2707,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
2689 return err; 2707 return err;
2690 if (val > NUM_TEMP) 2708 if (val > NUM_TEMP)
2691 return -EINVAL; 2709 return -EINVAL;
2710 val = array_index_nospec(val, NUM_TEMP + 1);
2692 if (val && (!(data->have_temp & BIT(val - 1)) || 2711 if (val && (!(data->have_temp & BIT(val - 1)) ||
2693 !data->temp_src[val - 1])) 2712 !data->temp_src[val - 1]))
2694 return -EINVAL; 2713 return -EINVAL;
@@ -3637,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
3637 3656
3638 data->temp_label = nct6776_temp_label; 3657 data->temp_label = nct6776_temp_label;
3639 data->temp_mask = NCT6776_TEMP_MASK; 3658 data->temp_mask = NCT6776_TEMP_MASK;
3659 data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
3640 3660
3641 data->REG_VBAT = NCT6106_REG_VBAT; 3661 data->REG_VBAT = NCT6106_REG_VBAT;
3642 data->REG_DIODE = NCT6106_REG_DIODE; 3662 data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3715,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
3715 3735
3716 data->temp_label = nct6775_temp_label; 3736 data->temp_label = nct6775_temp_label;
3717 data->temp_mask = NCT6775_TEMP_MASK; 3737 data->temp_mask = NCT6775_TEMP_MASK;
3738 data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
3718 3739
3719 data->REG_CONFIG = NCT6775_REG_CONFIG; 3740 data->REG_CONFIG = NCT6775_REG_CONFIG;
3720 data->REG_VBAT = NCT6775_REG_VBAT; 3741 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3787,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
3787 3808
3788 data->temp_label = nct6776_temp_label; 3809 data->temp_label = nct6776_temp_label;
3789 data->temp_mask = NCT6776_TEMP_MASK; 3810 data->temp_mask = NCT6776_TEMP_MASK;
3811 data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
3790 3812
3791 data->REG_CONFIG = NCT6775_REG_CONFIG; 3813 data->REG_CONFIG = NCT6775_REG_CONFIG;
3792 data->REG_VBAT = NCT6775_REG_VBAT; 3814 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3851,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
3851 data->ALARM_BITS = NCT6779_ALARM_BITS; 3873 data->ALARM_BITS = NCT6779_ALARM_BITS;
3852 data->BEEP_BITS = NCT6779_BEEP_BITS; 3874 data->BEEP_BITS = NCT6779_BEEP_BITS;
3853 3875
3854 data->fan_from_reg = fan_from_reg13; 3876 data->fan_from_reg = fan_from_reg_rpm;
3855 data->fan_from_reg_min = fan_from_reg13; 3877 data->fan_from_reg_min = fan_from_reg13;
3856 data->target_temp_mask = 0xff; 3878 data->target_temp_mask = 0xff;
3857 data->tolerance_mask = 0x07; 3879 data->tolerance_mask = 0x07;
@@ -3859,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
3859 3881
3860 data->temp_label = nct6779_temp_label; 3882 data->temp_label = nct6779_temp_label;
3861 data->temp_mask = NCT6779_TEMP_MASK; 3883 data->temp_mask = NCT6779_TEMP_MASK;
3884 data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
3862 3885
3863 data->REG_CONFIG = NCT6775_REG_CONFIG; 3886 data->REG_CONFIG = NCT6775_REG_CONFIG;
3864 data->REG_VBAT = NCT6775_REG_VBAT; 3887 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3931,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
3931 data->ALARM_BITS = NCT6791_ALARM_BITS; 3954 data->ALARM_BITS = NCT6791_ALARM_BITS;
3932 data->BEEP_BITS = NCT6779_BEEP_BITS; 3955 data->BEEP_BITS = NCT6779_BEEP_BITS;
3933 3956
3934 data->fan_from_reg = fan_from_reg13; 3957 data->fan_from_reg = fan_from_reg_rpm;
3935 data->fan_from_reg_min = fan_from_reg13; 3958 data->fan_from_reg_min = fan_from_reg13;
3936 data->target_temp_mask = 0xff; 3959 data->target_temp_mask = 0xff;
3937 data->tolerance_mask = 0x07; 3960 data->tolerance_mask = 0x07;
@@ -3942,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
3942 case nct6791: 3965 case nct6791:
3943 data->temp_label = nct6779_temp_label; 3966 data->temp_label = nct6779_temp_label;
3944 data->temp_mask = NCT6791_TEMP_MASK; 3967 data->temp_mask = NCT6791_TEMP_MASK;
3968 data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
3945 break; 3969 break;
3946 case nct6792: 3970 case nct6792:
3947 data->temp_label = nct6792_temp_label; 3971 data->temp_label = nct6792_temp_label;
3948 data->temp_mask = NCT6792_TEMP_MASK; 3972 data->temp_mask = NCT6792_TEMP_MASK;
3973 data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
3949 break; 3974 break;
3950 case nct6793: 3975 case nct6793:
3951 data->temp_label = nct6793_temp_label; 3976 data->temp_label = nct6793_temp_label;
3952 data->temp_mask = NCT6793_TEMP_MASK; 3977 data->temp_mask = NCT6793_TEMP_MASK;
3978 data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
3953 break; 3979 break;
3954 case nct6795: 3980 case nct6795:
3955 data->temp_label = nct6795_temp_label; 3981 data->temp_label = nct6795_temp_label;
3956 data->temp_mask = NCT6795_TEMP_MASK; 3982 data->temp_mask = NCT6795_TEMP_MASK;
3983 data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
3957 break; 3984 break;
3958 case nct6796: 3985 case nct6796:
3959 data->temp_label = nct6796_temp_label; 3986 data->temp_label = nct6796_temp_label;
3960 data->temp_mask = NCT6796_TEMP_MASK; 3987 data->temp_mask = NCT6796_TEMP_MASK;
3988 data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
3961 break; 3989 break;
3962 } 3990 }
3963 3991
@@ -4141,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
4141 * for each fan reflects a different temperature, and there 4169 * for each fan reflects a different temperature, and there
4142 * are no duplicates. 4170 * are no duplicates.
4143 */ 4171 */
4144 if (src != TEMP_SOURCE_VIRTUAL) { 4172 if (!(data->virt_temp_mask & BIT(src))) {
4145 if (mask & BIT(src)) 4173 if (mask & BIT(src))
4146 continue; 4174 continue;
4147 mask |= BIT(src); 4175 mask |= BIT(src);
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index fb4e4a6bb1f6..be5ba4690895 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
164MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); 164MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
165MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver"); 165MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
166MODULE_LICENSE("GPL v2"); 166MODULE_LICENSE("GPL v2");
167MODULE_ALIAS("platform:raspberrypi-hwmon");
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index da962aa2cef5..fc6b7f8b62fb 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
139 th->thdev[i] = NULL; 139 th->thdev[i] = NULL;
140 } 140 }
141 141
142 th->num_thdevs = lowest; 142 if (lowest >= 0)
143 th->num_thdevs = lowest;
143 } 144 }
144 145
145 if (thdrv->attr_group) 146 if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
487 .flags = IORESOURCE_MEM, 488 .flags = IORESOURCE_MEM,
488 }, 489 },
489 { 490 {
490 .start = TH_MMIO_SW, 491 .start = 1, /* use resource[1] */
491 .end = 0, 492 .end = 0,
492 .flags = IORESOURCE_MEM, 493 .flags = IORESOURCE_MEM,
493 }, 494 },
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
580 struct intel_th_device *thdev; 581 struct intel_th_device *thdev;
581 struct resource res[3]; 582 struct resource res[3];
582 unsigned int req = 0; 583 unsigned int req = 0;
584 bool is64bit = false;
583 int r, err; 585 int r, err;
584 586
585 thdev = intel_th_device_alloc(th, subdev->type, subdev->name, 587 thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
589 591
590 thdev->drvdata = th->drvdata; 592 thdev->drvdata = th->drvdata;
591 593
594 for (r = 0; r < th->num_resources; r++)
595 if (th->resource[r].flags & IORESOURCE_MEM_64) {
596 is64bit = true;
597 break;
598 }
599
592 memcpy(res, subdev->res, 600 memcpy(res, subdev->res,
593 sizeof(struct resource) * subdev->nres); 601 sizeof(struct resource) * subdev->nres);
594 602
595 for (r = 0; r < subdev->nres; r++) { 603 for (r = 0; r < subdev->nres; r++) {
596 struct resource *devres = th->resource; 604 struct resource *devres = th->resource;
597 int bar = TH_MMIO_CONFIG; 605 int bar = 0; /* cut subdevices' MMIO from resource[0] */
598 606
599 /* 607 /*
600 * Take .end == 0 to mean 'take the whole bar', 608 * Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
603 */ 611 */
604 if (!res[r].end && res[r].flags == IORESOURCE_MEM) { 612 if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
605 bar = res[r].start; 613 bar = res[r].start;
614 if (is64bit)
615 bar *= 2;
606 res[r].start = 0; 616 res[r].start = 0;
607 res[r].end = resource_size(&devres[bar]) - 1; 617 res[r].end = resource_size(&devres[bar]) - 1;
608 } 618 }
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2e55e5d97f6..1cf6290d6435 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
160 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), 160 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
161 .driver_data = (kernel_ulong_t)&intel_th_2x, 161 .driver_data = (kernel_ulong_t)&intel_th_2x,
162 }, 162 },
163 {
164 /* Ice Lake PCH */
165 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
166 .driver_data = (kernel_ulong_t)&intel_th_2x,
167 },
163 { 0 }, 168 { 0 },
164}; 169};
165 170
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 6ec65adaba49..c33dcfb87993 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
110 } 110 }
111#ifdef DEBUG 111#ifdef DEBUG
112 if (jiffies != start && i2c_debug >= 3) 112 if (jiffies != start && i2c_debug >= 3)
113 pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go " 113 pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
114 "high\n", jiffies - start); 114 jiffies - start);
115#endif 115#endif
116 116
117done: 117done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
171 setsda(adap, sb); 171 setsda(adap, sb);
172 udelay((adap->udelay + 1) / 2); 172 udelay((adap->udelay + 1) / 2);
173 if (sclhi(adap) < 0) { /* timed out */ 173 if (sclhi(adap) < 0) { /* timed out */
174 bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " 174 bit_dbg(1, &i2c_adap->dev,
175 "timeout at bit #%d\n", (int)c, i); 175 "i2c_outb: 0x%02x, timeout at bit #%d\n",
176 (int)c, i);
176 return -ETIMEDOUT; 177 return -ETIMEDOUT;
177 } 178 }
178 /* FIXME do arbitration here: 179 /* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
185 } 186 }
186 sdahi(adap); 187 sdahi(adap);
187 if (sclhi(adap) < 0) { /* timeout */ 188 if (sclhi(adap) < 0) { /* timeout */
188 bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, " 189 bit_dbg(1, &i2c_adap->dev,
189 "timeout at ack\n", (int)c); 190 "i2c_outb: 0x%02x, timeout at ack\n", (int)c);
190 return -ETIMEDOUT; 191 return -ETIMEDOUT;
191 } 192 }
192 193
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
215 sdahi(adap); 216 sdahi(adap);
216 for (i = 0; i < 8; i++) { 217 for (i = 0; i < 8; i++) {
217 if (sclhi(adap) < 0) { /* timeout */ 218 if (sclhi(adap) < 0) { /* timeout */
218 bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit " 219 bit_dbg(1, &i2c_adap->dev,
219 "#%d\n", 7 - i); 220 "i2c_inb: timeout at bit #%d\n",
221 7 - i);
220 return -ETIMEDOUT; 222 return -ETIMEDOUT;
221 } 223 }
222 indata *= 2; 224 indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
265 goto bailout; 267 goto bailout;
266 } 268 }
267 if (!scl) { 269 if (!scl) {
268 printk(KERN_WARNING "%s: SCL unexpected low " 270 printk(KERN_WARNING
269 "while pulling SDA low!\n", name); 271 "%s: SCL unexpected low while pulling SDA low!\n",
272 name);
270 goto bailout; 273 goto bailout;
271 } 274 }
272 275
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
278 goto bailout; 281 goto bailout;
279 } 282 }
280 if (!scl) { 283 if (!scl) {
281 printk(KERN_WARNING "%s: SCL unexpected low " 284 printk(KERN_WARNING
282 "while pulling SDA high!\n", name); 285 "%s: SCL unexpected low while pulling SDA high!\n",
286 name);
283 goto bailout; 287 goto bailout;
284 } 288 }
285 289
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
291 goto bailout; 295 goto bailout;
292 } 296 }
293 if (!sda) { 297 if (!sda) {
294 printk(KERN_WARNING "%s: SDA unexpected low " 298 printk(KERN_WARNING
295 "while pulling SCL low!\n", name); 299 "%s: SDA unexpected low while pulling SCL low!\n",
300 name);
296 goto bailout; 301 goto bailout;
297 } 302 }
298 303
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
304 goto bailout; 309 goto bailout;
305 } 310 }
306 if (!sda) { 311 if (!sda) {
307 printk(KERN_WARNING "%s: SDA unexpected low " 312 printk(KERN_WARNING
308 "while pulling SCL high!\n", name); 313 "%s: SDA unexpected low while pulling SCL high!\n",
314 name);
309 goto bailout; 315 goto bailout;
310 } 316 }
311 317
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
352 i2c_start(adap); 358 i2c_start(adap);
353 } 359 }
354 if (i && ret) 360 if (i && ret)
355 bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at " 361 bit_dbg(1, &i2c_adap->dev,
356 "0x%02x: %s\n", i + 1, 362 "Used %d tries to %s client at 0x%02x: %s\n", i + 1,
357 addr & 1 ? "read from" : "write to", addr >> 1, 363 addr & 1 ? "read from" : "write to", addr >> 1,
358 ret == 1 ? "success" : "failed, timeout?"); 364 ret == 1 ? "success" : "failed, timeout?");
359 return ret; 365 return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
442 if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { 448 if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
443 if (!(flags & I2C_M_NO_RD_ACK)) 449 if (!(flags & I2C_M_NO_RD_ACK))
444 acknak(i2c_adap, 0); 450 acknak(i2c_adap, 0);
445 dev_err(&i2c_adap->dev, "readbytes: invalid " 451 dev_err(&i2c_adap->dev,
446 "block length (%d)\n", inval); 452 "readbytes: invalid block length (%d)\n",
453 inval);
447 return -EPROTO; 454 return -EPROTO;
448 } 455 }
449 /* The original count value accounts for the extra 456 /* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
506 return -ENXIO; 513 return -ENXIO;
507 } 514 }
508 if (flags & I2C_M_RD) { 515 if (flags & I2C_M_RD) {
509 bit_dbg(3, &i2c_adap->dev, "emitting repeated " 516 bit_dbg(3, &i2c_adap->dev,
510 "start condition\n"); 517 "emitting repeated start condition\n");
511 i2c_repstart(adap); 518 i2c_repstart(adap);
512 /* okay, now switch into reading mode */ 519 /* okay, now switch into reading mode */
513 addr |= 0x01; 520 addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
564 } 571 }
565 ret = bit_doAddress(i2c_adap, pmsg); 572 ret = bit_doAddress(i2c_adap, pmsg);
566 if ((ret != 0) && !nak_ok) { 573 if ((ret != 0) && !nak_ok) {
567 bit_dbg(1, &i2c_adap->dev, "NAK from " 574 bit_dbg(1, &i2c_adap->dev,
568 "device addr 0x%02x msg #%d\n", 575 "NAK from device addr 0x%02x msg #%d\n",
569 msgs[i].addr, i); 576 msgs[i].addr, i);
570 goto bailout; 577 goto bailout;
571 } 578 }
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index e18442b9973a..94d94b4a9a0d 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -708,7 +708,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
708 i2c_set_adapdata(adap, dev); 708 i2c_set_adapdata(adap, dev);
709 709
710 if (dev->pm_disabled) { 710 if (dev->pm_disabled) {
711 dev_pm_syscore_device(dev->dev, true);
712 irq_flags = IRQF_NO_SUSPEND; 711 irq_flags = IRQF_NO_SUSPEND;
713 } else { 712 } else {
714 irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; 713 irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 1a8d2da5b000..b5750fd85125 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
434{ 434{
435 struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); 435 struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
436 436
437 if (i_dev->pm_disabled)
438 return 0;
439
437 i_dev->disable(i_dev); 440 i_dev->disable(i_dev);
438 i2c_dw_prepare_clk(i_dev, false); 441 i2c_dw_prepare_clk(i_dev, false);
439 442
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
444{ 447{
445 struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); 448 struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
446 449
447 i2c_dw_prepare_clk(i_dev, true); 450 if (!i_dev->pm_disabled)
451 i2c_dw_prepare_clk(i_dev, true);
452
448 i_dev->init(i_dev); 453 i_dev->init(i_dev);
449 454
450 return 0; 455 return 0;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 941c223f6491..c91e145ef5a5 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -140,6 +140,7 @@
140 140
141#define SBREG_BAR 0x10 141#define SBREG_BAR 0x10
142#define SBREG_SMBCTRL 0xc6000c 142#define SBREG_SMBCTRL 0xc6000c
143#define SBREG_SMBCTRL_DNV 0xcf000c
143 144
144/* Host status bits for SMBPCISTS */ 145/* Host status bits for SMBPCISTS */
145#define SMBPCISTS_INTS BIT(3) 146#define SMBPCISTS_INTS BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
1399 spin_unlock(&p2sb_spinlock); 1400 spin_unlock(&p2sb_spinlock);
1400 1401
1401 res = &tco_res[ICH_RES_MEM_OFF]; 1402 res = &tco_res[ICH_RES_MEM_OFF];
1402 res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL; 1403 if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
1404 res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
1405 else
1406 res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1407
1403 res->end = res->start + 3; 1408 res->end = res->start + 3;
1404 res->flags = IORESOURCE_MEM; 1409 res->flags = IORESOURCE_MEM;
1405 1410
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv)
1415} 1420}
1416 1421
1417#ifdef CONFIG_ACPI 1422#ifdef CONFIG_ACPI
1423static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
1424 acpi_physical_address address)
1425{
1426 return address >= priv->smba &&
1427 address <= pci_resource_end(priv->pci_dev, SMBBAR);
1428}
1429
1418static acpi_status 1430static acpi_status
1419i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, 1431i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
1420 u64 *value, void *handler_context, void *region_context) 1432 u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
1430 */ 1442 */
1431 mutex_lock(&priv->acpi_lock); 1443 mutex_lock(&priv->acpi_lock);
1432 1444
1433 if (!priv->acpi_reserved) { 1445 if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
1434 priv->acpi_reserved = true; 1446 priv->acpi_reserved = true;
1435 1447
1436 dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); 1448 dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 6d975f5221ca..06c4c767af32 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
538 538
539static const struct of_device_id lpi2c_imx_of_match[] = { 539static const struct of_device_id lpi2c_imx_of_match[] = {
540 { .compatible = "fsl,imx7ulp-lpi2c" }, 540 { .compatible = "fsl,imx7ulp-lpi2c" },
541 { .compatible = "fsl,imx8dv-lpi2c" },
542 { }, 541 { },
543}; 542};
544MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match); 543MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 439e8778f849..818cab14e87c 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
507 pd->pos = pd->msg->len; 507 pd->pos = pd->msg->len;
508 pd->stop_after_dma = true; 508 pd->stop_after_dma = true;
509 509
510 i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
511
512 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); 510 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
513} 511}
514 512
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
602 dma_async_issue_pending(chan); 600 dma_async_issue_pending(chan);
603} 601}
604 602
605static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, 603static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
606 bool do_init) 604 bool do_init)
607{ 605{
608 if (do_init) { 606 if (do_init) {
609 /* Initialize channel registers */ 607 /* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
627 625
628 /* Enable all interrupts to begin with */ 626 /* Enable all interrupts to begin with */
629 iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); 627 iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
630 return 0;
631} 628}
632 629
633static int poll_dte(struct sh_mobile_i2c_data *pd) 630static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
698 pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP; 695 pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
699 pd->stop_after_dma = false; 696 pd->stop_after_dma = false;
700 697
701 err = start_ch(pd, msg, do_start); 698 start_ch(pd, msg, do_start);
702 if (err)
703 break;
704 699
705 if (do_start) 700 if (do_start)
706 i2c_op(pd, OP_START, 0); 701 i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
709 timeout = wait_event_timeout(pd->wait, 704 timeout = wait_event_timeout(pd->wait,
710 pd->sr & (ICSR_TACK | SW_DONE), 705 pd->sr & (ICSR_TACK | SW_DONE),
711 adapter->timeout); 706 adapter->timeout);
707
708 /* 'stop_after_dma' tells if DMA transfer was complete */
709 i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
710
712 if (!timeout) { 711 if (!timeout) {
713 dev_err(pd->dev, "Transfer request timed out\n"); 712 dev_err(pd->dev, "Transfer request timed out\n");
714 if (pd->dma_direction != DMA_NONE) 713 if (pd->dma_direction != DMA_NONE)
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 9918bdd81619..a403e8579b65 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
401 return ret; 401 return ret;
402 402
403 for (msg = msgs; msg < emsg; msg++) { 403 for (msg = msgs; msg < emsg; msg++) {
404 /* If next message is read, skip the stop condition */ 404 /* Emit STOP if it is the last message or I2C_M_STOP is set. */
405 bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); 405 bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
406 /* but, force it if I2C_M_STOP is set */
407 if (msg->flags & I2C_M_STOP)
408 stop = true;
409 406
410 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop); 407 ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
411 if (ret) 408 if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index bb181b088291..454f914ae66d 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
248 return ret; 248 return ret;
249 249
250 for (msg = msgs; msg < emsg; msg++) { 250 for (msg = msgs; msg < emsg; msg++) {
251 /* If next message is read, skip the stop condition */ 251 /* Emit STOP if it is the last message or I2C_M_STOP is set. */
252 bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD); 252 bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
253 /* but, force it if I2C_M_STOP is set */
254 if (msg->flags & I2C_M_STOP)
255 stop = true;
256 253
257 ret = uniphier_i2c_master_xfer_one(adap, msg, stop); 254 ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
258 if (ret) 255 if (ret)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 9a71e50d21f1..0c51c0ffdda9 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
532{ 532{
533 u8 rx_watermark; 533 u8 rx_watermark;
534 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; 534 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
535 unsigned long flags;
535 536
536 /* Clear and enable Rx full interrupt. */ 537 /* Clear and enable Rx full interrupt. */
537 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); 538 xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
547 rx_watermark = IIC_RX_FIFO_DEPTH; 548 rx_watermark = IIC_RX_FIFO_DEPTH;
548 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); 549 xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
549 550
551 local_irq_save(flags);
550 if (!(msg->flags & I2C_M_NOSTART)) 552 if (!(msg->flags & I2C_M_NOSTART))
551 /* write the address */ 553 /* write the address */
552 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, 554 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
556 558
557 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, 559 xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
558 msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); 560 msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
561 local_irq_restore(flags);
562
559 if (i2c->nmsgs == 1) 563 if (i2c->nmsgs == 1)
560 /* very last, enable bus not busy as well */ 564 /* very last, enable bus not busy as well */
561 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); 565 xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f15737763608..9ee9a15e7134 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
2293EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf); 2293EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
2294 2294
2295/** 2295/**
2296 * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg 2296 * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
2297 * @msg: the message to be synced with
2298 * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL. 2297 * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
2298 * @msg: the message which the buffer corresponds to
2299 * @xferred: bool saying if the message was transferred
2299 */ 2300 */
2300void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf) 2301void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
2301{ 2302{
2302 if (!buf || buf == msg->buf) 2303 if (!buf || buf == msg->buf)
2303 return; 2304 return;
2304 2305
2305 if (msg->flags & I2C_M_RD) 2306 if (xferred && msg->flags & I2C_M_RD)
2306 memcpy(msg->buf, buf, msg->len); 2307 memcpy(msg->buf, buf, msg->len);
2307 2308
2308 kfree(buf); 2309 kfree(buf);
2309} 2310}
2310EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf); 2311EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
2311 2312
2312MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); 2313MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
2313MODULE_DESCRIPTION("I2C-Bus main module"); 2314MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 7589f2ad1dae..631360b14ca7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
187 187
188int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) 188int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
189{ 189{
190 u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask; 190 u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
191 struct st_lsm6dsx_hw *hw = sensor->hw; 191 struct st_lsm6dsx_hw *hw = sensor->hw;
192 struct st_lsm6dsx_sensor *cur_sensor; 192 struct st_lsm6dsx_sensor *cur_sensor;
193 int i, err, data; 193 int i, err, data;
194 __le16 wdata; 194 __le16 wdata;
195 195
196 if (!hw->sip)
197 return 0;
198
196 for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) { 199 for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
197 cur_sensor = iio_priv(hw->iio_devs[i]); 200 cur_sensor = iio_priv(hw->iio_devs[i]);
198 201
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
203 : cur_sensor->watermark; 206 : cur_sensor->watermark;
204 207
205 fifo_watermark = min_t(u16, fifo_watermark, cur_watermark); 208 fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
206 sip += cur_sensor->sip;
207 } 209 }
208 210
209 if (!sip) 211 fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
210 return 0; 212 fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
211
212 fifo_watermark = max_t(u16, fifo_watermark, sip);
213 fifo_watermark = (fifo_watermark / sip) * sip;
214 fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl; 213 fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
215 214
216 err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1, 215 err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 54e383231d1e..c31b9633f32d 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
258static const struct spi_device_id maxim_thermocouple_id[] = { 258static const struct spi_device_id maxim_thermocouple_id[] = {
259 {"max6675", MAX6675}, 259 {"max6675", MAX6675},
260 {"max31855", MAX31855}, 260 {"max31855", MAX31855},
261 {"max31856", MAX31855},
262 {}, 261 {},
263}; 262};
264MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id); 263MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 0bee1f4b914e..3208ad6ad540 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
338} 338}
339 339
340/** 340/**
341 * del_gid - Delete GID table entry
342 *
343 * @ib_dev: IB device whose GID entry to be deleted
344 * @port: Port number of the IB device
345 * @table: GID table of the IB device for a port
346 * @ix: GID entry index to delete
347 *
348 */
349static void del_gid(struct ib_device *ib_dev, u8 port,
350 struct ib_gid_table *table, int ix)
351{
352 struct ib_gid_table_entry *entry;
353
354 lockdep_assert_held(&table->lock);
355
356 pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
357 ib_dev->name, port, ix,
358 table->data_vec[ix]->attr.gid.raw);
359
360 write_lock_irq(&table->rwlock);
361 entry = table->data_vec[ix];
362 entry->state = GID_TABLE_ENTRY_PENDING_DEL;
363 /*
364 * For non RoCE protocol, GID entry slot is ready to use.
365 */
366 if (!rdma_protocol_roce(ib_dev, port))
367 table->data_vec[ix] = NULL;
368 write_unlock_irq(&table->rwlock);
369
370 put_gid_entry_locked(entry);
371}
372
373/**
341 * add_modify_gid - Add or modify GID table entry 374 * add_modify_gid - Add or modify GID table entry
342 * 375 *
343 * @table: GID table in which GID to be added or modified 376 * @table: GID table in which GID to be added or modified
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
358 * this index. 391 * this index.
359 */ 392 */
360 if (is_gid_entry_valid(table->data_vec[attr->index])) 393 if (is_gid_entry_valid(table->data_vec[attr->index]))
361 put_gid_entry(table->data_vec[attr->index]); 394 del_gid(attr->device, attr->port_num, table, attr->index);
362 395
363 /* 396 /*
364 * Some HCA's report multiple GID entries with only one valid GID, and 397 * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ done:
386 return ret; 419 return ret;
387} 420}
388 421
389/**
390 * del_gid - Delete GID table entry
391 *
392 * @ib_dev: IB device whose GID entry to be deleted
393 * @port: Port number of the IB device
394 * @table: GID table of the IB device for a port
395 * @ix: GID entry index to delete
396 *
397 */
398static void del_gid(struct ib_device *ib_dev, u8 port,
399 struct ib_gid_table *table, int ix)
400{
401 struct ib_gid_table_entry *entry;
402
403 lockdep_assert_held(&table->lock);
404
405 pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
406 ib_dev->name, port, ix,
407 table->data_vec[ix]->attr.gid.raw);
408
409 write_lock_irq(&table->rwlock);
410 entry = table->data_vec[ix];
411 entry->state = GID_TABLE_ENTRY_PENDING_DEL;
412 /*
413 * For non RoCE protocol, GID entry slot is ready to use.
414 */
415 if (!rdma_protocol_roce(ib_dev, port))
416 table->data_vec[ix] = NULL;
417 write_unlock_irq(&table->rwlock);
418
419 put_gid_entry_locked(entry);
420}
421
422/* rwlock should be read locked, or lock should be held */ 422/* rwlock should be read locked, or lock should be held */
423static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, 423static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
424 const struct ib_gid_attr *val, bool default_gid, 424 const struct ib_gid_attr *val, bool default_gid,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f72677291b69..a36c94930c31 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
724 dgid = (union ib_gid *) &addr->sib_addr; 724 dgid = (union ib_gid *) &addr->sib_addr;
725 pkey = ntohs(addr->sib_pkey); 725 pkey = ntohs(addr->sib_pkey);
726 726
727 mutex_lock(&lock);
727 list_for_each_entry(cur_dev, &dev_list, list) { 728 list_for_each_entry(cur_dev, &dev_list, list) {
728 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { 729 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
729 if (!rdma_cap_af_ib(cur_dev->device, p)) 730 if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
750 cma_dev = cur_dev; 751 cma_dev = cur_dev;
751 sgid = gid; 752 sgid = gid;
752 id_priv->id.port_num = p; 753 id_priv->id.port_num = p;
754 goto found;
753 } 755 }
754 } 756 }
755 } 757 }
756 } 758 }
757 759 mutex_unlock(&lock);
758 if (!cma_dev) 760 return -ENODEV;
759 return -ENODEV;
760 761
761found: 762found:
762 cma_attach_to_dev(id_priv, cma_dev); 763 cma_attach_to_dev(id_priv, cma_dev);
763 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); 764 mutex_unlock(&lock);
764 memcpy(&addr->sib_addr, &sgid, sizeof sgid); 765 addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
766 memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
765 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); 767 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
766 return 0; 768 return 0;
767} 769}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6eb64c6f0802..c4118bcd5103 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
882 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); 882 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
883 if (!uverbs_destroy_uobject(obj, reason)) 883 if (!uverbs_destroy_uobject(obj, reason))
884 ret = 0; 884 ret = 0;
885 else
886 atomic_set(&obj->usecnt, 0);
885 } 887 }
886 return ret; 888 return ret;
887} 889}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ec8fb289621f..21863ddde63e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
124static DEFINE_IDR(ctx_idr); 124static DEFINE_IDR(ctx_idr);
125static DEFINE_IDR(multicast_idr); 125static DEFINE_IDR(multicast_idr);
126 126
127static const struct file_operations ucma_fops;
128
127static inline struct ucma_context *_ucma_find_context(int id, 129static inline struct ucma_context *_ucma_find_context(int id,
128 struct ucma_file *file) 130 struct ucma_file *file)
129{ 131{
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
1581 f = fdget(cmd.fd); 1583 f = fdget(cmd.fd);
1582 if (!f.file) 1584 if (!f.file)
1583 return -ENOENT; 1585 return -ENOENT;
1586 if (f.file->f_op != &ucma_fops) {
1587 ret = -EINVAL;
1588 goto file_put;
1589 }
1584 1590
1585 /* Validate current fd and prevent destruction of id. */ 1591 /* Validate current fd and prevent destruction of id. */
1586 ctx = ucma_get_ctx(f.file->private_data, cmd.id); 1592 ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1753,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
1753 mutex_lock(&mut); 1759 mutex_lock(&mut);
1754 if (!ctx->closing) { 1760 if (!ctx->closing) {
1755 mutex_unlock(&mut); 1761 mutex_unlock(&mut);
1762 ucma_put_ctx(ctx);
1763 wait_for_completion(&ctx->comp);
1756 /* rdma_destroy_id ensures that no event handlers are 1764 /* rdma_destroy_id ensures that no event handlers are
1757 * inflight for that id before releasing it. 1765 * inflight for that id before releasing it.
1758 */ 1766 */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a21d5214afc3..e012ca80f9d1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
2027 2027
2028 if ((cmd->base.attr_mask & IB_QP_CUR_STATE && 2028 if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
2029 cmd->base.cur_qp_state > IB_QPS_ERR) || 2029 cmd->base.cur_qp_state > IB_QPS_ERR) ||
2030 cmd->base.qp_state > IB_QPS_ERR) { 2030 (cmd->base.attr_mask & IB_QP_STATE &&
2031 cmd->base.qp_state > IB_QPS_ERR)) {
2031 ret = -EINVAL; 2032 ret = -EINVAL;
2032 goto release_qp; 2033 goto release_qp;
2033 } 2034 }
2034 2035
2035 attr->qp_state = cmd->base.qp_state; 2036 if (cmd->base.attr_mask & IB_QP_STATE)
2036 attr->cur_qp_state = cmd->base.cur_qp_state; 2037 attr->qp_state = cmd->base.qp_state;
2037 attr->path_mtu = cmd->base.path_mtu; 2038 if (cmd->base.attr_mask & IB_QP_CUR_STATE)
2038 attr->path_mig_state = cmd->base.path_mig_state; 2039 attr->cur_qp_state = cmd->base.cur_qp_state;
2039 attr->qkey = cmd->base.qkey; 2040 if (cmd->base.attr_mask & IB_QP_PATH_MTU)
2040 attr->rq_psn = cmd->base.rq_psn; 2041 attr->path_mtu = cmd->base.path_mtu;
2041 attr->sq_psn = cmd->base.sq_psn; 2042 if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
2042 attr->dest_qp_num = cmd->base.dest_qp_num; 2043 attr->path_mig_state = cmd->base.path_mig_state;
2043 attr->qp_access_flags = cmd->base.qp_access_flags; 2044 if (cmd->base.attr_mask & IB_QP_QKEY)
2044 attr->pkey_index = cmd->base.pkey_index; 2045 attr->qkey = cmd->base.qkey;
2045 attr->alt_pkey_index = cmd->base.alt_pkey_index; 2046 if (cmd->base.attr_mask & IB_QP_RQ_PSN)
2046 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; 2047 attr->rq_psn = cmd->base.rq_psn;
2047 attr->max_rd_atomic = cmd->base.max_rd_atomic; 2048 if (cmd->base.attr_mask & IB_QP_SQ_PSN)
2048 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; 2049 attr->sq_psn = cmd->base.sq_psn;
2049 attr->min_rnr_timer = cmd->base.min_rnr_timer; 2050 if (cmd->base.attr_mask & IB_QP_DEST_QPN)
2050 attr->port_num = cmd->base.port_num; 2051 attr->dest_qp_num = cmd->base.dest_qp_num;
2051 attr->timeout = cmd->base.timeout; 2052 if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
2052 attr->retry_cnt = cmd->base.retry_cnt; 2053 attr->qp_access_flags = cmd->base.qp_access_flags;
2053 attr->rnr_retry = cmd->base.rnr_retry; 2054 if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
2054 attr->alt_port_num = cmd->base.alt_port_num; 2055 attr->pkey_index = cmd->base.pkey_index;
2055 attr->alt_timeout = cmd->base.alt_timeout; 2056 if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2056 attr->rate_limit = cmd->rate_limit; 2057 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
2058 if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
2059 attr->max_rd_atomic = cmd->base.max_rd_atomic;
2060 if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2061 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
2062 if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
2063 attr->min_rnr_timer = cmd->base.min_rnr_timer;
2064 if (cmd->base.attr_mask & IB_QP_PORT)
2065 attr->port_num = cmd->base.port_num;
2066 if (cmd->base.attr_mask & IB_QP_TIMEOUT)
2067 attr->timeout = cmd->base.timeout;
2068 if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
2069 attr->retry_cnt = cmd->base.retry_cnt;
2070 if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
2071 attr->rnr_retry = cmd->base.rnr_retry;
2072 if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
2073 attr->alt_port_num = cmd->base.alt_port_num;
2074 attr->alt_timeout = cmd->base.alt_timeout;
2075 attr->alt_pkey_index = cmd->base.alt_pkey_index;
2076 }
2077 if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
2078 attr->rate_limit = cmd->rate_limit;
2057 2079
2058 if (cmd->base.attr_mask & IB_QP_AV) 2080 if (cmd->base.attr_mask & IB_QP_AV)
2059 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, 2081 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 823beca448e1..50152c1b1004 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
440 list_del(&entry->obj_list); 440 list_del(&entry->obj_list);
441 kfree(entry); 441 kfree(entry);
442 } 442 }
443 file->ev_queue.is_closed = 1;
443 spin_unlock_irq(&file->ev_queue.lock); 444 spin_unlock_irq(&file->ev_queue.lock);
444 445
445 uverbs_close_fd(filp); 446 uverbs_close_fd(filp);
@@ -1050,7 +1051,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
1050 uverbs_dev->num_comp_vectors = device->num_comp_vectors; 1051 uverbs_dev->num_comp_vectors = device->num_comp_vectors;
1051 1052
1052 if (ib_uverbs_create_uapi(device, uverbs_dev)) 1053 if (ib_uverbs_create_uapi(device, uverbs_dev))
1053 goto err; 1054 goto err_uapi;
1054 1055
1055 cdev_init(&uverbs_dev->cdev, NULL); 1056 cdev_init(&uverbs_dev->cdev, NULL);
1056 uverbs_dev->cdev.owner = THIS_MODULE; 1057 uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1078,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
1077 1078
1078err_class: 1079err_class:
1079 device_destroy(uverbs_class, uverbs_dev->cdev.dev); 1080 device_destroy(uverbs_class, uverbs_dev->cdev.dev);
1080
1081err_cdev: 1081err_cdev:
1082 cdev_del(&uverbs_dev->cdev); 1082 cdev_del(&uverbs_dev->cdev);
1083err_uapi:
1083 clear_bit(devnum, dev_map); 1084 clear_bit(devnum, dev_map);
1084
1085err: 1085err:
1086 if (atomic_dec_and_test(&uverbs_dev->refcount)) 1086 if (atomic_dec_and_test(&uverbs_dev->refcount))
1087 ib_uverbs_comp_dev(uverbs_dev); 1087 ib_uverbs_comp_dev(uverbs_dev);
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 73ea6f0db88f..be854628a7c6 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
248 kfree(rcu_dereference_protected(*slot, true)); 248 kfree(rcu_dereference_protected(*slot, true));
249 radix_tree_iter_delete(&uapi->radix, &iter, slot); 249 radix_tree_iter_delete(&uapi->radix, &iter, slot);
250 } 250 }
251 kfree(uapi);
251} 252}
252 253
253struct uverbs_api *uverbs_alloc_api( 254struct uverbs_api *uverbs_alloc_api(
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bbfb86eb2d24..bc2b9e038439 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
833 "Failed to destroy Shadow QP"); 833 "Failed to destroy Shadow QP");
834 return rc; 834 return rc;
835 } 835 }
836 bnxt_qplib_free_qp_res(&rdev->qplib_res,
837 &rdev->qp1_sqp->qplib_qp);
836 mutex_lock(&rdev->qp_lock); 838 mutex_lock(&rdev->qp_lock);
837 list_del(&rdev->qp1_sqp->list); 839 list_del(&rdev->qp1_sqp->list);
838 atomic_dec(&rdev->qp_count); 840 atomic_dec(&rdev->qp_count);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 20b9f31052bf..85cd1a3593d6 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
78/* Mutex to protect the list of bnxt_re devices added */ 78/* Mutex to protect the list of bnxt_re devices added */
79static DEFINE_MUTEX(bnxt_re_dev_lock); 79static DEFINE_MUTEX(bnxt_re_dev_lock);
80static struct workqueue_struct *bnxt_re_wq; 80static struct workqueue_struct *bnxt_re_wq;
81static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); 81static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
82 82
83/* SR-IOV helper functions */ 83/* SR-IOV helper functions */
84 84
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
182 if (!rdev) 182 if (!rdev)
183 return; 183 return;
184 184
185 bnxt_re_ib_unreg(rdev, false); 185 bnxt_re_ib_unreg(rdev);
186} 186}
187 187
188static void bnxt_re_stop_irq(void *handle) 188static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
251/* Driver registration routines used to let the networking driver (bnxt_en) 251/* Driver registration routines used to let the networking driver (bnxt_en)
252 * to know that the RoCE driver is now installed 252 * to know that the RoCE driver is now installed
253 */ 253 */
254static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) 254static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
255{ 255{
256 struct bnxt_en_dev *en_dev; 256 struct bnxt_en_dev *en_dev;
257 int rc; 257 int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
260 return -EINVAL; 260 return -EINVAL;
261 261
262 en_dev = rdev->en_dev; 262 en_dev = rdev->en_dev;
263 /* Acquire rtnl lock if it is not invokded from netdev event */
264 if (lock_wait)
265 rtnl_lock();
266 263
267 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, 264 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
268 BNXT_ROCE_ULP); 265 BNXT_ROCE_ULP);
269 if (lock_wait)
270 rtnl_unlock();
271 return rc; 266 return rc;
272} 267}
273 268
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
281 276
282 en_dev = rdev->en_dev; 277 en_dev = rdev->en_dev;
283 278
284 rtnl_lock();
285 rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, 279 rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
286 &bnxt_re_ulp_ops, rdev); 280 &bnxt_re_ulp_ops, rdev);
287 rtnl_unlock();
288 return rc; 281 return rc;
289} 282}
290 283
291static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) 284static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
292{ 285{
293 struct bnxt_en_dev *en_dev; 286 struct bnxt_en_dev *en_dev;
294 int rc; 287 int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
298 291
299 en_dev = rdev->en_dev; 292 en_dev = rdev->en_dev;
300 293
301 if (lock_wait)
302 rtnl_lock();
303 294
304 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); 295 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
305 296
306 if (lock_wait)
307 rtnl_unlock();
308 return rc; 297 return rc;
309} 298}
310 299
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
320 309
321 num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); 310 num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
322 311
323 rtnl_lock();
324 num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, 312 num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
325 rdev->msix_entries, 313 rdev->msix_entries,
326 num_msix_want); 314 num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
335 } 323 }
336 rdev->num_msix = num_msix_got; 324 rdev->num_msix = num_msix_got;
337done: 325done:
338 rtnl_unlock();
339 return rc; 326 return rc;
340} 327}
341 328
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
358 fw_msg->timeout = timeout; 345 fw_msg->timeout = timeout;
359} 346}
360 347
361static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, 348static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
362 bool lock_wait)
363{ 349{
364 struct bnxt_en_dev *en_dev = rdev->en_dev; 350 struct bnxt_en_dev *en_dev = rdev->en_dev;
365 struct hwrm_ring_free_input req = {0}; 351 struct hwrm_ring_free_input req = {0};
366 struct hwrm_ring_free_output resp; 352 struct hwrm_ring_free_output resp;
367 struct bnxt_fw_msg fw_msg; 353 struct bnxt_fw_msg fw_msg;
368 bool do_unlock = false;
369 int rc = -EINVAL; 354 int rc = -EINVAL;
370 355
371 if (!en_dev) 356 if (!en_dev)
372 return rc; 357 return rc;
373 358
374 memset(&fw_msg, 0, sizeof(fw_msg)); 359 memset(&fw_msg, 0, sizeof(fw_msg));
375 if (lock_wait) {
376 rtnl_lock();
377 do_unlock = true;
378 }
379 360
380 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); 361 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
381 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 362 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
386 if (rc) 367 if (rc)
387 dev_err(rdev_to_dev(rdev), 368 dev_err(rdev_to_dev(rdev),
388 "Failed to free HW ring:%d :%#x", req.ring_id, rc); 369 "Failed to free HW ring:%d :%#x", req.ring_id, rc);
389 if (do_unlock)
390 rtnl_unlock();
391 return rc; 370 return rc;
392} 371}
393 372
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
405 return rc; 384 return rc;
406 385
407 memset(&fw_msg, 0, sizeof(fw_msg)); 386 memset(&fw_msg, 0, sizeof(fw_msg));
408 rtnl_lock();
409 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); 387 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
410 req.enables = 0; 388 req.enables = 0;
411 req.page_tbl_addr = cpu_to_le64(dma_arr[0]); 389 req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
426 if (!rc) 404 if (!rc)
427 *fw_ring_id = le16_to_cpu(resp.ring_id); 405 *fw_ring_id = le16_to_cpu(resp.ring_id);
428 406
429 rtnl_unlock();
430 return rc; 407 return rc;
431} 408}
432 409
433static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, 410static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
434 u32 fw_stats_ctx_id, bool lock_wait) 411 u32 fw_stats_ctx_id)
435{ 412{
436 struct bnxt_en_dev *en_dev = rdev->en_dev; 413 struct bnxt_en_dev *en_dev = rdev->en_dev;
437 struct hwrm_stat_ctx_free_input req = {0}; 414 struct hwrm_stat_ctx_free_input req = {0};
438 struct bnxt_fw_msg fw_msg; 415 struct bnxt_fw_msg fw_msg;
439 bool do_unlock = false;
440 int rc = -EINVAL; 416 int rc = -EINVAL;
441 417
442 if (!en_dev) 418 if (!en_dev)
443 return rc; 419 return rc;
444 420
445 memset(&fw_msg, 0, sizeof(fw_msg)); 421 memset(&fw_msg, 0, sizeof(fw_msg));
446 if (lock_wait) {
447 rtnl_lock();
448 do_unlock = true;
449 }
450 422
451 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); 423 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
452 req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); 424 req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
457 dev_err(rdev_to_dev(rdev), 429 dev_err(rdev_to_dev(rdev),
458 "Failed to free HW stats context %#x", rc); 430 "Failed to free HW stats context %#x", rc);
459 431
460 if (do_unlock)
461 rtnl_unlock();
462 return rc; 432 return rc;
463} 433}
464 434
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
478 return rc; 448 return rc;
479 449
480 memset(&fw_msg, 0, sizeof(fw_msg)); 450 memset(&fw_msg, 0, sizeof(fw_msg));
481 rtnl_lock();
482 451
483 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); 452 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
484 req.update_period_ms = cpu_to_le32(1000); 453 req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
490 if (!rc) 459 if (!rc)
491 *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); 460 *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
492 461
493 rtnl_unlock();
494 return rc; 462 return rc;
495} 463}
496 464
@@ -929,19 +897,19 @@ fail:
929 return rc; 897 return rc;
930} 898}
931 899
932static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) 900static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
933{ 901{
934 int i; 902 int i;
935 903
936 for (i = 0; i < rdev->num_msix - 1; i++) { 904 for (i = 0; i < rdev->num_msix - 1; i++) {
937 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); 905 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
938 bnxt_qplib_free_nq(&rdev->nq[i]); 906 bnxt_qplib_free_nq(&rdev->nq[i]);
939 } 907 }
940} 908}
941 909
942static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) 910static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
943{ 911{
944 bnxt_re_free_nq_res(rdev, lock_wait); 912 bnxt_re_free_nq_res(rdev);
945 913
946 if (rdev->qplib_res.dpi_tbl.max) { 914 if (rdev->qplib_res.dpi_tbl.max) {
947 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, 915 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
1219 return 0; 1187 return 0;
1220} 1188}
1221 1189
1222static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) 1190static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
1223{ 1191{
1224 int i, rc; 1192 int i, rc;
1225 1193
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
1234 cancel_delayed_work(&rdev->worker); 1202 cancel_delayed_work(&rdev->worker);
1235 1203
1236 bnxt_re_cleanup_res(rdev); 1204 bnxt_re_cleanup_res(rdev);
1237 bnxt_re_free_res(rdev, lock_wait); 1205 bnxt_re_free_res(rdev);
1238 1206
1239 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { 1207 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
1240 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); 1208 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
1241 if (rc) 1209 if (rc)
1242 dev_warn(rdev_to_dev(rdev), 1210 dev_warn(rdev_to_dev(rdev),
1243 "Failed to deinitialize RCFW: %#x", rc); 1211 "Failed to deinitialize RCFW: %#x", rc);
1244 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, 1212 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1245 lock_wait);
1246 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); 1213 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
1247 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 1214 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1248 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); 1215 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
1249 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 1216 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1250 } 1217 }
1251 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { 1218 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
1252 rc = bnxt_re_free_msix(rdev, lock_wait); 1219 rc = bnxt_re_free_msix(rdev);
1253 if (rc) 1220 if (rc)
1254 dev_warn(rdev_to_dev(rdev), 1221 dev_warn(rdev_to_dev(rdev),
1255 "Failed to free MSI-X vectors: %#x", rc); 1222 "Failed to free MSI-X vectors: %#x", rc);
1256 } 1223 }
1257 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { 1224 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
1258 rc = bnxt_re_unregister_netdev(rdev, lock_wait); 1225 rc = bnxt_re_unregister_netdev(rdev);
1259 if (rc) 1226 if (rc)
1260 dev_warn(rdev_to_dev(rdev), 1227 dev_warn(rdev_to_dev(rdev),
1261 "Failed to unregister with netdev: %#x", rc); 1228 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1276{ 1243{
1277 int i, j, rc; 1244 int i, j, rc;
1278 1245
1246 bool locked;
1247
1248 /* Acquire rtnl lock through out this function */
1249 rtnl_lock();
1250 locked = true;
1251
1279 /* Registered a new RoCE device instance to netdev */ 1252 /* Registered a new RoCE device instance to netdev */
1280 rc = bnxt_re_register_netdev(rdev); 1253 rc = bnxt_re_register_netdev(rdev);
1281 if (rc) { 1254 if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1374 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); 1347 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
1375 } 1348 }
1376 1349
1350 rtnl_unlock();
1351 locked = false;
1352
1377 /* Register ib dev */ 1353 /* Register ib dev */
1378 rc = bnxt_re_register_ib(rdev); 1354 rc = bnxt_re_register_ib(rdev);
1379 if (rc) { 1355 if (rc) {
1380 pr_err("Failed to register with IB: %#x\n", rc); 1356 pr_err("Failed to register with IB: %#x\n", rc);
1381 goto fail; 1357 goto fail;
1382 } 1358 }
1359 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
1383 dev_info(rdev_to_dev(rdev), "Device registered successfully"); 1360 dev_info(rdev_to_dev(rdev), "Device registered successfully");
1384 for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { 1361 for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
1385 rc = device_create_file(&rdev->ibdev.dev, 1362 rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1395 goto fail; 1372 goto fail;
1396 } 1373 }
1397 } 1374 }
1398 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
1399 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, 1375 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1400 &rdev->active_width); 1376 &rdev->active_width);
1401 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); 1377 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1404 1380
1405 return 0; 1381 return 0;
1406free_sctx: 1382free_sctx:
1407 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); 1383 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1408free_ctx: 1384free_ctx:
1409 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); 1385 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
1410disable_rcfw: 1386disable_rcfw:
1411 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 1387 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1412free_ring: 1388free_ring:
1413 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); 1389 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
1414free_rcfw: 1390free_rcfw:
1415 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 1391 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1416fail: 1392fail:
1417 bnxt_re_ib_unreg(rdev, true); 1393 if (!locked)
1394 rtnl_lock();
1395 bnxt_re_ib_unreg(rdev);
1396 rtnl_unlock();
1397
1418 return rc; 1398 return rc;
1419} 1399}
1420 1400
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1567 */ 1547 */
1568 if (atomic_read(&rdev->sched_count) > 0) 1548 if (atomic_read(&rdev->sched_count) > 0)
1569 goto exit; 1549 goto exit;
1570 bnxt_re_ib_unreg(rdev, false); 1550 bnxt_re_ib_unreg(rdev);
1571 bnxt_re_remove_one(rdev); 1551 bnxt_re_remove_one(rdev);
1572 bnxt_re_dev_unreg(rdev); 1552 bnxt_re_dev_unreg(rdev);
1573 break; 1553 break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
1646 */ 1626 */
1647 flush_workqueue(bnxt_re_wq); 1627 flush_workqueue(bnxt_re_wq);
1648 bnxt_re_dev_stop(rdev); 1628 bnxt_re_dev_stop(rdev);
1649 bnxt_re_ib_unreg(rdev, true); 1629 /* Acquire the rtnl_lock as the L2 resources are freed here */
1630 rtnl_lock();
1631 bnxt_re_ib_unreg(rdev);
1632 rtnl_unlock();
1650 bnxt_re_remove_one(rdev); 1633 bnxt_re_remove_one(rdev);
1651 bnxt_re_dev_unreg(rdev); 1634 bnxt_re_dev_unreg(rdev);
1652 } 1635 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e426b990c1dd..6ad0d46ab879 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
196 struct bnxt_qplib_qp *qp) 196 struct bnxt_qplib_qp *qp)
197{ 197{
198 struct bnxt_qplib_q *rq = &qp->rq; 198 struct bnxt_qplib_q *rq = &qp->rq;
199 struct bnxt_qplib_q *sq = &qp->rq; 199 struct bnxt_qplib_q *sq = &qp->sq;
200 int rc = 0; 200 int rc = 0;
201 201
202 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) { 202 if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b3203afa3b1d..347fe18b1a41 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
1685 schp = to_c4iw_cq(qhp->ibqp.send_cq); 1685 schp = to_c4iw_cq(qhp->ibqp.send_cq);
1686 1686
1687 if (qhp->ibqp.uobject) { 1687 if (qhp->ibqp.uobject) {
1688
1689 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1690 if (qhp->wq.flushed)
1691 return;
1692
1693 qhp->wq.flushed = 1;
1688 t4_set_wq_in_error(&qhp->wq, 0); 1694 t4_set_wq_in_error(&qhp->wq, 0);
1689 t4_set_cq_in_error(&rchp->cq); 1695 t4_set_cq_in_error(&rchp->cq);
1690 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1696 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2c19bf772451..e1668bcc2d13 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6733 struct hfi1_devdata *dd = ppd->dd; 6733 struct hfi1_devdata *dd = ppd->dd;
6734 struct send_context *sc; 6734 struct send_context *sc;
6735 int i; 6735 int i;
6736 int sc_flags;
6736 6737
6737 if (flags & FREEZE_SELF) 6738 if (flags & FREEZE_SELF)
6738 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6739 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6743 /* notify all SDMA engines that they are going into a freeze */ 6744 /* notify all SDMA engines that they are going into a freeze */
6744 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); 6745 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6745 6746
6747 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6748 SCF_LINK_DOWN : 0);
6746 /* do halt pre-handling on all enabled send contexts */ 6749 /* do halt pre-handling on all enabled send contexts */
6747 for (i = 0; i < dd->num_send_contexts; i++) { 6750 for (i = 0; i < dd->num_send_contexts; i++) {
6748 sc = dd->send_contexts[i].sc; 6751 sc = dd->send_contexts[i].sc;
6749 if (sc && (sc->flags & SCF_ENABLED)) 6752 if (sc && (sc->flags & SCF_ENABLED))
6750 sc_stop(sc, SCF_FROZEN | SCF_HALTED); 6753 sc_stop(sc, sc_flags);
6751 } 6754 }
6752 6755
6753 /* Send context are frozen. Notify user space */ 6756 /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10674 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 10677 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10675 10678
10676 handle_linkup_change(dd, 1); 10679 handle_linkup_change(dd, 1);
10680 pio_kernel_linkup(dd);
10677 10681
10678 /* 10682 /*
10679 * After link up, a new link width will have been set. 10683 * After link up, a new link width will have been set.
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index eec83757d55f..6c967dde58e7 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
893 } 893 }
894 894
895 /* 895 /*
896 * A secondary bus reset (SBR) issues a hot reset to our device. 896 * This is an end around to do an SBR during probe time. A new API needs
897 * The following routine does a 1s wait after the reset is dropped 897 * to be implemented to have cleaner interface but this fixes the
898 * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 - 898 * current brokenness
899 * Conventional Reset, paragraph 3, line 35 also says that a 1s
900 * delay after a reset is required. Per spec requirements,
901 * the link is either working or not after that point.
902 */ 899 */
903 return pci_reset_bus(dev); 900 return pci_bridge_secondary_bus_reset(dev->bus->self);
904} 901}
905 902
906/* 903/*
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c2c1cba5b23b..752057647f09 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
86 unsigned long flags; 86 unsigned long flags;
87 int write = 1; /* write sendctrl back */ 87 int write = 1; /* write sendctrl back */
88 int flush = 0; /* re-read sendctrl to make sure it is flushed */ 88 int flush = 0; /* re-read sendctrl to make sure it is flushed */
89 int i;
89 90
90 spin_lock_irqsave(&dd->sendctrl_lock, flags); 91 spin_lock_irqsave(&dd->sendctrl_lock, flags);
91 92
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
95 reg |= SEND_CTRL_SEND_ENABLE_SMASK; 96 reg |= SEND_CTRL_SEND_ENABLE_SMASK;
96 /* Fall through */ 97 /* Fall through */
97 case PSC_DATA_VL_ENABLE: 98 case PSC_DATA_VL_ENABLE:
99 mask = 0;
100 for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
101 if (!dd->vld[i].mtu)
102 mask |= BIT_ULL(i);
98 /* Disallow sending on VLs not enabled */ 103 /* Disallow sending on VLs not enabled */
99 mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << 104 mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
100 SEND_CTRL_UNSUPPORTED_VL_SHIFT; 105 SEND_CTRL_UNSUPPORTED_VL_SHIFT;
101 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; 106 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
102 break; 107 break;
103 case PSC_GLOBAL_DISABLE: 108 case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
921void sc_disable(struct send_context *sc) 926void sc_disable(struct send_context *sc)
922{ 927{
923 u64 reg; 928 u64 reg;
924 unsigned long flags;
925 struct pio_buf *pbuf; 929 struct pio_buf *pbuf;
926 930
927 if (!sc) 931 if (!sc)
928 return; 932 return;
929 933
930 /* do all steps, even if already disabled */ 934 /* do all steps, even if already disabled */
931 spin_lock_irqsave(&sc->alloc_lock, flags); 935 spin_lock_irq(&sc->alloc_lock);
932 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); 936 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
933 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); 937 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
934 sc->flags &= ~SCF_ENABLED; 938 sc->flags &= ~SCF_ENABLED;
935 sc_wait_for_packet_egress(sc, 1); 939 sc_wait_for_packet_egress(sc, 1);
936 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); 940 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
937 spin_unlock_irqrestore(&sc->alloc_lock, flags);
938 941
939 /* 942 /*
940 * Flush any waiters. Once the context is disabled, 943 * Flush any waiters. Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
944 * proceed with the flush. 947 * proceed with the flush.
945 */ 948 */
946 udelay(1); 949 udelay(1);
947 spin_lock_irqsave(&sc->release_lock, flags); 950 spin_lock(&sc->release_lock);
948 if (sc->sr) { /* this context has a shadow ring */ 951 if (sc->sr) { /* this context has a shadow ring */
949 while (sc->sr_tail != sc->sr_head) { 952 while (sc->sr_tail != sc->sr_head) {
950 pbuf = &sc->sr[sc->sr_tail].pbuf; 953 pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
955 sc->sr_tail = 0; 958 sc->sr_tail = 0;
956 } 959 }
957 } 960 }
958 spin_unlock_irqrestore(&sc->release_lock, flags); 961 spin_unlock(&sc->release_lock);
962 spin_unlock_irq(&sc->alloc_lock);
959} 963}
960 964
961/* return SendEgressCtxtStatus.PacketOccupancy */ 965/* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1178 sc = dd->send_contexts[i].sc; 1182 sc = dd->send_contexts[i].sc;
1179 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) 1183 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1180 continue; 1184 continue;
1185 if (sc->flags & SCF_LINK_DOWN)
1186 continue;
1181 1187
1182 sc_enable(sc); /* will clear the sc frozen flag */ 1188 sc_enable(sc); /* will clear the sc frozen flag */
1183 } 1189 }
1184} 1190}
1185 1191
1192/**
1193 * pio_kernel_linkup() - Re-enable send contexts after linkup event
1194 * @dd: valid devive data
1195 *
1196 * When the link goes down, the freeze path is taken. However, a link down
1197 * event is different from a freeze because if the send context is re-enabled
1198 * whowever is sending data will start sending data again, which will hang
1199 * any QP that is sending data.
1200 *
1201 * The freeze path now looks at the type of event that occurs and takes this
1202 * path for link down event.
1203 */
1204void pio_kernel_linkup(struct hfi1_devdata *dd)
1205{
1206 struct send_context *sc;
1207 int i;
1208
1209 for (i = 0; i < dd->num_send_contexts; i++) {
1210 sc = dd->send_contexts[i].sc;
1211 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
1212 continue;
1213
1214 sc_enable(sc); /* will clear the sc link down flag */
1215 }
1216}
1217
1186/* 1218/*
1187 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. 1219 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1188 * Returns: 1220 * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
1382{ 1414{
1383 unsigned long flags; 1415 unsigned long flags;
1384 1416
1385 /* mark the context */
1386 sc->flags |= flag;
1387
1388 /* stop buffer allocations */ 1417 /* stop buffer allocations */
1389 spin_lock_irqsave(&sc->alloc_lock, flags); 1418 spin_lock_irqsave(&sc->alloc_lock, flags);
1419 /* mark the context */
1420 sc->flags |= flag;
1390 sc->flags &= ~SCF_ENABLED; 1421 sc->flags &= ~SCF_ENABLED;
1391 spin_unlock_irqrestore(&sc->alloc_lock, flags); 1422 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1392 wake_up(&sc->halt_wait); 1423 wake_up(&sc->halt_wait);
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 058b08f459ab..aaf372c3e5d6 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -139,6 +139,7 @@ struct send_context {
139#define SCF_IN_FREE 0x02 139#define SCF_IN_FREE 0x02
140#define SCF_HALTED 0x04 140#define SCF_HALTED 0x04
141#define SCF_FROZEN 0x08 141#define SCF_FROZEN 0x08
142#define SCF_LINK_DOWN 0x10
142 143
143struct send_context_info { 144struct send_context_info {
144 struct send_context *sc; /* allocated working context */ 145 struct send_context *sc; /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
306void pio_reset_all(struct hfi1_devdata *dd); 307void pio_reset_all(struct hfi1_devdata *dd);
307void pio_freeze(struct hfi1_devdata *dd); 308void pio_freeze(struct hfi1_devdata *dd);
308void pio_kernel_unfreeze(struct hfi1_devdata *dd); 309void pio_kernel_unfreeze(struct hfi1_devdata *dd);
310void pio_kernel_linkup(struct hfi1_devdata *dd);
309 311
310/* global PIO send control operations */ 312/* global PIO send control operations */
311#define PSC_GLOBAL_ENABLE 0 313#define PSC_GLOBAL_ENABLE 0
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a3a7b33196d6..5c88706121c1 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
828 if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { 828 if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
829 if (++req->iov_idx == req->data_iovs) { 829 if (++req->iov_idx == req->data_iovs) {
830 ret = -EFAULT; 830 ret = -EFAULT;
831 goto free_txreq; 831 goto free_tx;
832 } 832 }
833 iovec = &req->iovs[req->iov_idx]; 833 iovec = &req->iovs[req->iov_idx];
834 WARN_ON(iovec->offset); 834 WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 13374c727b14..a7c586a5589d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1582 struct hfi1_pportdata *ppd; 1582 struct hfi1_pportdata *ppd;
1583 struct hfi1_devdata *dd; 1583 struct hfi1_devdata *dd;
1584 u8 sc5; 1584 u8 sc5;
1585 u8 sl;
1585 1586
1586 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && 1587 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
1587 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) 1588 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1590 /* test the mapping for validity */ 1591 /* test the mapping for validity */
1591 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); 1592 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1592 ppd = ppd_from_ibp(ibp); 1593 ppd = ppd_from_ibp(ibp);
1593 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
1594 dd = dd_from_ppd(ppd); 1594 dd = dd_from_ppd(ppd);
1595
1596 sl = rdma_ah_get_sl(ah_attr);
1597 if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
1598 return -EINVAL;
1599
1600 sc5 = ibp->sl_to_sc[sl];
1595 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) 1601 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
1596 return -EINVAL; 1602 return -EINVAL;
1597 return 0; 1603 return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ca0f1ee26091..0bbeaaae47e0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
517 props->page_size_cap = dev->dev->caps.page_size_cap; 517 props->page_size_cap = dev->dev->caps.page_size_cap;
518 props->max_qp = dev->dev->quotas.qp; 518 props->max_qp = dev->dev->quotas.qp;
519 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 519 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
520 props->max_send_sge = dev->dev->caps.max_sq_sg; 520 props->max_send_sge =
521 props->max_recv_sge = dev->dev->caps.max_rq_sg; 521 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
522 props->max_sge_rd = MLX4_MAX_SGE_RD; 522 props->max_recv_sge =
523 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
524 props->max_sge_rd = MLX4_MAX_SGE_RD;
523 props->max_cq = dev->dev->quotas.cq; 525 props->max_cq = dev->dev->quotas.cq;
524 props->max_cqe = dev->dev->caps.max_cqes; 526 props->max_cqe = dev->dev->caps.max_cqes;
525 props->max_mr = dev->dev->quotas.mpt; 527 props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ac116d63e466..f2f11e652dcd 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
723 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); 723 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
724 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); 724 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
725 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); 725 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
726 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
726 struct devx_obj *obj; 727 struct devx_obj *obj;
727 int err; 728 int err;
728 729
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
754 755
755 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); 756 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
756 if (err) 757 if (err)
757 goto obj_free; 758 goto obj_destroy;
758 759
759 return 0; 760 return 0;
760 761
762obj_destroy:
763 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
761obj_free: 764obj_free:
762 kfree(obj); 765 kfree(obj);
763 return err; 766 return err;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ea01b8dd2be6..3d5424f335cb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
1027 1027
1028 skb_queue_head_init(&skqueue); 1028 skb_queue_head_init(&skqueue);
1029 1029
1030 netif_tx_lock_bh(p->dev);
1030 spin_lock_irq(&priv->lock); 1031 spin_lock_irq(&priv->lock);
1031 set_bit(IPOIB_FLAG_OPER_UP, &p->flags); 1032 set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
1032 if (p->neigh) 1033 if (p->neigh)
1033 while ((skb = __skb_dequeue(&p->neigh->queue))) 1034 while ((skb = __skb_dequeue(&p->neigh->queue)))
1034 __skb_queue_tail(&skqueue, skb); 1035 __skb_queue_tail(&skqueue, skb);
1035 spin_unlock_irq(&priv->lock); 1036 spin_unlock_irq(&priv->lock);
1037 netif_tx_unlock_bh(p->dev);
1036 1038
1037 while ((skb = __skb_dequeue(&skqueue))) { 1039 while ((skb = __skb_dequeue(&skqueue))) {
1038 skb->dev = p->dev; 1040 skb->dev = p->dev;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 444d16520506..0b34e909505f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
2951{ 2951{
2952 struct srp_target_port *target = host_to_target(scmnd->device->host); 2952 struct srp_target_port *target = host_to_target(scmnd->device->host);
2953 struct srp_rdma_ch *ch; 2953 struct srp_rdma_ch *ch;
2954 int i; 2954 int i, j;
2955 u8 status; 2955 u8 status;
2956 2956
2957 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2957 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
2965 2965
2966 for (i = 0; i < target->ch_count; i++) { 2966 for (i = 0; i < target->ch_count; i++) {
2967 ch = &target->ch[i]; 2967 ch = &target->ch[i];
2968 for (i = 0; i < target->req_ring_size; ++i) { 2968 for (j = 0; j < target->req_ring_size; ++j) {
2969 struct srp_request *req = &ch->req_ring[i]; 2969 struct srp_request *req = &ch->req_ring[j];
2970 2970
2971 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2971 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2972 } 2972 }
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 6f62da2909ec..6caee807cafa 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
75 */ 75 */
76 76
77 77
78static unsigned char atakbd_keycode[0x72] = { /* American layout */ 78static unsigned char atakbd_keycode[0x73] = { /* American layout */
79 [0] = KEY_GRAVE,
80 [1] = KEY_ESC, 79 [1] = KEY_ESC,
81 [2] = KEY_1, 80 [2] = KEY_1,
82 [3] = KEY_2, 81 [3] = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
117 [38] = KEY_L, 116 [38] = KEY_L,
118 [39] = KEY_SEMICOLON, 117 [39] = KEY_SEMICOLON,
119 [40] = KEY_APOSTROPHE, 118 [40] = KEY_APOSTROPHE,
120 [41] = KEY_BACKSLASH, /* FIXME, '#' */ 119 [41] = KEY_GRAVE,
121 [42] = KEY_LEFTSHIFT, 120 [42] = KEY_LEFTSHIFT,
122 [43] = KEY_GRAVE, /* FIXME: '~' */ 121 [43] = KEY_BACKSLASH,
123 [44] = KEY_Z, 122 [44] = KEY_Z,
124 [45] = KEY_X, 123 [45] = KEY_X,
125 [46] = KEY_C, 124 [46] = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
145 [66] = KEY_F8, 144 [66] = KEY_F8,
146 [67] = KEY_F9, 145 [67] = KEY_F9,
147 [68] = KEY_F10, 146 [68] = KEY_F10,
148 [69] = KEY_ESC, 147 [71] = KEY_HOME,
149 [70] = KEY_DELETE, 148 [72] = KEY_UP,
150 [71] = KEY_KP7,
151 [72] = KEY_KP8,
152 [73] = KEY_KP9,
153 [74] = KEY_KPMINUS, 149 [74] = KEY_KPMINUS,
154 [75] = KEY_KP4, 150 [75] = KEY_LEFT,
155 [76] = KEY_KP5, 151 [77] = KEY_RIGHT,
156 [77] = KEY_KP6,
157 [78] = KEY_KPPLUS, 152 [78] = KEY_KPPLUS,
158 [79] = KEY_KP1, 153 [80] = KEY_DOWN,
159 [80] = KEY_KP2, 154 [82] = KEY_INSERT,
160 [81] = KEY_KP3, 155 [83] = KEY_DELETE,
161 [82] = KEY_KP0,
162 [83] = KEY_KPDOT,
163 [90] = KEY_KPLEFTPAREN,
164 [91] = KEY_KPRIGHTPAREN,
165 [92] = KEY_KPASTERISK, /* FIXME */
166 [93] = KEY_KPASTERISK,
167 [94] = KEY_KPPLUS,
168 [95] = KEY_HELP,
169 [96] = KEY_102ND, 156 [96] = KEY_102ND,
170 [97] = KEY_KPASTERISK, /* FIXME */ 157 [97] = KEY_UNDO,
171 [98] = KEY_KPSLASH, 158 [98] = KEY_HELP,
172 [99] = KEY_KPLEFTPAREN, 159 [99] = KEY_KPLEFTPAREN,
173 [100] = KEY_KPRIGHTPAREN, 160 [100] = KEY_KPRIGHTPAREN,
174 [101] = KEY_KPSLASH, 161 [101] = KEY_KPSLASH,
175 [102] = KEY_KPASTERISK, 162 [102] = KEY_KPASTERISK,
176 [103] = KEY_UP, 163 [103] = KEY_KP7,
177 [104] = KEY_KPASTERISK, /* FIXME */ 164 [104] = KEY_KP8,
178 [105] = KEY_LEFT, 165 [105] = KEY_KP9,
179 [106] = KEY_RIGHT, 166 [106] = KEY_KP4,
180 [107] = KEY_KPASTERISK, /* FIXME */ 167 [107] = KEY_KP5,
181 [108] = KEY_DOWN, 168 [108] = KEY_KP6,
182 [109] = KEY_KPASTERISK, /* FIXME */ 169 [109] = KEY_KP1,
183 [110] = KEY_KPASTERISK, /* FIXME */ 170 [110] = KEY_KP2,
184 [111] = KEY_KPASTERISK, /* FIXME */ 171 [111] = KEY_KP3,
185 [112] = KEY_KPASTERISK, /* FIXME */ 172 [112] = KEY_KP0,
186 [113] = KEY_KPASTERISK /* FIXME */ 173 [113] = KEY_KPDOT,
174 [114] = KEY_KPENTER,
187}; 175};
188 176
189static struct input_dev *atakbd_dev; 177static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
191static void atakbd_interrupt(unsigned char scancode, char down) 179static void atakbd_interrupt(unsigned char scancode, char down)
192{ 180{
193 181
194 if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ 182 if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
195 183
196 // report raw events here? 184 // report raw events here?
197 185
198 scancode = atakbd_keycode[scancode]; 186 scancode = atakbd_keycode[scancode];
199 187
200 if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ 188 input_report_key(atakbd_dev, scancode, down);
201 input_report_key(atakbd_dev, scancode, 1); 189 input_sync(atakbd_dev);
202 input_report_key(atakbd_dev, scancode, 0); 190 } else /* scancodes >= 0xf3 are mouse data, most likely */
203 input_sync(atakbd_dev);
204 } else {
205 input_report_key(atakbd_dev, scancode, down);
206 input_sync(atakbd_dev);
207 }
208 } else /* scancodes >= 0xf2 are mouse data, most likely */
209 printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); 191 printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
210 192
211 return; 193 return;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 96a887f33698..eb14ddf69346 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
410 min = abs->minimum; 410 min = abs->minimum;
411 max = abs->maximum; 411 max = abs->maximum;
412 412
413 if ((min != 0 || max != 0) && max <= min) { 413 if ((min != 0 || max != 0) && max < min) {
414 printk(KERN_DEBUG 414 printk(KERN_DEBUG
415 "%s: invalid abs[%02x] min:%d max:%d\n", 415 "%s: invalid abs[%02x] min:%d max:%d\n",
416 UINPUT_NAME, code, min, max); 416 UINPUT_NAME, code, min, max);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 44f57cf6675b..2d95e8d93cc7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1178static const char * const middle_button_pnp_ids[] = { 1178static const char * const middle_button_pnp_ids[] = {
1179 "LEN2131", /* ThinkPad P52 w/ NFC */ 1179 "LEN2131", /* ThinkPad P52 w/ NFC */
1180 "LEN2132", /* ThinkPad P52 */ 1180 "LEN2132", /* ThinkPad P52 */
1181 "LEN2133", /* ThinkPad P72 w/ NFC */
1182 "LEN2134", /* ThinkPad P72 */
1181 NULL 1183 NULL
1182}; 1184};
1183 1185
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 80e69bb8283e..83ac8c128192 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
241 struct i2c_client *client = to_i2c_client(dev); 241 struct i2c_client *client = to_i2c_client(dev);
242 int ret; 242 int ret;
243 243
244 if (device_may_wakeup(dev))
245 return enable_irq_wake(client->irq);
246
244 ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); 247 ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
245 return ret > 0 ? 0 : ret; 248 return ret > 0 ? 0 : ret;
246} 249}
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
249{ 252{
250 struct i2c_client *client = to_i2c_client(dev); 253 struct i2c_client *client = to_i2c_client(dev);
251 254
255 if (device_may_wakeup(dev))
256 return disable_irq_wake(client->irq);
257
252 return egalax_wake_up_device(client); 258 return egalax_wake_up_device(client);
253} 259}
254 260
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4e04fff23977..73e47d93e7a0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
246 246
247 /* The callers make sure that get_device_id() does not fail here */ 247 /* The callers make sure that get_device_id() does not fail here */
248 devid = get_device_id(dev); 248 devid = get_device_id(dev);
249
250 /* For ACPI HID devices, we simply return the devid as such */
251 if (!dev_is_pci(dev))
252 return devid;
253
249 ivrs_alias = amd_iommu_alias_table[devid]; 254 ivrs_alias = amd_iommu_alias_table[devid];
255
250 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); 256 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
251 257
252 if (ivrs_alias == pci_alias) 258 if (ivrs_alias == pci_alias)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5f3f10cf9d9d..bedc801b06a0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2540 if (dev && dev_is_pci(dev) && info->pasid_supported) { 2540 if (dev && dev_is_pci(dev) && info->pasid_supported) {
2541 ret = intel_pasid_alloc_table(dev); 2541 ret = intel_pasid_alloc_table(dev);
2542 if (ret) { 2542 if (ret) {
2543 __dmar_remove_one_dev_info(info); 2543 pr_warn("No pasid table for %s, pasid disabled\n",
2544 spin_unlock_irqrestore(&device_domain_lock, flags); 2544 dev_name(dev));
2545 return NULL; 2545 info->pasid_supported = 0;
2546 } 2546 }
2547 } 2547 }
2548 spin_unlock_irqrestore(&device_domain_lock, flags); 2548 spin_unlock_irqrestore(&device_domain_lock, flags);
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 1c05ed6fc5a5..1fb5e12b029a 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -11,7 +11,7 @@
11#define __INTEL_PASID_H 11#define __INTEL_PASID_H
12 12
13#define PASID_MIN 0x1 13#define PASID_MIN 0x1
14#define PASID_MAX 0x100000 14#define PASID_MAX 0x20000
15 15
16struct pasid_entry { 16struct pasid_entry {
17 u64 val; 17 u64 val;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 258115b10fa9..ad3e2b97469e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
1241 1241
1242static void rk_iommu_shutdown(struct platform_device *pdev) 1242static void rk_iommu_shutdown(struct platform_device *pdev)
1243{ 1243{
1244 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1245 int i = 0, irq;
1246
1247 while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
1248 devm_free_irq(iommu->dev, irq, iommu);
1249
1244 pm_runtime_force_suspend(&pdev->dev); 1250 pm_runtime_force_suspend(&pdev->dev);
1245} 1251}
1246 1252
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 316a57530f6d..c2df341ff6fa 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
1439 * The consequence of the above is that allocation is cost is low, but 1439 * The consequence of the above is that allocation is cost is low, but
1440 * freeing is expensive. We assumes that freeing rarely occurs. 1440 * freeing is expensive. We assumes that freeing rarely occurs.
1441 */ 1441 */
1442#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1442 1443
1443static DEFINE_MUTEX(lpi_range_lock); 1444static DEFINE_MUTEX(lpi_range_lock);
1444static LIST_HEAD(lpi_range_list); 1445static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
1625{ 1626{
1626 phys_addr_t paddr; 1627 phys_addr_t paddr;
1627 1628
1628 lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer); 1629 lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1630 ITS_MAX_LPI_NRBITS);
1629 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); 1631 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1630 if (!gic_rdists->prop_page) { 1632 if (!gic_rdists->prop_page) {
1631 pr_err("Failed to allocate PROPBASE\n"); 1633 pr_err("Failed to allocate PROPBASE\n");
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..954dad29e6e8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
965void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); 965void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
966 966
967extern struct workqueue_struct *bcache_wq; 967extern struct workqueue_struct *bcache_wq;
968extern struct workqueue_struct *bch_journal_wq;
968extern struct mutex bch_register_lock; 969extern struct mutex bch_register_lock;
969extern struct list_head bch_cache_sets; 970extern struct list_head bch_cache_sets;
970 971
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
485 485
486 closure_get(&ca->set->cl); 486 closure_get(&ca->set->cl);
487 INIT_WORK(&ja->discard_work, journal_discard_work); 487 INIT_WORK(&ja->discard_work, journal_discard_work);
488 schedule_work(&ja->discard_work); 488 queue_work(bch_journal_wq, &ja->discard_work);
489 } 489 }
490} 490}
491 491
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
592 : &j->w[0]; 592 : &j->w[0];
593 593
594 __closure_wake_up(&w->wait); 594 __closure_wake_up(&w->wait);
595 continue_at_nobarrier(cl, journal_write, system_wq); 595 continue_at_nobarrier(cl, journal_write, bch_journal_wq);
596} 596}
597 597
598static void journal_write_unlock(struct closure *cl) 598static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
627 spin_unlock(&c->journal.lock); 627 spin_unlock(&c->journal.lock);
628 628
629 btree_flush_write(c); 629 btree_flush_write(c);
630 continue_at(cl, journal_write, system_wq); 630 continue_at(cl, journal_write, bch_journal_wq);
631 return; 631 return;
632 } 632 }
633 633
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..30ba9aeb5ee8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
47static DEFINE_IDA(bcache_device_idx); 47static DEFINE_IDA(bcache_device_idx);
48static wait_queue_head_t unregister_wait; 48static wait_queue_head_t unregister_wait;
49struct workqueue_struct *bcache_wq; 49struct workqueue_struct *bcache_wq;
50struct workqueue_struct *bch_journal_wq;
50 51
51#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 52#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
52/* limitation of partitions number on single bcache device */ 53/* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
2341 kobject_put(bcache_kobj); 2342 kobject_put(bcache_kobj);
2342 if (bcache_wq) 2343 if (bcache_wq)
2343 destroy_workqueue(bcache_wq); 2344 destroy_workqueue(bcache_wq);
2345 if (bch_journal_wq)
2346 destroy_workqueue(bch_journal_wq);
2347
2344 if (bcache_major) 2348 if (bcache_major)
2345 unregister_blkdev(bcache_major, "bcache"); 2349 unregister_blkdev(bcache_major, "bcache");
2346 unregister_reboot_notifier(&reboot); 2350 unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
2370 if (!bcache_wq) 2374 if (!bcache_wq)
2371 goto err; 2375 goto err;
2372 2376
2377 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
2378 if (!bch_journal_wq)
2379 goto err;
2380
2373 bcache_kobj = kobject_create_and_add("bcache", fs_kobj); 2381 bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2374 if (!bcache_kobj) 2382 if (!bcache_kobj)
2375 goto err; 2383 goto err;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f266c81f396f..0481223b1deb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
332 int err; 332 int err;
333 333
334 desc->tfm = essiv->hash_tfm; 334 desc->tfm = essiv->hash_tfm;
335 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 335 desc->flags = 0;
336 336
337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt); 337 err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
338 shash_desc_zero(desc); 338 shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
606 int i, r; 606 int i, r;
607 607
608 desc->tfm = lmk->hash_tfm; 608 desc->tfm = lmk->hash_tfm;
609 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 609 desc->flags = 0;
610 610
611 r = crypto_shash_init(desc); 611 r = crypto_shash_init(desc);
612 if (r) 612 if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
768 768
769 /* calculate crc32 for every 32bit part and xor it */ 769 /* calculate crc32 for every 32bit part and xor it */
770 desc->tfm = tcw->crc32_tfm; 770 desc->tfm = tcw->crc32_tfm;
771 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 771 desc->flags = 0;
772 for (i = 0; i < 4; i++) { 772 for (i = 0; i < 4; i++) {
773 r = crypto_shash_init(desc); 773 r = crypto_shash_init(desc);
774 if (r) 774 if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
1251 * requests if driver request queue is full. 1251 * requests if driver request queue is full.
1252 */ 1252 */
1253 skcipher_request_set_callback(ctx->r.req, 1253 skcipher_request_set_callback(ctx->r.req,
1254 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1254 CRYPTO_TFM_REQ_MAY_BACKLOG,
1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); 1255 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1256} 1256}
1257 1257
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
1268 * requests if driver request queue is full. 1268 * requests if driver request queue is full.
1269 */ 1269 */
1270 aead_request_set_callback(ctx->r.req_aead, 1270 aead_request_set_callback(ctx->r.req_aead,
1271 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 1271 CRYPTO_TFM_REQ_MAY_BACKLOG,
1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); 1272 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1273} 1273}
1274 1274
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 378878599466..89ccb64342de 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
532 unsigned j, size; 532 unsigned j, size;
533 533
534 desc->tfm = ic->journal_mac; 534 desc->tfm = ic->journal_mac;
535 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 535 desc->flags = 0;
536 536
537 r = crypto_shash_init(desc); 537 r = crypto_shash_init(desc);
538 if (unlikely(r)) { 538 if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) 676static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
677{ 677{
678 int r; 678 int r;
679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 679 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
680 complete_journal_encrypt, comp); 680 complete_journal_encrypt, comp);
681 if (likely(encrypt)) 681 if (likely(encrypt))
682 r = crypto_skcipher_encrypt(req); 682 r = crypto_skcipher_encrypt(req);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cae689de75fd..5ba067fa0c72 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010-2011 Neil Brown 2 * Copyright (C) 2010-2011 Neil Brown
3 * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved. 3 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
4 * 4 *
5 * This file is released under the GPL. 5 * This file is released under the GPL.
6 */ 6 */
@@ -29,9 +29,6 @@
29 */ 29 */
30#define MIN_RAID456_JOURNAL_SPACE (4*2048) 30#define MIN_RAID456_JOURNAL_SPACE (4*2048)
31 31
32/* Global list of all raid sets */
33static LIST_HEAD(raid_sets);
34
35static bool devices_handle_discard_safely = false; 32static bool devices_handle_discard_safely = false;
36 33
37/* 34/*
@@ -227,7 +224,6 @@ struct rs_layout {
227 224
228struct raid_set { 225struct raid_set {
229 struct dm_target *ti; 226 struct dm_target *ti;
230 struct list_head list;
231 227
232 uint32_t stripe_cache_entries; 228 uint32_t stripe_cache_entries;
233 unsigned long ctr_flags; 229 unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
273 mddev->new_chunk_sectors = l->new_chunk_sectors; 269 mddev->new_chunk_sectors = l->new_chunk_sectors;
274} 270}
275 271
276/* Find any raid_set in active slot for @rs on global list */
277static struct raid_set *rs_find_active(struct raid_set *rs)
278{
279 struct raid_set *r;
280 struct mapped_device *md = dm_table_get_md(rs->ti->table);
281
282 list_for_each_entry(r, &raid_sets, list)
283 if (r != rs && dm_table_get_md(r->ti->table) == md)
284 return r;
285
286 return NULL;
287}
288
289/* raid10 algorithms (i.e. formats) */ 272/* raid10 algorithms (i.e. formats) */
290#define ALGORITHM_RAID10_DEFAULT 0 273#define ALGORITHM_RAID10_DEFAULT 0
291#define ALGORITHM_RAID10_NEAR 1 274#define ALGORITHM_RAID10_NEAR 1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
764 747
765 mddev_init(&rs->md); 748 mddev_init(&rs->md);
766 749
767 INIT_LIST_HEAD(&rs->list);
768 rs->raid_disks = raid_devs; 750 rs->raid_disks = raid_devs;
769 rs->delta_disks = 0; 751 rs->delta_disks = 0;
770 752
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
782 for (i = 0; i < raid_devs; i++) 764 for (i = 0; i < raid_devs; i++)
783 md_rdev_init(&rs->dev[i].rdev); 765 md_rdev_init(&rs->dev[i].rdev);
784 766
785 /* Add @rs to global list. */
786 list_add(&rs->list, &raid_sets);
787
788 /* 767 /*
789 * Remaining items to be initialized by further RAID params: 768 * Remaining items to be initialized by further RAID params:
790 * rs->md.persistent 769 * rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
797 return rs; 776 return rs;
798} 777}
799 778
800/* Free all @rs allocations and remove it from global list. */ 779/* Free all @rs allocations */
801static void raid_set_free(struct raid_set *rs) 780static void raid_set_free(struct raid_set *rs)
802{ 781{
803 int i; 782 int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
815 dm_put_device(rs->ti, rs->dev[i].data_dev); 794 dm_put_device(rs->ti, rs->dev[i].data_dev);
816 } 795 }
817 796
818 list_del(&rs->list);
819
820 kfree(rs); 797 kfree(rs);
821} 798}
822 799
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
2649 return 0; 2626 return 0;
2650 } 2627 }
2651 2628
2652 /* HM FIXME: get InSync raid_dev? */ 2629 /* HM FIXME: get In_Sync raid_dev? */
2653 rdev = &rs->dev[0].rdev; 2630 rdev = &rs->dev[0].rdev;
2654 2631
2655 if (rs->delta_disks < 0) { 2632 if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3149 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3126 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3150 rs_set_new(rs); 3127 rs_set_new(rs);
3151 } else if (rs_is_recovering(rs)) { 3128 } else if (rs_is_recovering(rs)) {
3129 /* Rebuild particular devices */
3130 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3131 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
3132 rs_setup_recovery(rs, MaxSector);
3133 }
3152 /* A recovering raid set may be resized */ 3134 /* A recovering raid set may be resized */
3153 ; /* skip setup rs */ 3135 ; /* skip setup rs */
3154 } else if (rs_is_reshaping(rs)) { 3136 } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3242 /* Start raid set read-only and assumed clean to change in raid_resume() */ 3224 /* Start raid set read-only and assumed clean to change in raid_resume() */
3243 rs->md.ro = 1; 3225 rs->md.ro = 1;
3244 rs->md.in_sync = 1; 3226 rs->md.in_sync = 1;
3227
3228 /* Keep array frozen */
3245 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); 3229 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
3246 3230
3247 /* Has to be held on running the array */ 3231 /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3265 rs->callbacks.congested_fn = raid_is_congested; 3249 rs->callbacks.congested_fn = raid_is_congested;
3266 dm_table_add_target_callbacks(ti->table, &rs->callbacks); 3250 dm_table_add_target_callbacks(ti->table, &rs->callbacks);
3267 3251
3268 /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */ 3252 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
3269 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { 3253 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
3270 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); 3254 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
3271 if (r) { 3255 if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
3350 return DM_MAPIO_SUBMITTED; 3334 return DM_MAPIO_SUBMITTED;
3351} 3335}
3352 3336
3353/* Return string describing the current sync action of @mddev */ 3337/* Return sync state string for @state */
3354static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery) 3338enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3339static const char *sync_str(enum sync_state state)
3340{
3341 /* Has to be in above sync_state order! */
3342 static const char *sync_strs[] = {
3343 "frozen",
3344 "reshape",
3345 "resync",
3346 "check",
3347 "repair",
3348 "recover",
3349 "idle"
3350 };
3351
3352 return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
3353};
3354
3355/* Return enum sync_state for @mddev derived from @recovery flags */
3356static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3355{ 3357{
3356 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 3358 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3357 return "frozen"; 3359 return st_frozen;
3358 3360
3359 /* The MD sync thread can be done with io but still be running */ 3361 /* The MD sync thread can be done with io or be interrupted but still be running */
3360 if (!test_bit(MD_RECOVERY_DONE, &recovery) && 3362 if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3361 (test_bit(MD_RECOVERY_RUNNING, &recovery) || 3363 (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3362 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) { 3364 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3363 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 3365 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3364 return "reshape"; 3366 return st_reshape;
3365 3367
3366 if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 3368 if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3367 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery)) 3369 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3368 return "resync"; 3370 return st_resync;
3369 else if (test_bit(MD_RECOVERY_CHECK, &recovery)) 3371 if (test_bit(MD_RECOVERY_CHECK, &recovery))
3370 return "check"; 3372 return st_check;
3371 return "repair"; 3373 return st_repair;
3372 } 3374 }
3373 3375
3374 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3376 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3375 return "recover"; 3377 return st_recover;
3378
3379 if (mddev->reshape_position != MaxSector)
3380 return st_reshape;
3376 } 3381 }
3377 3382
3378 return "idle"; 3383 return st_idle;
3379} 3384}
3380 3385
3381/* 3386/*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3409 sector_t resync_max_sectors) 3414 sector_t resync_max_sectors)
3410{ 3415{
3411 sector_t r; 3416 sector_t r;
3417 enum sync_state state;
3412 struct mddev *mddev = &rs->md; 3418 struct mddev *mddev = &rs->md;
3413 3419
3414 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3420 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3419 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3425 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3420 3426
3421 } else { 3427 } else {
3422 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) && 3428 state = decipher_sync_action(mddev, recovery);
3423 !test_bit(MD_RECOVERY_INTR, &recovery) && 3429
3424 (test_bit(MD_RECOVERY_NEEDED, &recovery) || 3430 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
3425 test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3426 test_bit(MD_RECOVERY_RUNNING, &recovery)))
3427 r = mddev->curr_resync_completed;
3428 else
3429 r = mddev->recovery_cp; 3431 r = mddev->recovery_cp;
3432 else
3433 r = mddev->curr_resync_completed;
3430 3434
3431 if (r >= resync_max_sectors && 3435 if (state == st_idle && r >= resync_max_sectors) {
3432 (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
3433 (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
3434 !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
3435 !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
3436 /* 3436 /*
3437 * Sync complete. 3437 * Sync complete.
3438 */ 3438 */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3440 if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3441 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3442 3442
3443 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) { 3443 } else if (state == st_recover)
3444 /* 3444 /*
3445 * In case we are recovering, the array is not in sync 3445 * In case we are recovering, the array is not in sync
3446 * and health chars should show the recovering legs. 3446 * and health chars should show the recovering legs.
3447 */ 3447 */
3448 ; 3448 ;
3449 3449 else if (state == st_resync)
3450 } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
3451 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3452 /* 3450 /*
3453 * If "resync" is occurring, the raid set 3451 * If "resync" is occurring, the raid set
3454 * is or may be out of sync hence the health 3452 * is or may be out of sync hence the health
3455 * characters shall be 'a'. 3453 * characters shall be 'a'.
3456 */ 3454 */
3457 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3455 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3458 3456 else if (state == st_reshape)
3459 } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
3460 !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
3461 /* 3457 /*
3462 * If "reshape" is occurring, the raid set 3458 * If "reshape" is occurring, the raid set
3463 * is or may be out of sync hence the health 3459 * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3465 */ 3461 */
3466 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3462 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
3467 3463
3468 } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) { 3464 else if (state == st_check || state == st_repair)
3469 /* 3465 /*
3470 * If "check" or "repair" is occurring, the raid set has 3466 * If "check" or "repair" is occurring, the raid set has
3471 * undergone an initial sync and the health characters 3467 * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3473 */ 3469 */
3474 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3470 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3475 3471
3476 } else { 3472 else {
3477 struct md_rdev *rdev; 3473 struct md_rdev *rdev;
3478 3474
3479 /* 3475 /*
3480 * We are idle and recovery is needed, prevent 'A' chars race 3476 * We are idle and recovery is needed, prevent 'A' chars race
3481 * caused by components still set to in-sync by constrcuctor. 3477 * caused by components still set to in-sync by constructor.
3482 */ 3478 */
3483 if (test_bit(MD_RECOVERY_NEEDED, &recovery)) 3479 if (test_bit(MD_RECOVERY_NEEDED, &recovery))
3484 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3480 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
3542 progress = rs_get_progress(rs, recovery, resync_max_sectors); 3538 progress = rs_get_progress(rs, recovery, resync_max_sectors);
3543 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ? 3539 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3544 atomic64_read(&mddev->resync_mismatches) : 0; 3540 atomic64_read(&mddev->resync_mismatches) : 0;
3545 sync_action = decipher_sync_action(&rs->md, recovery); 3541 sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
3546 3542
3547 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */ 3543 /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3548 for (i = 0; i < rs->raid_disks; i++) 3544 for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
3892 struct mddev *mddev = &rs->md; 3888 struct mddev *mddev = &rs->md;
3893 struct md_personality *pers = mddev->pers; 3889 struct md_personality *pers = mddev->pers;
3894 3890
3891 /* Don't allow the sync thread to work until the table gets reloaded. */
3892 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3893
3895 r = rs_setup_reshape(rs); 3894 r = rs_setup_reshape(rs);
3896 if (r) 3895 if (r)
3897 return r; 3896 return r;
3898 3897
3899 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
3900 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3901 mddev_resume(mddev);
3902
3903 /* 3898 /*
3904 * Check any reshape constraints enforced by the personalility 3899 * Check any reshape constraints enforced by the personalility
3905 * 3900 *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
3923 } 3918 }
3924 } 3919 }
3925 3920
3926 /* Suspend because a resume will happen in raid_resume() */
3927 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3928 mddev_suspend(mddev);
3929
3930 /* 3921 /*
3931 * Now reshape got set up, update superblocks to 3922 * Now reshape got set up, update superblocks to
3932 * reflect the fact so that a table reload will 3923 * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
3947 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags)) 3938 if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
3948 return 0; 3939 return 0;
3949 3940
3950 if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
3951 struct raid_set *rs_active = rs_find_active(rs);
3952
3953 if (rs_active) {
3954 /*
3955 * In case no rebuilds have been requested
3956 * and an active table slot exists, copy
3957 * current resynchonization completed and
3958 * reshape position pointers across from
3959 * suspended raid set in the active slot.
3960 *
3961 * This resumes the new mapping at current
3962 * offsets to continue recover/reshape without
3963 * necessarily redoing a raid set partially or
3964 * causing data corruption in case of a reshape.
3965 */
3966 if (rs_active->md.curr_resync_completed != MaxSector)
3967 mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
3968 if (rs_active->md.reshape_position != MaxSector)
3969 mddev->reshape_position = rs_active->md.reshape_position;
3970 }
3971 }
3972
3973 /* 3941 /*
3974 * The superblocks need to be updated on disk if the 3942 * The superblocks need to be updated on disk if the
3975 * array is new or new devices got added (thus zeroed 3943 * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
4046 4014
4047static struct target_type raid_target = { 4015static struct target_type raid_target = {
4048 .name = "raid", 4016 .name = "raid",
4049 .version = {1, 13, 2}, 4017 .version = {1, 14, 0},
4050 .module = THIS_MODULE, 4018 .module = THIS_MODULE,
4051 .ctr = raid_ctr, 4019 .ctr = raid_ctr,
4052 .dtr = raid_dtr, 4020 .dtr = raid_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 72142021b5c9..74f6770c70b1 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
189 sector_t data_block_size; 189 sector_t data_block_size;
190 190
191 /* 191 /*
192 * We reserve a section of the metadata for commit overhead.
193 * All reported space does *not* include this.
194 */
195 dm_block_t metadata_reserve;
196
197 /*
192 * Set if a transaction has to be aborted but the attempt to roll back 198 * Set if a transaction has to be aborted but the attempt to roll back
193 * to the previous (good) transaction failed. The only pool metadata 199 * to the previous (good) transaction failed. The only pool metadata
194 * operation possible in this state is the closing of the device. 200 * operation possible in this state is the closing of the device.
@@ -816,6 +822,22 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
816 return dm_tm_commit(pmd->tm, sblock); 822 return dm_tm_commit(pmd->tm, sblock);
817} 823}
818 824
825static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
826{
827 int r;
828 dm_block_t total;
829 dm_block_t max_blocks = 4096; /* 16M */
830
831 r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
832 if (r) {
833 DMERR("could not get size of metadata device");
834 pmd->metadata_reserve = max_blocks;
835 } else {
836 sector_div(total, 10);
837 pmd->metadata_reserve = min(max_blocks, total);
838 }
839}
840
819struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, 841struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
820 sector_t data_block_size, 842 sector_t data_block_size,
821 bool format_device) 843 bool format_device)
@@ -849,6 +871,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
849 return ERR_PTR(r); 871 return ERR_PTR(r);
850 } 872 }
851 873
874 __set_metadata_reserve(pmd);
875
852 return pmd; 876 return pmd;
853} 877}
854 878
@@ -1820,6 +1844,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
1820 down_read(&pmd->root_lock); 1844 down_read(&pmd->root_lock);
1821 if (!pmd->fail_io) 1845 if (!pmd->fail_io)
1822 r = dm_sm_get_nr_free(pmd->metadata_sm, result); 1846 r = dm_sm_get_nr_free(pmd->metadata_sm, result);
1847
1848 if (!r) {
1849 if (*result < pmd->metadata_reserve)
1850 *result = 0;
1851 else
1852 *result -= pmd->metadata_reserve;
1853 }
1823 up_read(&pmd->root_lock); 1854 up_read(&pmd->root_lock);
1824 1855
1825 return r; 1856 return r;
@@ -1932,8 +1963,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
1932 int r = -EINVAL; 1963 int r = -EINVAL;
1933 1964
1934 down_write(&pmd->root_lock); 1965 down_write(&pmd->root_lock);
1935 if (!pmd->fail_io) 1966 if (!pmd->fail_io) {
1936 r = __resize_space_map(pmd->metadata_sm, new_count); 1967 r = __resize_space_map(pmd->metadata_sm, new_count);
1968 if (!r)
1969 __set_metadata_reserve(pmd);
1970 }
1937 up_write(&pmd->root_lock); 1971 up_write(&pmd->root_lock);
1938 1972
1939 return r; 1973 return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7bd60a150f8f..aaf1ad481ee8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
200enum pool_mode { 200enum pool_mode {
201 PM_WRITE, /* metadata may be changed */ 201 PM_WRITE, /* metadata may be changed */
202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ 202 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
203
204 /*
205 * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
206 */
207 PM_OUT_OF_METADATA_SPACE,
203 PM_READ_ONLY, /* metadata may not be changed */ 208 PM_READ_ONLY, /* metadata may not be changed */
209
204 PM_FAIL, /* all I/O fails */ 210 PM_FAIL, /* all I/O fails */
205}; 211};
206 212
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1371 1377
1372static void requeue_bios(struct pool *pool); 1378static void requeue_bios(struct pool *pool);
1373 1379
1374static void check_for_space(struct pool *pool) 1380static bool is_read_only_pool_mode(enum pool_mode mode)
1381{
1382 return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
1383}
1384
1385static bool is_read_only(struct pool *pool)
1386{
1387 return is_read_only_pool_mode(get_pool_mode(pool));
1388}
1389
1390static void check_for_metadata_space(struct pool *pool)
1391{
1392 int r;
1393 const char *ooms_reason = NULL;
1394 dm_block_t nr_free;
1395
1396 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
1397 if (r)
1398 ooms_reason = "Could not get free metadata blocks";
1399 else if (!nr_free)
1400 ooms_reason = "No free metadata blocks";
1401
1402 if (ooms_reason && !is_read_only(pool)) {
1403 DMERR("%s", ooms_reason);
1404 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
1405 }
1406}
1407
1408static void check_for_data_space(struct pool *pool)
1375{ 1409{
1376 int r; 1410 int r;
1377 dm_block_t nr_free; 1411 dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
1397{ 1431{
1398 int r; 1432 int r;
1399 1433
1400 if (get_pool_mode(pool) >= PM_READ_ONLY) 1434 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
1401 return -EINVAL; 1435 return -EINVAL;
1402 1436
1403 r = dm_pool_commit_metadata(pool->pmd); 1437 r = dm_pool_commit_metadata(pool->pmd);
1404 if (r) 1438 if (r)
1405 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); 1439 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
1406 else 1440 else {
1407 check_for_space(pool); 1441 check_for_metadata_space(pool);
1442 check_for_data_space(pool);
1443 }
1408 1444
1409 return r; 1445 return r;
1410} 1446}
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1470 return r; 1506 return r;
1471 } 1507 }
1472 1508
1509 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
1510 if (r) {
1511 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
1512 return r;
1513 }
1514
1515 if (!free_blocks) {
1516 /* Let's commit before we use up the metadata reserve. */
1517 r = commit(pool);
1518 if (r)
1519 return r;
1520 }
1521
1473 return 0; 1522 return 0;
1474} 1523}
1475 1524
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
1501 case PM_OUT_OF_DATA_SPACE: 1550 case PM_OUT_OF_DATA_SPACE:
1502 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; 1551 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
1503 1552
1553 case PM_OUT_OF_METADATA_SPACE:
1504 case PM_READ_ONLY: 1554 case PM_READ_ONLY:
1505 case PM_FAIL: 1555 case PM_FAIL:
1506 return BLK_STS_IOERR; 1556 return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2464 error_retry_list(pool); 2514 error_retry_list(pool);
2465 break; 2515 break;
2466 2516
2517 case PM_OUT_OF_METADATA_SPACE:
2467 case PM_READ_ONLY: 2518 case PM_READ_ONLY:
2468 if (old_mode != new_mode) 2519 if (!is_read_only_pool_mode(old_mode))
2469 notify_of_pool_mode_change(pool, "read-only"); 2520 notify_of_pool_mode_change(pool, "read-only");
2470 dm_pool_metadata_read_only(pool->pmd); 2521 dm_pool_metadata_read_only(pool->pmd);
2471 pool->process_bio = process_bio_read_only; 2522 pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3403 DMINFO("%s: growing the metadata device from %llu to %llu blocks", 3454 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3404 dm_device_name(pool->pool_md), 3455 dm_device_name(pool->pool_md),
3405 sb_metadata_dev_size, metadata_dev_size); 3456 sb_metadata_dev_size, metadata_dev_size);
3457
3458 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
3459 set_pool_mode(pool, PM_WRITE);
3460
3406 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); 3461 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3407 if (r) { 3462 if (r) {
3408 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); 3463 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
3707 struct pool_c *pt = ti->private; 3762 struct pool_c *pt = ti->private;
3708 struct pool *pool = pt->pool; 3763 struct pool *pool = pt->pool;
3709 3764
3710 if (get_pool_mode(pool) >= PM_READ_ONLY) { 3765 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
3711 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", 3766 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
3712 dm_device_name(pool->pool_md)); 3767 dm_device_name(pool->pool_md));
3713 return -EOPNOTSUPP; 3768 return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3781 dm_block_t nr_blocks_data; 3836 dm_block_t nr_blocks_data;
3782 dm_block_t nr_blocks_metadata; 3837 dm_block_t nr_blocks_metadata;
3783 dm_block_t held_root; 3838 dm_block_t held_root;
3839 enum pool_mode mode;
3784 char buf[BDEVNAME_SIZE]; 3840 char buf[BDEVNAME_SIZE];
3785 char buf2[BDEVNAME_SIZE]; 3841 char buf2[BDEVNAME_SIZE];
3786 struct pool_c *pt = ti->private; 3842 struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3851 else 3907 else
3852 DMEMIT("- "); 3908 DMEMIT("- ");
3853 3909
3854 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) 3910 mode = get_pool_mode(pool);
3911 if (mode == PM_OUT_OF_DATA_SPACE)
3855 DMEMIT("out_of_data_space "); 3912 DMEMIT("out_of_data_space ");
3856 else if (pool->pf.mode == PM_READ_ONLY) 3913 else if (is_read_only_pool_mode(mode))
3857 DMEMIT("ro "); 3914 DMEMIT("ro ");
3858 else 3915 else
3859 DMEMIT("rw "); 3916 DMEMIT("rw ");
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 12decdbd722d..fc65f0dedf7f 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
99{ 99{
100 struct scatterlist sg; 100 struct scatterlist sg;
101 101
102 sg_init_one(&sg, data, len); 102 if (likely(!is_vmalloc_addr(data))) {
103 ahash_request_set_crypt(req, &sg, NULL, len); 103 sg_init_one(&sg, data, len);
104 104 ahash_request_set_crypt(req, &sg, NULL, len);
105 return crypto_wait_req(crypto_ahash_update(req), wait); 105 return crypto_wait_req(crypto_ahash_update(req), wait);
106 } else {
107 do {
108 int r;
109 size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
110 flush_kernel_vmap_range((void *)data, this_step);
111 sg_init_table(&sg, 1);
112 sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
113 ahash_request_set_crypt(req, &sg, NULL, this_step);
114 r = crypto_wait_req(crypto_ahash_update(req), wait);
115 if (unlikely(r))
116 return r;
117 data += this_step;
118 len -= this_step;
119 } while (len);
120 return 0;
121 }
106} 122}
107 123
108/* 124/*
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 94329e03001e..0b2af6e74fc3 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
1276static int resync_finish(struct mddev *mddev) 1276static int resync_finish(struct mddev *mddev)
1277{ 1277{
1278 struct md_cluster_info *cinfo = mddev->cluster_info; 1278 struct md_cluster_info *cinfo = mddev->cluster_info;
1279 int ret = 0;
1279 1280
1280 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery); 1281 clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
1281 dlm_unlock_sync(cinfo->resync_lockres);
1282 1282
1283 /* 1283 /*
1284 * If resync thread is interrupted so we can't say resync is finished, 1284 * If resync thread is interrupted so we can't say resync is finished,
1285 * another node will launch resync thread to continue. 1285 * another node will launch resync thread to continue.
1286 */ 1286 */
1287 if (test_bit(MD_CLOSING, &mddev->flags)) 1287 if (!test_bit(MD_CLOSING, &mddev->flags))
1288 return 0; 1288 ret = resync_info_update(mddev, 0, 0);
1289 else 1289 dlm_unlock_sync(cinfo->resync_lockres);
1290 return resync_info_update(mddev, 0, 0); 1290 return ret;
1291} 1291}
1292 1292
1293static int area_resyncing(struct mddev *mddev, int direction, 1293static int area_resyncing(struct mddev *mddev, int direction,
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 981898049491..d6f7978b4449 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4529 allow_barrier(conf); 4529 allow_barrier(conf);
4530 } 4530 }
4531 4531
4532 raise_barrier(conf, 0);
4532read_more: 4533read_more:
4533 /* Now schedule reads for blocks from sector_nr to last */ 4534 /* Now schedule reads for blocks from sector_nr to last */
4534 r10_bio = raid10_alloc_init_r10buf(conf); 4535 r10_bio = raid10_alloc_init_r10buf(conf);
4535 r10_bio->state = 0; 4536 r10_bio->state = 0;
4536 raise_barrier(conf, sectors_done != 0); 4537 raise_barrier(conf, 1);
4537 atomic_set(&r10_bio->remaining, 0); 4538 atomic_set(&r10_bio->remaining, 0);
4538 r10_bio->mddev = mddev; 4539 r10_bio->mddev = mddev;
4539 r10_bio->sector = sector_nr; 4540 r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
4629 if (sector_nr <= last) 4630 if (sector_nr <= last)
4630 goto read_more; 4631 goto read_more;
4631 4632
4633 lower_barrier(conf);
4634
4632 /* Now that we have done the whole section we can 4635 /* Now that we have done the whole section we can
4633 * update reshape_progress 4636 * update reshape_progress
4634 */ 4637 */
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index a001808a2b77..bfb811407061 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
46extern void ppl_quiesce(struct r5conf *conf, int quiesce); 46extern void ppl_quiesce(struct r5conf *conf, int quiesce);
47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); 47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
48 48
49static inline bool raid5_has_log(struct r5conf *conf)
50{
51 return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
52}
53
49static inline bool raid5_has_ppl(struct r5conf *conf) 54static inline bool raid5_has_ppl(struct r5conf *conf)
50{ 55{
51 return test_bit(MD_HAS_PPL, &conf->mddev->flags); 56 return test_bit(MD_HAS_PPL, &conf->mddev->flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4ce0d7502fad..e4e98f47865d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
733{ 733{
734 struct r5conf *conf = sh->raid_conf; 734 struct r5conf *conf = sh->raid_conf;
735 735
736 if (conf->log || raid5_has_ppl(conf)) 736 if (raid5_has_log(conf) || raid5_has_ppl(conf))
737 return false; 737 return false;
738 return test_bit(STRIPE_BATCH_READY, &sh->state) && 738 return test_bit(STRIPE_BATCH_READY, &sh->state) &&
739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 739 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
7737 sector_t newsize; 7737 sector_t newsize;
7738 struct r5conf *conf = mddev->private; 7738 struct r5conf *conf = mddev->private;
7739 7739
7740 if (conf->log || raid5_has_ppl(conf)) 7740 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7741 return -EINVAL; 7741 return -EINVAL;
7742 sectors &= ~((sector_t)conf->chunk_sectors - 1); 7742 sectors &= ~((sector_t)conf->chunk_sectors - 1);
7743 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 7743 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
7788{ 7788{
7789 struct r5conf *conf = mddev->private; 7789 struct r5conf *conf = mddev->private;
7790 7790
7791 if (conf->log || raid5_has_ppl(conf)) 7791 if (raid5_has_log(conf) || raid5_has_ppl(conf))
7792 return -EINVAL; 7792 return -EINVAL;
7793 if (mddev->delta_disks == 0 && 7793 if (mddev->delta_disks == 0 &&
7794 mddev->new_layout == mddev->layout && 7794 mddev->new_layout == mddev->layout &&
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b5410aeb5fe2..bb41bea950ac 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
1159 V4L2_CID_AUTO_WHITE_BALANCE, 1159 V4L2_CID_AUTO_WHITE_BALANCE,
1160 0, 1, 1, 1160 0, 1, 1,
1161 V4L2_WHITE_BALANCE_AUTO); 1161 V4L2_WHITE_BALANCE_AUTO);
1162 if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
1163 ret = PTR_ERR(mt9v111->auto_awb);
1164 goto error_free_ctrls;
1165 }
1166
1167 mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, 1162 mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
1168 &mt9v111_ctrl_ops, 1163 &mt9v111_ctrl_ops,
1169 V4L2_CID_EXPOSURE_AUTO, 1164 V4L2_CID_EXPOSURE_AUTO,
1170 V4L2_EXPOSURE_MANUAL, 1165 V4L2_EXPOSURE_MANUAL,
1171 0, V4L2_EXPOSURE_AUTO); 1166 0, V4L2_EXPOSURE_AUTO);
1172 if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
1173 ret = PTR_ERR(mt9v111->auto_exp);
1174 goto error_free_ctrls;
1175 }
1176
1177 /* Initialize timings */
1178 mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1167 mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
1179 V4L2_CID_HBLANK, 1168 V4L2_CID_HBLANK,
1180 MT9V111_CORE_R05_MIN_HBLANK, 1169 MT9V111_CORE_R05_MIN_HBLANK,
1181 MT9V111_CORE_R05_MAX_HBLANK, 1, 1170 MT9V111_CORE_R05_MAX_HBLANK, 1,
1182 MT9V111_CORE_R05_DEF_HBLANK); 1171 MT9V111_CORE_R05_DEF_HBLANK);
1183 if (IS_ERR_OR_NULL(mt9v111->hblank)) {
1184 ret = PTR_ERR(mt9v111->hblank);
1185 goto error_free_ctrls;
1186 }
1187
1188 mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1172 mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
1189 V4L2_CID_VBLANK, 1173 V4L2_CID_VBLANK,
1190 MT9V111_CORE_R06_MIN_VBLANK, 1174 MT9V111_CORE_R06_MIN_VBLANK,
1191 MT9V111_CORE_R06_MAX_VBLANK, 1, 1175 MT9V111_CORE_R06_MAX_VBLANK, 1,
1192 MT9V111_CORE_R06_DEF_VBLANK); 1176 MT9V111_CORE_R06_DEF_VBLANK);
1193 if (IS_ERR_OR_NULL(mt9v111->vblank)) {
1194 ret = PTR_ERR(mt9v111->vblank);
1195 goto error_free_ctrls;
1196 }
1197 1177
1198 /* PIXEL_RATE is fixed: just expose it to user space. */ 1178 /* PIXEL_RATE is fixed: just expose it to user space. */
1199 v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1179 v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
1201 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, 1181 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
1202 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); 1182 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
1203 1183
1184 if (mt9v111->ctrls.error) {
1185 ret = mt9v111->ctrls.error;
1186 goto error_free_ctrls;
1187 }
1204 mt9v111->sd.ctrl_handler = &mt9v111->ctrls; 1188 mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
1205 1189
1206 /* Start with default configuration: 640x480 UYVY. */ 1190 /* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
1226 mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; 1210 mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
1227 ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); 1211 ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
1228 if (ret) 1212 if (ret)
1229 goto error_free_ctrls; 1213 goto error_free_entity;
1230#endif 1214#endif
1231 1215
1232 ret = mt9v111_chip_probe(mt9v111); 1216 ret = mt9v111_chip_probe(mt9v111);
1233 if (ret) 1217 if (ret)
1234 goto error_free_ctrls; 1218 goto error_free_entity;
1235 1219
1236 ret = v4l2_async_register_subdev(&mt9v111->sd); 1220 ret = v4l2_async_register_subdev(&mt9v111->sd);
1237 if (ret) 1221 if (ret)
1238 goto error_free_ctrls; 1222 goto error_free_entity;
1239 1223
1240 return 0; 1224 return 0;
1241 1225
1242error_free_ctrls: 1226error_free_entity:
1243 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1244
1245#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) 1227#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
1246 media_entity_cleanup(&mt9v111->sd.entity); 1228 media_entity_cleanup(&mt9v111->sd.entity);
1247#endif 1229#endif
1248 1230
1231error_free_ctrls:
1232 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1233
1249 mutex_destroy(&mt9v111->pwr_mutex); 1234 mutex_destroy(&mt9v111->pwr_mutex);
1250 mutex_destroy(&mt9v111->stream_mutex); 1235 mutex_destroy(&mt9v111->stream_mutex);
1251 1236
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
1259 1244
1260 v4l2_async_unregister_subdev(sd); 1245 v4l2_async_unregister_subdev(sd);
1261 1246
1262 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1263
1264#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) 1247#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
1265 media_entity_cleanup(&sd->entity); 1248 media_entity_cleanup(&sd->entity);
1266#endif 1249#endif
1267 1250
1251 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1252
1268 mutex_destroy(&mt9v111->pwr_mutex); 1253 mutex_destroy(&mt9v111->pwr_mutex);
1269 mutex_destroy(&mt9v111->stream_mutex); 1254 mutex_destroy(&mt9v111->stream_mutex);
1270 1255
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 94c1fe0e9787..54fe90acb5b2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
541 depends on MFD_CROS_EC 541 depends on MFD_CROS_EC
542 select CEC_CORE 542 select CEC_CORE
543 select CEC_NOTIFIER 543 select CEC_NOTIFIER
544 select CHROME_PLATFORMS
545 select CROS_EC_PROTO
544 ---help--- 546 ---help---
545 If you say yes here you will get support for the 547 If you say yes here you will get support for the
546 ChromeOS Embedded Controller's CEC. 548 ChromeOS Embedded Controller's CEC.
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 729b31891466..a5ae85674ffb 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index c832539397d7..12bce391d71f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h>
15 16
16#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) 17#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
17#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) 18#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index bcd0dfd33618..2e65caf1ecae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h>
15 16
16#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) 17#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
17#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) 18#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 4559f3b1b38c..008afb85023b 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 7f269021d08c..1f33b4eb198c 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/iopoll.h> 14#include <linux/iopoll.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
1076 else 1077 else
1077 return -EINVAL; 1078 return -EINVAL;
1078 1079
1079 ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), 1080 ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
1080 GFP_KERNEL); 1081 GFP_KERNEL);
1081 if (!ispif->line) 1082 if (!ispif->line)
1082 return -ENOMEM; 1083 return -ENOMEM;
1083 1084
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index da3a9fed9f2d..174a36be6f5d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/io.h>
12#include <linux/iopoll.h> 13#include <linux/iopoll.h>
13 14
14#include "camss-vfe.h" 15#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index 4c584bffd179..0dca8bf9281e 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/io.h>
12#include <linux/iopoll.h> 13#include <linux/iopoll.h>
13 14
14#include "camss-vfe.h" 15#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index dcc0c30ef1b1..669615fff6a0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
848 return -EINVAL; 848 return -EINVAL;
849 } 849 }
850 850
851 camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), 851 camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
852 GFP_KERNEL); 852 sizeof(*camss->csiphy), GFP_KERNEL);
853 if (!camss->csiphy) 853 if (!camss->csiphy)
854 return -ENOMEM; 854 return -ENOMEM;
855 855
856 camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), 856 camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
857 GFP_KERNEL); 857 GFP_KERNEL);
858 if (!camss->csid) 858 if (!camss->csid)
859 return -ENOMEM; 859 return -ENOMEM;
860 860
861 camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); 861 camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
862 GFP_KERNEL);
862 if (!camss->vfe) 863 if (!camss->vfe)
863 return -ENOMEM; 864 return -ENOMEM;
864 865
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
993 994
994MODULE_DEVICE_TABLE(of, camss_dt_match); 995MODULE_DEVICE_TABLE(of, camss_dt_match);
995 996
996static int camss_runtime_suspend(struct device *dev) 997static int __maybe_unused camss_runtime_suspend(struct device *dev)
997{ 998{
998 return 0; 999 return 0;
999} 1000}
1000 1001
1001static int camss_runtime_resume(struct device *dev) 1002static int __maybe_unused camss_runtime_resume(struct device *dev)
1002{ 1003{
1003 return 0; 1004 return 0;
1004} 1005}
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 666d319d3d1a..1f6c1eefe389 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
402 if (msg[0].addr == state->af9033_i2c_addr[1]) 402 if (msg[0].addr == state->af9033_i2c_addr[1])
403 reg |= 0x100000; 403 reg |= 0x100000;
404 404
405 ret = af9035_wr_regs(d, reg, &msg[0].buf[3], 405 ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
406 msg[0].len - 3); 406 &msg[0].buf[3],
407 msg[0].len - 3)
408 : -EOPNOTSUPP;
407 } else { 409 } else {
408 /* I2C write */ 410 /* I2C write */
409 u8 buf[MAX_XFER_SIZE]; 411 u8 buf[MAX_XFER_SIZE];
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
115 if (sev == NULL) 115 if (sev == NULL)
116 return; 116 return;
117 117
118 /*
119 * If the event has been added to the fh->subscribed list, but its
120 * add op has not completed yet elems will be 0, treat this as
121 * not being subscribed.
122 */
123 if (!sev->elems)
124 return;
125
126 /* Increase event sequence number on fh. */ 118 /* Increase event sequence number on fh. */
127 fh->sequence++; 119 fh->sequence++;
128 120
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
208 struct v4l2_subscribed_event *sev, *found_ev; 200 struct v4l2_subscribed_event *sev, *found_ev;
209 unsigned long flags; 201 unsigned long flags;
210 unsigned i; 202 unsigned i;
203 int ret = 0;
211 204
212 if (sub->type == V4L2_EVENT_ALL) 205 if (sub->type == V4L2_EVENT_ALL)
213 return -EINVAL; 206 return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
225 sev->flags = sub->flags; 218 sev->flags = sub->flags;
226 sev->fh = fh; 219 sev->fh = fh;
227 sev->ops = ops; 220 sev->ops = ops;
221 sev->elems = elems;
222
223 mutex_lock(&fh->subscribe_lock);
228 224
229 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 225 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
230 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); 226 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
231 if (!found_ev)
232 list_add(&sev->list, &fh->subscribed);
233 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); 227 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
234 228
235 if (found_ev) { 229 if (found_ev) {
230 /* Already listening */
236 kvfree(sev); 231 kvfree(sev);
237 return 0; /* Already listening */ 232 goto out_unlock;
238 } 233 }
239 234
240 if (sev->ops && sev->ops->add) { 235 if (sev->ops && sev->ops->add) {
241 int ret = sev->ops->add(sev, elems); 236 ret = sev->ops->add(sev, elems);
242 if (ret) { 237 if (ret) {
243 sev->ops = NULL; 238 kvfree(sev);
244 v4l2_event_unsubscribe(fh, sub); 239 goto out_unlock;
245 return ret;
246 } 240 }
247 } 241 }
248 242
249 /* Mark as ready for use */ 243 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
250 sev->elems = elems; 244 list_add(&sev->list, &fh->subscribed);
245 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
251 246
252 return 0; 247out_unlock:
248 mutex_unlock(&fh->subscribe_lock);
249
250 return ret;
253} 251}
254EXPORT_SYMBOL_GPL(v4l2_event_subscribe); 252EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
255 253
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
288 return 0; 286 return 0;
289 } 287 }
290 288
289 mutex_lock(&fh->subscribe_lock);
290
291 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 291 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
292 292
293 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 293 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
305 if (sev && sev->ops && sev->ops->del) 305 if (sev && sev->ops && sev->ops->del)
306 sev->ops->del(sev); 306 sev->ops->del(sev);
307 307
308 mutex_unlock(&fh->subscribe_lock);
309
308 kvfree(sev); 310 kvfree(sev);
309 311
310 return 0; 312 return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
45 INIT_LIST_HEAD(&fh->available); 45 INIT_LIST_HEAD(&fh->available);
46 INIT_LIST_HEAD(&fh->subscribed); 46 INIT_LIST_HEAD(&fh->subscribed);
47 fh->sequence = -1; 47 fh->sequence = -1;
48 mutex_init(&fh->subscribe_lock);
48} 49}
49EXPORT_SYMBOL_GPL(v4l2_fh_init); 50EXPORT_SYMBOL_GPL(v4l2_fh_init);
50 51
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
90 return; 91 return;
91 v4l_disable_media_source(fh->vdev); 92 v4l_disable_media_source(fh->vdev);
92 v4l2_event_unsubscribe_all(fh); 93 v4l2_event_unsubscribe_all(fh);
94 mutex_destroy(&fh->subscribe_lock);
93 fh->vdev = NULL; 95 fh->vdev = NULL;
94} 96}
95EXPORT_SYMBOL_GPL(v4l2_fh_exit); 97EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 31112f622b88..475e5b3790ed 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
411 if (ret < 0) 411 if (ret < 0)
412 goto error; 412 goto error;
413 } 413 }
414 } else { 414 } else if (pdata) {
415 for (i = 0; i < pdata->num_sub_devices; i++) { 415 for (i = 0; i < pdata->num_sub_devices; i++) {
416 pdata->sub_devices[i].dev.parent = dev; 416 pdata->sub_devices[i].dev.parent = dev;
417 ret = platform_device_register(&pdata->sub_devices[i]); 417 ret = platform_device_register(&pdata->sub_devices[i]);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e11ab12fbdf2..800986a79704 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
528} 528}
529 529
530static const struct of_device_id usbhs_child_match_table[] = { 530static const struct of_device_id usbhs_child_match_table[] = {
531 { .compatible = "ti,omap-ehci", }, 531 { .compatible = "ti,ehci-omap", },
532 { .compatible = "ti,omap-ohci", }, 532 { .compatible = "ti,ohci-omap3", },
533 { } 533 { }
534}; 534};
535 535
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
855 .pm = &usbhsomap_dev_pm_ops, 855 .pm = &usbhsomap_dev_pm_ops,
856 .of_match_table = usbhs_omap_dt_ids, 856 .of_match_table = usbhs_omap_dt_ids,
857 }, 857 },
858 .probe = usbhs_omap_probe,
858 .remove = usbhs_omap_remove, 859 .remove = usbhs_omap_remove,
859}; 860};
860 861
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
864MODULE_LICENSE("GPL v2"); 865MODULE_LICENSE("GPL v2");
865MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); 866MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
866 867
867static int __init omap_usbhs_drvinit(void) 868static int omap_usbhs_drvinit(void)
868{ 869{
869 return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); 870 return platform_driver_register(&usbhs_omap_driver);
870} 871}
871 872
872/* 873/*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
878 */ 879 */
879fs_initcall_sync(omap_usbhs_drvinit); 880fs_initcall_sync(omap_usbhs_drvinit);
880 881
881static void __exit omap_usbhs_drvexit(void) 882static void omap_usbhs_drvexit(void)
882{ 883{
883 platform_driver_unregister(&usbhs_omap_driver); 884 platform_driver_unregister(&usbhs_omap_driver);
884} 885}
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index eeb7eef62174..38f90e179927 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/sysfs.h> 29#include <linux/sysfs.h>
30#include <linux/nospec.h>
30 31
31static DEFINE_MUTEX(compass_mutex); 32static DEFINE_MUTEX(compass_mutex);
32 33
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
50 return ret; 51 return ret;
51 if (val >= strlen(map)) 52 if (val >= strlen(map))
52 return -EINVAL; 53 return -EINVAL;
54 val = array_index_nospec(val, strlen(map));
53 mutex_lock(&compass_mutex); 55 mutex_lock(&compass_mutex);
54 ret = compass_command(c, map[val]); 56 ret = compass_command(c, map[val]);
55 mutex_unlock(&compass_mutex); 57 mutex_unlock(&compass_mutex);
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 8f82bb9d11e2..b8aaa684c397 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2131 retrc = plpar_hcall_norets(H_REG_CRQ, 2131 retrc = plpar_hcall_norets(H_REG_CRQ,
2132 vdev->unit_address, 2132 vdev->unit_address,
2133 queue->msg_token, PAGE_SIZE); 2133 queue->msg_token, PAGE_SIZE);
2134 retrc = rc; 2134 rc = retrc;
2135 2135
2136 if (rc == H_RESOURCE) 2136 if (rc == H_RESOURCE)
2137 rc = ibmvmc_reset_crq_queue(adapter); 2137 rc = ibmvmc_reset_crq_queue(adapter);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 7bba62a72921..fc3872fe7b25 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
521 521
522 cl = cldev->cl; 522 cl = cldev->cl;
523 523
524 mutex_lock(&bus->device_lock);
524 if (cl->state == MEI_FILE_UNINITIALIZED) { 525 if (cl->state == MEI_FILE_UNINITIALIZED) {
525 mutex_lock(&bus->device_lock);
526 ret = mei_cl_link(cl); 526 ret = mei_cl_link(cl);
527 mutex_unlock(&bus->device_lock);
528 if (ret) 527 if (ret)
529 return ret; 528 goto out;
530 /* update pointers */ 529 /* update pointers */
531 cl->cldev = cldev; 530 cl->cldev = cldev;
532 } 531 }
533 532
534 mutex_lock(&bus->device_lock);
535 if (mei_cl_is_connected(cl)) { 533 if (mei_cl_is_connected(cl)) {
536 ret = 0; 534 ret = 0;
537 goto out; 535 goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
616 if (err < 0) 614 if (err < 0)
617 dev_err(bus->dev, "Could not disconnect from the ME client\n"); 615 dev_err(bus->dev, "Could not disconnect from the ME client\n");
618 616
619out:
620 mei_cl_bus_module_put(cldev); 617 mei_cl_bus_module_put(cldev);
621 618out:
622 /* Flush queues and remove any pending read */ 619 /* Flush queues and remove any pending read */
623 mei_cl_flush_queues(cl, NULL); 620 mei_cl_flush_queues(cl, NULL);
624 mei_cl_unlink(cl); 621 mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
876 873
877 mei_me_cl_put(cldev->me_cl); 874 mei_me_cl_put(cldev->me_cl);
878 mei_dev_bus_put(cldev->bus); 875 mei_dev_bus_put(cldev->bus);
876 mei_cl_unlink(cldev->cl);
879 kfree(cldev->cl); 877 kfree(cldev->cl);
880 kfree(cldev); 878 kfree(cldev);
881} 879}
882 880
883static const struct device_type mei_cl_device_type = { 881static const struct device_type mei_cl_device_type = {
884 .release = mei_cl_bus_dev_release, 882 .release = mei_cl_bus_dev_release,
885}; 883};
886 884
887/** 885/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 4ab6251d418e..ebdcf0b450e2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1767,7 +1767,7 @@ out:
1767 } 1767 }
1768 } 1768 }
1769 1769
1770 rets = buf->size; 1770 rets = len;
1771err: 1771err:
1772 cl_dbg(dev, cl, "rpm: autosuspend\n"); 1772 cl_dbg(dev, cl, "rpm: autosuspend\n");
1773 pm_runtime_mark_last_busy(dev->dev); 1773 pm_runtime_mark_last_busy(dev->dev);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 09e233d4c0de..e56f3e72d57a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1161 1161
1162 props_res = (struct hbm_props_response *)mei_msg; 1162 props_res = (struct hbm_props_response *)mei_msg;
1163 1163
1164 if (props_res->status) { 1164 if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
1165 dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
1166 props_res->me_addr);
1167 } else if (props_res->status) {
1165 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n", 1168 dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
1166 props_res->status, 1169 props_res->status,
1167 mei_hbm_status_str(props_res->status)); 1170 mei_hbm_status_str(props_res->status));
1168 return -EPROTO; 1171 return -EPROTO;
1172 } else {
1173 mei_hbm_me_cl_add(dev, props_res);
1169 } 1174 }
1170 1175
1171 mei_hbm_me_cl_add(dev, props_res);
1172
1173 /* request property for the next client */ 1176 /* request property for the next client */
1174 if (mei_hbm_prop_req(dev, props_res->me_addr + 1)) 1177 if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
1175 return -EIO; 1178 return -EIO;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c..f57f5de54206 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, true,
238 cd_debounce_delay_ms, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
241 dev_info(host->parent, "Got CD GPIO\n"); 241 dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 648eb6743ed5..6edffeed9953 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
238 mmc_exit_request(mq->queue, req); 238 mmc_exit_request(mq->queue, req);
239} 239}
240 240
241/*
242 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
243 * will not be dispatched in parallel.
244 */
245static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx, 241static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
246 const struct blk_mq_queue_data *bd) 242 const struct blk_mq_queue_data *bd)
247{ 243{
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
264 260
265 spin_lock_irq(q->queue_lock); 261 spin_lock_irq(q->queue_lock);
266 262
267 if (mq->recovery_needed) { 263 if (mq->recovery_needed || mq->busy) {
268 spin_unlock_irq(q->queue_lock); 264 spin_unlock_irq(q->queue_lock);
269 return BLK_STS_RESOURCE; 265 return BLK_STS_RESOURCE;
270 } 266 }
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
291 break; 287 break;
292 } 288 }
293 289
290 /* Parallel dispatch of requests is not supported at the moment */
291 mq->busy = true;
292
294 mq->in_flight[issue_type] += 1; 293 mq->in_flight[issue_type] += 1;
295 get_card = (mmc_tot_in_flight(mq) == 1); 294 get_card = (mmc_tot_in_flight(mq) == 1);
296 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1); 295 cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
333 mq->in_flight[issue_type] -= 1; 332 mq->in_flight[issue_type] -= 1;
334 if (mmc_tot_in_flight(mq) == 0) 333 if (mmc_tot_in_flight(mq) == 0)
335 put_card = true; 334 put_card = true;
335 mq->busy = false;
336 spin_unlock_irq(q->queue_lock); 336 spin_unlock_irq(q->queue_lock);
337 if (put_card) 337 if (put_card)
338 mmc_put_card(card, &mq->ctx); 338 mmc_put_card(card, &mq->ctx);
339 } else {
340 WRITE_ONCE(mq->busy, false);
339 } 341 }
340 342
341 return ret; 343 return ret;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 17e59d50b496..9bf3c9245075 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -81,6 +81,7 @@ struct mmc_queue {
81 unsigned int cqe_busy; 81 unsigned int cqe_busy;
82#define MMC_CQE_DCMD_BUSY BIT(0) 82#define MMC_CQE_DCMD_BUSY BIT(0)
83#define MMC_CQE_QUEUE_FULL BIT(1) 83#define MMC_CQE_QUEUE_FULL BIT(1)
84 bool busy;
84 bool use_cqe; 85 bool use_cqe;
85 bool recovery_needed; 86 bool recovery_needed;
86 bool in_recovery; 87 bool in_recovery;
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b..86803a3a04dc 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
271 if (debounce) { 271 if (debounce) {
272 ret = gpiod_set_debounce(desc, debounce); 272 ret = gpiod_set_debounce(desc, debounce);
273 if (ret < 0) 273 if (ret < 0)
274 ctx->cd_debounce_delay_ms = debounce; 274 ctx->cd_debounce_delay_ms = debounce / 1000;
275 } 275 }
276 276
277 if (gpio_invert) 277 if (gpio_invert)
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 294de177632c..61e4e2a213c9 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
217 * We don't really have DMA, so we need 217 * We don't really have DMA, so we need
218 * to copy from our platform driver buffer 218 * to copy from our platform driver buffer
219 */ 219 */
220 sg_copy_to_buffer(data->sg, 1, host->virt_base, 220 sg_copy_from_buffer(data->sg, 1, host->virt_base,
221 data->sg->length); 221 data->sg->length);
222 } 222 }
223 host->data->bytes_xfered += data->sg->length; 223 host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
393 * We don't really have DMA, so we need to copy to our 393 * We don't really have DMA, so we need to copy to our
394 * platform driver buffer 394 * platform driver buffer
395 */ 395 */
396 sg_copy_from_buffer(data->sg, 1, host->virt_base, 396 sg_copy_to_buffer(data->sg, 1, host->virt_base,
397 data->sg->length); 397 data->sg->length);
398 } 398 }
399} 399}
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 5aa2c9404e92..be53044086c7 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1976 do { 1976 do {
1977 value = atmci_readl(host, ATMCI_RDR); 1977 value = atmci_readl(host, ATMCI_RDR);
1978 if (likely(offset + 4 <= sg->length)) { 1978 if (likely(offset + 4 <= sg->length)) {
1979 sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset); 1979 sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
1980 1980
1981 offset += 4; 1981 offset += 4;
1982 nbytes += 4; 1982 nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1993 } else { 1993 } else {
1994 unsigned int remaining = sg->length - offset; 1994 unsigned int remaining = sg->length - offset;
1995 1995
1996 sg_pcopy_to_buffer(sg, 1, &value, remaining, offset); 1996 sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
1997 nbytes += remaining; 1997 nbytes += remaining;
1998 1998
1999 flush_dcache_page(sg_page(sg)); 1999 flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
2003 goto done; 2003 goto done;
2004 2004
2005 offset = 4 - remaining; 2005 offset = 4 - remaining;
2006 sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining, 2006 sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
2007 offset, 0); 2007 offset, 0);
2008 nbytes += offset; 2008 nbytes += offset;
2009 } 2009 }
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2042 2042
2043 do { 2043 do {
2044 if (likely(offset + 4 <= sg->length)) { 2044 if (likely(offset + 4 <= sg->length)) {
2045 sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset); 2045 sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
2046 atmci_writel(host, ATMCI_TDR, value); 2046 atmci_writel(host, ATMCI_TDR, value);
2047 2047
2048 offset += 4; 2048 offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2059 unsigned int remaining = sg->length - offset; 2059 unsigned int remaining = sg->length - offset;
2060 2060
2061 value = 0; 2061 value = 0;
2062 sg_pcopy_from_buffer(sg, 1, &value, remaining, offset); 2062 sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
2063 nbytes += remaining; 2063 nbytes += remaining;
2064 2064
2065 host->sg = sg = sg_next(sg); 2065 host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
2070 } 2070 }
2071 2071
2072 offset = 4 - remaining; 2072 offset = 4 - remaining;
2073 sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining, 2073 sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
2074 offset, 0); 2074 offset, 0);
2075 atmci_writel(host, ATMCI_TDR, value); 2075 atmci_writel(host, ATMCI_TDR, value);
2076 nbytes += offset; 2076 nbytes += offset;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 09cb89645d06..2cfec33178c1 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) 517static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
518{ 518{
519 struct device_node *slot_node; 519 struct device_node *slot_node;
520 struct platform_device *pdev;
520 521
521 /* 522 /*
522 * TODO: the MMC core framework currently does not support 523 * TODO: the MMC core framework currently does not support
523 * controllers with multiple slots properly. So we only register 524 * controllers with multiple slots properly. So we only register
524 * the first slot for now 525 * the first slot for now
525 */ 526 */
526 slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); 527 slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
527 if (!slot_node) { 528 if (!slot_node) {
528 dev_warn(parent, "no 'mmc-slot' sub-node found\n"); 529 dev_warn(parent, "no 'mmc-slot' sub-node found\n");
529 return ERR_PTR(-ENOENT); 530 return ERR_PTR(-ENOENT);
530 } 531 }
531 532
532 return of_platform_device_create(slot_node, NULL, parent); 533 pdev = of_platform_device_create(slot_node, NULL, parent);
534 of_node_put(slot_node);
535
536 return pdev;
533} 537}
534 538
535static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) 539static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 071693ebfe18..68760d4a5d3d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2177 dma_release_channel(host->tx_chan); 2177 dma_release_channel(host->tx_chan);
2178 dma_release_channel(host->rx_chan); 2178 dma_release_channel(host->rx_chan);
2179 2179
2180 dev_pm_clear_wake_irq(host->dev);
2180 pm_runtime_dont_use_autosuspend(host->dev); 2181 pm_runtime_dont_use_autosuspend(host->dev);
2181 pm_runtime_put_sync(host->dev); 2182 pm_runtime_put_sync(host->dev);
2182 pm_runtime_disable(host->dev); 2183 pm_runtime_disable(host->dev);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 35cc0de6be67..ca0b43973769 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -45,14 +45,16 @@
45/* DM_CM_RST */ 45/* DM_CM_RST */
46#define RST_DTRANRST1 BIT(9) 46#define RST_DTRANRST1 BIT(9)
47#define RST_DTRANRST0 BIT(8) 47#define RST_DTRANRST0 BIT(8)
48#define RST_RESERVED_BITS GENMASK_ULL(32, 0) 48#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
49 49
50/* DM_CM_INFO1 and DM_CM_INFO1_MASK */ 50/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
51#define INFO1_CLEAR 0 51#define INFO1_CLEAR 0
52#define INFO1_MASK_CLEAR GENMASK_ULL(31, 0)
52#define INFO1_DTRANEND1 BIT(17) 53#define INFO1_DTRANEND1 BIT(17)
53#define INFO1_DTRANEND0 BIT(16) 54#define INFO1_DTRANEND0 BIT(16)
54 55
55/* DM_CM_INFO2 and DM_CM_INFO2_MASK */ 56/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
57#define INFO2_MASK_CLEAR GENMASK_ULL(31, 0)
56#define INFO2_DTRANERR1 BIT(17) 58#define INFO2_DTRANERR1 BIT(17)
57#define INFO2_DTRANERR0 BIT(16) 59#define INFO2_DTRANERR0 BIT(16)
58 60
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
252{ 254{
253 struct renesas_sdhi *priv = host_to_priv(host); 255 struct renesas_sdhi *priv = host_to_priv(host);
254 256
257 /* Disable DMAC interrupts, we don't use them */
258 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
259 INFO1_MASK_CLEAR);
260 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
261 INFO2_MASK_CLEAR);
262
255 /* Each value is set to non-zero to assume "enabling" each DMA */ 263 /* Each value is set to non-zero to assume "enabling" each DMA */
256 host->chan_rx = host->chan_tx = (void *)0xdeadbeaf; 264 host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
257 265
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbd..5389c4821882 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
498 498
499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
500{ 500{
501 if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && 501 if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
502 of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
502 !soc_device_match(gen3_soc_whitelist)) 503 !soc_device_match(gen3_soc_whitelist))
503 return -ENODEV; 504 return -ENODEV;
504 505
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..270d3c9580c5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
39 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), 39 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
40 SPI_MEM_OP_NO_ADDR, 40 SPI_MEM_OP_NO_ADDR,
41 SPI_MEM_OP_NO_DUMMY, 41 SPI_MEM_OP_NO_DUMMY,
42 SPI_MEM_OP_DATA_IN(len, val, 1)); 42 SPI_MEM_OP_DATA_IN(len, NULL, 1));
43 void *scratchbuf;
43 int ret; 44 int ret;
44 45
46 scratchbuf = kmalloc(len, GFP_KERNEL);
47 if (!scratchbuf)
48 return -ENOMEM;
49
50 op.data.buf.in = scratchbuf;
45 ret = spi_mem_exec_op(flash->spimem, &op); 51 ret = spi_mem_exec_op(flash->spimem, &op);
46 if (ret < 0) 52 if (ret < 0)
47 dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, 53 dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
48 code); 54 code);
55 else
56 memcpy(val, scratchbuf, len);
57
58 kfree(scratchbuf);
49 59
50 return ret; 60 return ret;
51} 61}
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
56 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), 66 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
57 SPI_MEM_OP_NO_ADDR, 67 SPI_MEM_OP_NO_ADDR,
58 SPI_MEM_OP_NO_DUMMY, 68 SPI_MEM_OP_NO_DUMMY,
59 SPI_MEM_OP_DATA_OUT(len, buf, 1)); 69 SPI_MEM_OP_DATA_OUT(len, NULL, 1));
70 void *scratchbuf;
71 int ret;
60 72
61 return spi_mem_exec_op(flash->spimem, &op); 73 scratchbuf = kmemdup(buf, len, GFP_KERNEL);
74 if (!scratchbuf)
75 return -ENOMEM;
76
77 op.data.buf.out = scratchbuf;
78 ret = spi_mem_exec_op(flash->spimem, &op);
79 kfree(scratchbuf);
80
81 return ret;
62} 82}
63 83
64static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, 84static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
873 int ret, err = 0; 873 int ret, err = 0;
874 874
875 np = mtd_get_of_node(master); 875 np = mtd_get_of_node(master);
876 if (!mtd_is_partition(master)) 876 if (mtd_is_partition(master))
877 of_node_get(np);
878 else
877 np = of_get_child_by_name(np, "partitions"); 879 np = of_get_child_by_name(np, "partitions");
880
878 of_property_for_each_string(np, "compatible", prop, compat) { 881 of_property_for_each_string(np, "compatible", prop, compat) {
879 parser = mtd_part_get_compatible_parser(compat); 882 parser = mtd_part_get_compatible_parser(compat);
880 if (!parser) 883 if (!parser)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index ca18612c4201..b864b93dd289 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
596 } 596 }
597 597
598 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 598 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
599 /*
600 * The ->setup_dma() hook kicks DMA by using the data/command
601 * interface, which belongs to a different AXI port from the
602 * register interface. Read back the register to avoid a race.
603 */
604 ioread32(denali->reg + DMA_ENABLE);
599 605
600 denali_reset_irq(denali); 606 denali_reset_irq(denali);
601 denali->setup_dma(denali, dma_addr, page, write); 607 denali->setup_dma(denali, dma_addr, page, write);
@@ -1338,6 +1344,11 @@ int denali_init(struct denali_nand_info *denali)
1338 1344
1339 denali_enable_irq(denali); 1345 denali_enable_irq(denali);
1340 denali_reset_banks(denali); 1346 denali_reset_banks(denali);
1347 if (!denali->max_banks) {
1348 /* Error out earlier if no chip is found for some reasons. */
1349 ret = -ENODEV;
1350 goto disable_irq;
1351 }
1341 1352
1342 denali->active_bank = DENALI_INVALID_BANK; 1353 denali->active_bank = DENALI_INVALID_BANK;
1343 1354
diff --git a/drivers/mtd/nand/raw/docg4.c b/drivers/mtd/nand/raw/docg4.c
index a3f04315c05c..427fcbc1b71c 100644
--- a/drivers/mtd/nand/raw/docg4.c
+++ b/drivers/mtd/nand/raw/docg4.c
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
1218 return 0; 1218 return 0;
1219} 1219}
1220 1220
1221static void __init init_mtd_structs(struct mtd_info *mtd) 1221static void init_mtd_structs(struct mtd_info *mtd)
1222{ 1222{
1223 /* initialize mtd and nand data structures */ 1223 /* initialize mtd and nand data structures */
1224 1224
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
1290 1290
1291} 1291}
1292 1292
1293static int __init read_id_reg(struct mtd_info *mtd) 1293static int read_id_reg(struct mtd_info *mtd)
1294{ 1294{
1295 struct nand_chip *nand = mtd_to_nand(mtd); 1295 struct nand_chip *nand = mtd_to_nand(mtd);
1296 struct docg4_priv *doc = nand_get_controller_data(nand); 1296 struct docg4_priv *doc = nand_get_controller_data(nand);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..bc2ef5209783 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1547 for (op_id = 0; op_id < subop->ninstrs; op_id++) { 1547 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
1548 unsigned int offset, naddrs; 1548 unsigned int offset, naddrs;
1549 const u8 *addrs; 1549 const u8 *addrs;
1550 int len = nand_subop_get_data_len(subop, op_id); 1550 int len;
1551 1551
1552 instr = &subop->instrs[op_id]; 1552 instr = &subop->instrs[op_id];
1553 1553
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1593 nfc_op->ndcb[0] |= 1593 nfc_op->ndcb[0] |=
1594 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | 1594 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1595 NDCB0_LEN_OVRD; 1595 NDCB0_LEN_OVRD;
1596 len = nand_subop_get_data_len(subop, op_id);
1596 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); 1597 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1597 } 1598 }
1598 nfc_op->data_delay_ns = instr->delay_ns; 1599 nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1606 nfc_op->ndcb[0] |= 1607 nfc_op->ndcb[0] |=
1607 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | 1608 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1608 NDCB0_LEN_OVRD; 1609 NDCB0_LEN_OVRD;
1610 len = nand_subop_get_data_len(subop, op_id);
1609 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); 1611 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1610 } 1612 }
1611 nfc_op->data_delay_ns = instr->delay_ns; 1613 nfc_op->data_delay_ns = instr->delay_ns;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
283 case SIOCFINDIPDDPRT: 283 case SIOCFINDIPDDPRT:
284 spin_lock_bh(&ipddp_route_lock); 284 spin_lock_bh(&ipddp_route_lock);
285 rp = __ipddp_find_route(&rcp); 285 rp = __ipddp_find_route(&rcp);
286 if (rp) 286 if (rp) {
287 memcpy(&rcp2, rp, sizeof(rcp2)); 287 memset(&rcp2, 0, sizeof(rcp2));
288 rcp2.ip = rp->ip;
289 rcp2.at = rp->at;
290 rcp2.flags = rp->flags;
291 }
288 spin_unlock_bh(&ipddp_route_lock); 292 spin_unlock_bh(&ipddp_route_lock);
289 293
290 if (rp) { 294 if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a764a83f99da..ee28ec9e0aba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
210static void bond_slave_arr_handler(struct work_struct *work); 210static void bond_slave_arr_handler(struct work_struct *work);
211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, 211static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
212 int mod); 212 int mod);
213static void bond_netdev_notify_work(struct work_struct *work);
213 214
214/*---------------------------- General routines -----------------------------*/ 215/*---------------------------- General routines -----------------------------*/
215 216
@@ -971,16 +972,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
971 struct slave *slave = NULL; 972 struct slave *slave = NULL;
972 struct list_head *iter; 973 struct list_head *iter;
973 struct ad_info ad_info; 974 struct ad_info ad_info;
974 struct netpoll_info *ni;
975 const struct net_device_ops *ops;
976 975
977 if (BOND_MODE(bond) == BOND_MODE_8023AD) 976 if (BOND_MODE(bond) == BOND_MODE_8023AD)
978 if (bond_3ad_get_active_agg_info(bond, &ad_info)) 977 if (bond_3ad_get_active_agg_info(bond, &ad_info))
979 return; 978 return;
980 979
981 bond_for_each_slave_rcu(bond, slave, iter) { 980 bond_for_each_slave_rcu(bond, slave, iter) {
982 ops = slave->dev->netdev_ops; 981 if (!bond_slave_is_up(slave))
983 if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
984 continue; 982 continue;
985 983
986 if (BOND_MODE(bond) == BOND_MODE_8023AD) { 984 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +990,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
992 continue; 990 continue;
993 } 991 }
994 992
995 ni = rcu_dereference_bh(slave->dev->npinfo); 993 netpoll_poll_dev(slave->dev);
996 if (down_trylock(&ni->dev_lock))
997 continue;
998 ops->ndo_poll_controller(slave->dev);
999 up(&ni->dev_lock);
1000 } 994 }
1001} 995}
1002 996
@@ -1177,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1177 } 1171 }
1178 } 1172 }
1179 1173
1180 /* don't change skb->dev for link-local packets */ 1174 /* Link-local multicast packets should be passed to the
1181 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) 1175 * stack on the link they arrive as well as pass them to the
1176 * bond-master device. These packets are mostly usable when
1177 * stack receives it with the link on which they arrive
1178 * (e.g. LLDP) they also must be available on master. Some of
1179 * the use cases include (but are not limited to): LLDP agents
1180 * that must be able to operate both on enslaved interfaces as
1181 * well as on bonds themselves; linux bridges that must be able
1182 * to process/pass BPDUs from attached bonds when any kind of
1183 * STP version is enabled on the network.
1184 */
1185 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
1186 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1187
1188 if (nskb) {
1189 nskb->dev = bond->dev;
1190 nskb->queue_mapping = 0;
1191 netif_rx(nskb);
1192 }
1182 return RX_HANDLER_PASS; 1193 return RX_HANDLER_PASS;
1194 }
1183 if (bond_should_deliver_exact_match(skb, slave, bond)) 1195 if (bond_should_deliver_exact_match(skb, slave, bond))
1184 return RX_HANDLER_EXACT; 1196 return RX_HANDLER_EXACT;
1185 1197
@@ -1276,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
1276 return NULL; 1288 return NULL;
1277 } 1289 }
1278 } 1290 }
1291 INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1292
1279 return slave; 1293 return slave;
1280} 1294}
1281 1295
@@ -1283,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
1283{ 1297{
1284 struct bonding *bond = bond_get_bond_by_slave(slave); 1298 struct bonding *bond = bond_get_bond_by_slave(slave);
1285 1299
1300 cancel_delayed_work_sync(&slave->notify_work);
1286 if (BOND_MODE(bond) == BOND_MODE_8023AD) 1301 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1287 kfree(SLAVE_AD_INFO(slave)); 1302 kfree(SLAVE_AD_INFO(slave));
1288 1303
@@ -1304,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1304 info->link_failure_count = slave->link_failure_count; 1319 info->link_failure_count = slave->link_failure_count;
1305} 1320}
1306 1321
1307static void bond_netdev_notify(struct net_device *dev,
1308 struct netdev_bonding_info *info)
1309{
1310 rtnl_lock();
1311 netdev_bonding_info_change(dev, info);
1312 rtnl_unlock();
1313}
1314
1315static void bond_netdev_notify_work(struct work_struct *_work) 1322static void bond_netdev_notify_work(struct work_struct *_work)
1316{ 1323{
1317 struct netdev_notify_work *w = 1324 struct slave *slave = container_of(_work, struct slave,
1318 container_of(_work, struct netdev_notify_work, work.work); 1325 notify_work.work);
1326
1327 if (rtnl_trylock()) {
1328 struct netdev_bonding_info binfo;
1319 1329
1320 bond_netdev_notify(w->dev, &w->bonding_info); 1330 bond_fill_ifslave(slave, &binfo.slave);
1321 dev_put(w->dev); 1331 bond_fill_ifbond(slave->bond, &binfo.master);
1322 kfree(w); 1332 netdev_bonding_info_change(slave->dev, &binfo);
1333 rtnl_unlock();
1334 } else {
1335 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1336 }
1323} 1337}
1324 1338
1325void bond_queue_slave_event(struct slave *slave) 1339void bond_queue_slave_event(struct slave *slave)
1326{ 1340{
1327 struct bonding *bond = slave->bond; 1341 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1328 struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
1329
1330 if (!nnw)
1331 return;
1332
1333 dev_hold(slave->dev);
1334 nnw->dev = slave->dev;
1335 bond_fill_ifslave(slave, &nnw->bonding_info.slave);
1336 bond_fill_ifbond(bond, &nnw->bonding_info.master);
1337 INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
1338
1339 queue_delayed_work(slave->bond->wq, &nnw->work, 0);
1340} 1342}
1341 1343
1342void bond_lower_state_changed(struct slave *slave) 1344void bond_lower_state_changed(struct slave *slave)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c791c1da4b9..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) 129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) 130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) 131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) 132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
133 133
134/* Offset 0x0C: ATU Data Register */ 134/* Offset 0x0C: ATU Data Register */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
349 chip->ports[entry.portvec].atu_member_violation++; 349 chip->ports[entry.portvec].atu_member_violation++;
350 } 350 }
351 351
352 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 353 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 354 "ATU miss violation for %pM portvec %x\n",
355 entry.mac, entry.portvec); 355 entry.mac, entry.portvec);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 17f12c18d225..7635c38e77dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
459 cqe = &admin_queue->cq.entries[head_masked]; 459 cqe = &admin_queue->cq.entries[head_masked];
460 460
461 /* Go over all the completions */ 461 /* Go over all the completions */
462 while ((cqe->acq_common_descriptor.flags & 462 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { 463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
464 /* Do not read the rest of the completion entry before the 464 /* Do not read the rest of the completion entry before the
465 * phase bit was validated 465 * phase bit was validated
466 */ 466 */
467 rmb(); 467 dma_rmb();
468 ena_com_handle_single_admin_completion(admin_queue, cqe); 468 ena_com_handle_single_admin_completion(admin_queue, cqe);
469 469
470 head_masked++; 470 head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
627 mmio_read_reg |= mmio_read->seq_num & 627 mmio_read_reg |= mmio_read->seq_num &
628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK; 628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
629 629
630 /* make sure read_resp->req_id get updated before the hw can write 630 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
631 * there
632 */
633 wmb();
634
635 writel_relaxed(mmio_read_reg,
636 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
637 631
638 mmiowb();
639 for (i = 0; i < timeout; i++) { 632 for (i = 0; i < timeout; i++) {
640 if (read_resp->req_id == mmio_read->seq_num) 633 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
641 break; 634 break;
642 635
643 udelay(1); 636 udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1796 aenq_common = &aenq_e->aenq_common_desc; 1789 aenq_common = &aenq_e->aenq_common_desc;
1797 1790
1798 /* Go over all the events */ 1791 /* Go over all the events */
1799 while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == 1792 while ((READ_ONCE(aenq_common->flags) &
1800 phase) { 1793 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1794 /* Make sure the phase bit (ownership) is as expected before
1795 * reading the rest of the descriptor.
1796 */
1797 dma_rmb();
1798
1801 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", 1799 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1802 aenq_common->group, aenq_common->syndrom, 1800 aenq_common->group, aenq_common->syndrom,
1803 (u64)aenq_common->timestamp_low + 1801 (u64)aenq_common->timestamp_low +
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..1c682b76190f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
51 if (desc_phase != expected_phase) 51 if (desc_phase != expected_phase)
52 return NULL; 52 return NULL;
53 53
54 /* Make sure we read the rest of the descriptor after the phase bit
55 * has been read
56 */
57 dma_rmb();
58
54 return cdesc; 59 return cdesc;
55} 60}
56 61
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
493 if (cdesc_phase != expected_phase) 498 if (cdesc_phase != expected_phase)
494 return -EAGAIN; 499 return -EAGAIN;
495 500
501 dma_rmb();
496 if (unlikely(cdesc->req_id >= io_cq->q_depth)) { 502 if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
497 pr_err("Invalid req id %d\n", cdesc->req_id); 503 pr_err("Invalid req id %d\n", cdesc->req_id);
498 return -EINVAL; 504 return -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
107 return io_sq->q_depth - 1 - cnt; 107 return io_sq->q_depth - 1 - cnt;
108} 108}
109 109
110static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq, 110static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
111 bool relaxed)
112{ 111{
113 u16 tail; 112 u16 tail;
114 113
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
117 pr_debug("write submission queue doorbell for queue: %d tail: %d\n", 116 pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
118 io_sq->qid, tail); 117 io_sq->qid, tail);
119 118
120 if (relaxed) 119 writel(tail, io_sq->db_addr);
121 writel_relaxed(tail, io_sq->db_addr);
122 else
123 writel(tail, io_sq->db_addr);
124 120
125 return 0; 121 return 0;
126} 122}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c673ac2df65b..25621a218f20 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
76 76
77static int ena_rss_init_default(struct ena_adapter *adapter); 77static int ena_rss_init_default(struct ena_adapter *adapter);
78static void check_for_admin_com_state(struct ena_adapter *adapter); 78static void check_for_admin_com_state(struct ena_adapter *adapter);
79static void ena_destroy_device(struct ena_adapter *adapter); 79static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
80static int ena_restore_device(struct ena_adapter *adapter); 80static int ena_restore_device(struct ena_adapter *adapter);
81 81
82static void ena_tx_timeout(struct net_device *dev) 82static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
461 return -ENOMEM; 461 return -ENOMEM;
462 } 462 }
463 463
464 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, 464 dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
465 DMA_FROM_DEVICE); 465 DMA_FROM_DEVICE);
466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { 466 if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
467 u64_stats_update_begin(&rx_ring->syncp); 467 u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
478 rx_info->page_offset = 0; 478 rx_info->page_offset = 0;
479 ena_buf = &rx_info->ena_buf; 479 ena_buf = &rx_info->ena_buf;
480 ena_buf->paddr = dma; 480 ena_buf->paddr = dma;
481 ena_buf->len = PAGE_SIZE; 481 ena_buf->len = ENA_PAGE_SIZE;
482 482
483 return 0; 483 return 0;
484} 484}
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
495 return; 495 return;
496 } 496 }
497 497
498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE, 498 dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
499 DMA_FROM_DEVICE); 499 DMA_FROM_DEVICE);
500 500
501 __free_page(page); 501 __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
551 rx_ring->qid, i, num); 551 rx_ring->qid, i, num);
552 } 552 }
553 553
554 if (likely(i)) { 554 /* ena_com_write_sq_doorbell issues a wmb() */
555 /* Add memory barrier to make sure the desc were written before 555 if (likely(i))
556 * issue a doorbell 556 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
557 */
558 wmb();
559 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
560 mmiowb();
561 }
562 557
563 rx_ring->next_to_use = next_to_use; 558 rx_ring->next_to_use = next_to_use;
564 559
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
916 do { 911 do {
917 dma_unmap_page(rx_ring->dev, 912 dma_unmap_page(rx_ring->dev,
918 dma_unmap_addr(&rx_info->ena_buf, paddr), 913 dma_unmap_addr(&rx_info->ena_buf, paddr),
919 PAGE_SIZE, DMA_FROM_DEVICE); 914 ENA_PAGE_SIZE, DMA_FROM_DEVICE);
920 915
921 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, 916 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
922 rx_info->page_offset, len, PAGE_SIZE); 917 rx_info->page_offset, len, ENA_PAGE_SIZE);
923 918
924 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, 919 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
925 "rx skb updated. len %d. data_len %d\n", 920 "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
1900 "Destroy failure, restarting device\n"); 1895 "Destroy failure, restarting device\n");
1901 ena_dump_stats_to_dmesg(adapter); 1896 ena_dump_stats_to_dmesg(adapter);
1902 /* rtnl lock already obtained in dev_ioctl() layer */ 1897 /* rtnl lock already obtained in dev_ioctl() layer */
1903 ena_destroy_device(adapter); 1898 ena_destroy_device(adapter, false);
1904 ena_restore_device(adapter); 1899 ena_restore_device(adapter);
1905 } 1900 }
1906 1901
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2112 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2107 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2113 tx_ring->ring_size); 2108 tx_ring->ring_size);
2114 2109
2115 /* This WMB is aimed to:
2116 * 1 - perform smp barrier before reading next_to_completion
2117 * 2 - make sure the desc were written before trigger DB
2118 */
2119 wmb();
2120
2121 /* stop the queue when no more space available, the packet can have up 2110 /* stop the queue when no more space available, the packet can have up
2122 * to sgl_size + 2. one for the meta descriptor and one for header 2111 * to sgl_size + 2. one for the meta descriptor and one for header
2123 * (if the header is larger than tx_max_header_size). 2112 * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2136 * stop the queue but meanwhile clean_tx_irq updates 2125 * stop the queue but meanwhile clean_tx_irq updates
2137 * next_to_completion and terminates. 2126 * next_to_completion and terminates.
2138 * The queue will remain stopped forever. 2127 * The queue will remain stopped forever.
2139 * To solve this issue this function perform rmb, check 2128 * To solve this issue add a mb() to make sure that
2140 * the wakeup condition and wake up the queue if needed. 2129 * netif_tx_stop_queue() write is vissible before checking if
2130 * there is additional space in the queue.
2141 */ 2131 */
2142 smp_rmb(); 2132 smp_mb();
2143 2133
2144 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) 2134 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2145 > ENA_TX_WAKEUP_THRESH) { 2135 > ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2151 } 2141 }
2152 2142
2153 if (netif_xmit_stopped(txq) || !skb->xmit_more) { 2143 if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2154 /* trigger the dma engine */ 2144 /* trigger the dma engine. ena_com_write_sq_doorbell()
2155 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false); 2145 * has a mb
2146 */
2147 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2156 u64_stats_update_begin(&tx_ring->syncp); 2148 u64_stats_update_begin(&tx_ring->syncp);
2157 tx_ring->tx_stats.doorbells++; 2149 tx_ring->tx_stats.doorbells++;
2158 u64_stats_update_end(&tx_ring->syncp); 2150 u64_stats_update_end(&tx_ring->syncp);
@@ -2193,25 +2185,6 @@ error_drop_packet:
2193 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2194} 2186}
2195 2187
2196#ifdef CONFIG_NET_POLL_CONTROLLER
2197static void ena_netpoll(struct net_device *netdev)
2198{
2199 struct ena_adapter *adapter = netdev_priv(netdev);
2200 int i;
2201
2202 /* Dont schedule NAPI if the driver is in the middle of reset
2203 * or netdev is down.
2204 */
2205
2206 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2207 test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2208 return;
2209
2210 for (i = 0; i < adapter->num_queues; i++)
2211 napi_schedule(&adapter->ena_napi[i].napi);
2212}
2213#endif /* CONFIG_NET_POLL_CONTROLLER */
2214
2215static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, 2188static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2216 struct net_device *sb_dev, 2189 struct net_device *sb_dev,
2217 select_queue_fallback_t fallback) 2190 select_queue_fallback_t fallback)
@@ -2377,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
2377 .ndo_change_mtu = ena_change_mtu, 2350 .ndo_change_mtu = ena_change_mtu,
2378 .ndo_set_mac_address = NULL, 2351 .ndo_set_mac_address = NULL,
2379 .ndo_validate_addr = eth_validate_addr, 2352 .ndo_validate_addr = eth_validate_addr,
2380#ifdef CONFIG_NET_POLL_CONTROLLER
2381 .ndo_poll_controller = ena_netpoll,
2382#endif /* CONFIG_NET_POLL_CONTROLLER */
2383}; 2353};
2384 2354
2385static int ena_device_validate_params(struct ena_adapter *adapter, 2355static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2550,12 +2520,15 @@ err_disable_msix:
2550 return rc; 2520 return rc;
2551} 2521}
2552 2522
2553static void ena_destroy_device(struct ena_adapter *adapter) 2523static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2554{ 2524{
2555 struct net_device *netdev = adapter->netdev; 2525 struct net_device *netdev = adapter->netdev;
2556 struct ena_com_dev *ena_dev = adapter->ena_dev; 2526 struct ena_com_dev *ena_dev = adapter->ena_dev;
2557 bool dev_up; 2527 bool dev_up;
2558 2528
2529 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2530 return;
2531
2559 netif_carrier_off(netdev); 2532 netif_carrier_off(netdev);
2560 2533
2561 del_timer_sync(&adapter->timer_service); 2534 del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2536,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2563 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); 2536 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2564 adapter->dev_up_before_reset = dev_up; 2537 adapter->dev_up_before_reset = dev_up;
2565 2538
2566 ena_com_set_admin_running_state(ena_dev, false); 2539 if (!graceful)
2540 ena_com_set_admin_running_state(ena_dev, false);
2567 2541
2568 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2542 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2569 ena_down(adapter); 2543 ena_down(adapter);
@@ -2591,6 +2565,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
2591 adapter->reset_reason = ENA_REGS_RESET_NORMAL; 2565 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2592 2566
2593 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2567 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2568 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2594} 2569}
2595 2570
2596static int ena_restore_device(struct ena_adapter *adapter) 2571static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2610,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
2635 } 2610 }
2636 } 2611 }
2637 2612
2613 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2638 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2614 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2639 dev_err(&pdev->dev, "Device reset completed successfully\n"); 2615 dev_err(&pdev->dev, "Device reset completed successfully\n");
2640 2616
@@ -2665,7 +2641,7 @@ static void ena_fw_reset_device(struct work_struct *work)
2665 return; 2641 return;
2666 } 2642 }
2667 rtnl_lock(); 2643 rtnl_lock();
2668 ena_destroy_device(adapter); 2644 ena_destroy_device(adapter, false);
2669 ena_restore_device(adapter); 2645 ena_restore_device(adapter);
2670 rtnl_unlock(); 2646 rtnl_unlock();
2671} 2647}
@@ -3409,30 +3385,24 @@ static void ena_remove(struct pci_dev *pdev)
3409 netdev->rx_cpu_rmap = NULL; 3385 netdev->rx_cpu_rmap = NULL;
3410 } 3386 }
3411#endif /* CONFIG_RFS_ACCEL */ 3387#endif /* CONFIG_RFS_ACCEL */
3412
3413 unregister_netdev(netdev);
3414 del_timer_sync(&adapter->timer_service); 3388 del_timer_sync(&adapter->timer_service);
3415 3389
3416 cancel_work_sync(&adapter->reset_task); 3390 cancel_work_sync(&adapter->reset_task);
3417 3391
3418 /* Reset the device only if the device is running. */ 3392 unregister_netdev(netdev);
3419 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3420 ena_com_dev_reset(ena_dev, adapter->reset_reason);
3421 3393
3422 ena_free_mgmnt_irq(adapter); 3394 /* If the device is running then we want to make sure the device will be
3395 * reset to make sure no more events will be issued by the device.
3396 */
3397 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3398 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3423 3399
3424 ena_disable_msix(adapter); 3400 rtnl_lock();
3401 ena_destroy_device(adapter, true);
3402 rtnl_unlock();
3425 3403
3426 free_netdev(netdev); 3404 free_netdev(netdev);
3427 3405
3428 ena_com_mmio_reg_read_request_destroy(ena_dev);
3429
3430 ena_com_abort_admin_commands(ena_dev);
3431
3432 ena_com_wait_for_abort_completion(ena_dev);
3433
3434 ena_com_admin_destroy(ena_dev);
3435
3436 ena_com_rss_destroy(ena_dev); 3406 ena_com_rss_destroy(ena_dev);
3437 3407
3438 ena_com_delete_debug_area(ena_dev); 3408 ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3437,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3467 "ignoring device reset request as the device is being suspended\n"); 3437 "ignoring device reset request as the device is being suspended\n");
3468 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 3438 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3469 } 3439 }
3470 ena_destroy_device(adapter); 3440 ena_destroy_device(adapter, true);
3471 rtnl_unlock(); 3441 rtnl_unlock();
3472 return 0; 3442 return 0;
3473} 3443}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f1972b5ab650..7c7ae56c52cf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
355 355
356int ena_get_sset_count(struct net_device *netdev, int sset); 356int ena_get_sset_count(struct net_device *netdev, int sset);
357 357
358/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
359 * driver passas 0.
360 * Since the max packet size the ENA handles is ~9kB limit the buffer length to
361 * 16kB.
362 */
363#if PAGE_SIZE > SZ_16K
364#define ENA_PAGE_SIZE SZ_16K
365#else
366#define ENA_PAGE_SIZE PAGE_SIZE
367#endif
368
358#endif /* !(ENA_H) */ 369#endif /* !(ENA_H) */
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b593..00332a1ea84b 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
1031 int i, ret; 1031 int i, ret;
1032 unsigned long esar_base; 1032 unsigned long esar_base;
1033 unsigned char *esar; 1033 unsigned char *esar;
1034 const char *desc;
1034 1035
1035 if (dec_lance_debug && version_printed++ == 0) 1036 if (dec_lance_debug && version_printed++ == 0)
1036 printk(version); 1037 printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
1216 */ 1217 */
1217 switch (type) { 1218 switch (type) {
1218 case ASIC_LANCE: 1219 case ASIC_LANCE:
1219 printk("%s: IOASIC onboard LANCE", name); 1220 desc = "IOASIC onboard LANCE";
1220 break; 1221 break;
1221 case PMAD_LANCE: 1222 case PMAD_LANCE:
1222 printk("%s: PMAD-AA", name); 1223 desc = "PMAD-AA";
1223 break; 1224 break;
1224 case PMAX_LANCE: 1225 case PMAX_LANCE:
1225 printk("%s: PMAX onboard LANCE", name); 1226 desc = "PMAX onboard LANCE";
1226 break; 1227 break;
1227 } 1228 }
1228 for (i = 0; i < 6; i++) 1229 for (i = 0; i < 6; i++)
1229 dev->dev_addr[i] = esar[i * 4]; 1230 dev->dev_addr[i] = esar[i * 4];
1230 1231
1231 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); 1232 printk("%s: %s, addr = %pM, irq = %d\n",
1233 name, desc, dev->dev_addr, dev->irq);
1232 1234
1233 dev->netdev_ops = &lance_netdev_ops; 1235 dev->netdev_ops = &lance_netdev_ops;
1234 dev->watchdog_timeo = 5*HZ; 1236 dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 024998d6d8c6..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
155static void bmac_set_timeout(struct net_device *dev); 155static void bmac_set_timeout(struct net_device *dev);
156static void bmac_tx_timeout(struct timer_list *t); 156static void bmac_tx_timeout(struct timer_list *t);
157static int bmac_output(struct sk_buff *skb, struct net_device *dev); 157static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
158static void bmac_start(struct net_device *dev); 158static void bmac_start(struct net_device *dev);
159 159
160#define DBDMA_SET(x) ( ((x) | (x) << 16) ) 160#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
1456 spin_unlock_irqrestore(&bp->lock, flags); 1456 spin_unlock_irqrestore(&bp->lock, flags);
1457} 1457}
1458 1458
1459static int 1459static netdev_tx_t
1460bmac_output(struct sk_buff *skb, struct net_device *dev) 1460bmac_output(struct sk_buff *skb, struct net_device *dev)
1461{ 1461{
1462 struct bmac_data *bp = netdev_priv(dev); 1462 struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
78 78
79static int mace_open(struct net_device *dev); 79static int mace_open(struct net_device *dev);
80static int mace_close(struct net_device *dev); 80static int mace_close(struct net_device *dev);
81static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 81static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
82static void mace_set_multicast(struct net_device *dev); 82static void mace_set_multicast(struct net_device *dev);
83static void mace_reset(struct net_device *dev); 83static void mace_reset(struct net_device *dev);
84static int mace_set_address(struct net_device *dev, void *addr); 84static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
525 mp->timeout_active = 1; 525 mp->timeout_active = 1;
526} 526}
527 527
528static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 528static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
529{ 529{
530 struct mace_data *mp = netdev_priv(dev); 530 struct mace_data *mp = netdev_priv(dev);
531 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 531 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
89 89
90static int mace_open(struct net_device *dev); 90static int mace_open(struct net_device *dev);
91static int mace_close(struct net_device *dev); 91static int mace_close(struct net_device *dev);
92static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 92static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
93static void mace_set_multicast(struct net_device *dev); 93static void mace_set_multicast(struct net_device *dev);
94static int mace_set_address(struct net_device *dev, void *addr); 94static int mace_set_address(struct net_device *dev, void *addr);
95static void mace_reset(struct net_device *dev); 95static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
444 * Transmit a frame 444 * Transmit a frame
445 */ 445 */
446 446
447static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 447static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
448{ 448{
449 struct mace_data *mp = netdev_priv(dev); 449 struct mace_data *mp = netdev_priv(dev);
450 unsigned long flags; 450 unsigned long flags;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..d1e1a0ba8615 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
225 } 225 }
226 226
227 /* for single fragment packets use build_skb() */ 227 /* for single fragment packets use build_skb() */
228 if (buff->is_eop) { 228 if (buff->is_eop &&
229 buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
229 skb = build_skb(page_address(buff->page), 230 skb = build_skb(page_address(buff->page),
230 buff->len + AQ_SKB_ALIGN); 231 AQ_CFG_RX_FRAME_MAX);
231 if (unlikely(!skb)) { 232 if (unlikely(!skb)) {
232 err = -ENOMEM; 233 err = -ENOMEM;
233 goto err_exit; 234 goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
247 buff->len - ETH_HLEN, 248 buff->len - ETH_HLEN,
248 SKB_TRUESIZE(buff->len - ETH_HLEN)); 249 SKB_TRUESIZE(buff->len - ETH_HLEN));
249 250
250 for (i = 1U, next_ = buff->next, 251 if (!buff->is_eop) {
251 buff_ = &self->buff_ring[next_]; true; 252 for (i = 1U, next_ = buff->next,
252 next_ = buff_->next, 253 buff_ = &self->buff_ring[next_];
253 buff_ = &self->buff_ring[next_], ++i) { 254 true; next_ = buff_->next,
254 skb_add_rx_frag(skb, i, buff_->page, 0, 255 buff_ = &self->buff_ring[next_], ++i) {
255 buff_->len, 256 skb_add_rx_frag(skb, i,
256 SKB_TRUESIZE(buff->len - 257 buff_->page, 0,
257 ETH_HLEN)); 258 buff_->len,
258 buff_->is_cleaned = 1; 259 SKB_TRUESIZE(buff->len -
259 260 ETH_HLEN));
260 if (buff_->is_eop) 261 buff_->is_cleaned = 1;
261 break; 262
263 if (buff_->is_eop)
264 break;
265 }
262 } 266 }
263 } 267 }
264 268
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 147045757b10..c57238fce863 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1069{ 1069{
1070 u32 reg; 1070 u32 reg;
1071 1071
1072 /* Stop monitoring MPD interrupt */
1073 intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
1074
1075 /* Disable RXCHK, active filters and Broadcom tag matching */ 1072 /* Disable RXCHK, active filters and Broadcom tag matching */
1076 reg = rxchk_readl(priv, RXCHK_CONTROL); 1073 reg = rxchk_readl(priv, RXCHK_CONTROL);
1077 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK << 1074 reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
1081 /* Clear the MagicPacket detection logic */ 1078 /* Clear the MagicPacket detection logic */
1082 mpd_enable_set(priv, false); 1079 mpd_enable_set(priv, false);
1083 1080
1081 reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
1082 if (reg & INTRL2_0_MPD)
1083 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1084
1085 if (reg & INTRL2_0_BRCM_MATCH_TAG) {
1086 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1087 RXCHK_BRCM_TAG_MATCH_MASK;
1088 netdev_info(priv->netdev,
1089 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1090 }
1091
1084 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); 1092 netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
1085} 1093}
1086 1094
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1105 struct bcm_sysport_priv *priv = netdev_priv(dev); 1113 struct bcm_sysport_priv *priv = netdev_priv(dev);
1106 struct bcm_sysport_tx_ring *txr; 1114 struct bcm_sysport_tx_ring *txr;
1107 unsigned int ring, ring_bit; 1115 unsigned int ring, ring_bit;
1108 u32 reg;
1109 1116
1110 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & 1117 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
1111 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); 1118 ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
1131 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL) 1138 if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
1132 bcm_sysport_tx_reclaim_all(priv); 1139 bcm_sysport_tx_reclaim_all(priv);
1133 1140
1134 if (priv->irq0_stat & INTRL2_0_MPD)
1135 netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
1136
1137 if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
1138 reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
1139 RXCHK_BRCM_TAG_MATCH_MASK;
1140 netdev_info(priv->netdev,
1141 "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
1142 }
1143
1144 if (!priv->is_lite) 1141 if (!priv->is_lite)
1145 goto out; 1142 goto out;
1146 1143
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2641 /* UniMAC receive needs to be turned on */ 2638 /* UniMAC receive needs to be turned on */
2642 umac_enable_set(priv, CMD_RX_EN, 1); 2639 umac_enable_set(priv, CMD_RX_EN, 1);
2643 2640
2644 /* Enable the interrupt wake-up source */
2645 intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
2646
2647 netif_dbg(priv, wol, ndev, "entered WOL mode\n"); 2641 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2648 2642
2649 return 0; 2643 return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 71362b7f6040..fcc2328bb0d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12894 } 12894 }
12895} 12895}
12896 12896
12897#ifdef CONFIG_NET_POLL_CONTROLLER
12898static void poll_bnx2x(struct net_device *dev)
12899{
12900 struct bnx2x *bp = netdev_priv(dev);
12901 int i;
12902
12903 for_each_eth_queue(bp, i) {
12904 struct bnx2x_fastpath *fp = &bp->fp[i];
12905 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12906 }
12907}
12908#endif
12909
12910static int bnx2x_validate_addr(struct net_device *dev) 12897static int bnx2x_validate_addr(struct net_device *dev)
12911{ 12898{
12912 struct bnx2x *bp = netdev_priv(dev); 12899 struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
13113 .ndo_tx_timeout = bnx2x_tx_timeout, 13100 .ndo_tx_timeout = bnx2x_tx_timeout,
13114 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, 13101 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13115 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, 13102 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13116#ifdef CONFIG_NET_POLL_CONTROLLER
13117 .ndo_poll_controller = poll_bnx2x,
13118#endif
13119 .ndo_setup_tc = __bnx2x_setup_tc, 13103 .ndo_setup_tc = __bnx2x_setup_tc,
13120#ifdef CONFIG_BNX2X_SRIOV 13104#ifdef CONFIG_BNX2X_SRIOV
13121 .ndo_set_vf_mac = bnx2x_set_vf_mac, 13105 .ndo_set_vf_mac = bnx2x_set_vf_mac,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8bb1e38b1681..0478e562abac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1884 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 1884 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1885 tx_pkts++; 1885 tx_pkts++;
1886 /* return full budget so NAPI will complete. */ 1886 /* return full budget so NAPI will complete. */
1887 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1887 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1888 rx_pkts = budget; 1888 rx_pkts = budget;
1889 raw_cons = NEXT_RAW_CMP(raw_cons);
1890 break;
1891 }
1889 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1892 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1890 if (likely(budget)) 1893 if (likely(budget))
1891 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1894 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1913 } 1916 }
1914 raw_cons = NEXT_RAW_CMP(raw_cons); 1917 raw_cons = NEXT_RAW_CMP(raw_cons);
1915 1918
1916 if (rx_pkts == budget) 1919 if (rx_pkts && rx_pkts == budget)
1917 break; 1920 break;
1918 } 1921 }
1919 1922
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
2027 while (1) { 2030 while (1) {
2028 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); 2031 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2029 2032
2030 if (work_done >= budget) 2033 if (work_done >= budget) {
2034 if (!budget)
2035 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2036 cpr->cp_raw_cons);
2031 break; 2037 break;
2038 }
2032 2039
2033 if (!bnxt_has_work(bp, cpr)) { 2040 if (!bnxt_has_work(bp, cpr)) {
2034 if (napi_complete_done(napi, work_done)) 2041 if (napi_complete_done(napi, work_done))
@@ -5913,12 +5920,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5913 return bp->hw_resc.max_cp_rings; 5920 return bp->hw_resc.max_cp_rings;
5914} 5921}
5915 5922
5916void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) 5923unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
5917{ 5924{
5918 bp->hw_resc.max_cp_rings = max; 5925 return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
5919} 5926}
5920 5927
5921unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 5928static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5922{ 5929{
5923 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 5930 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5924 5931
@@ -6684,6 +6691,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
6684 hw_resc->resv_rx_rings = 0; 6691 hw_resc->resv_rx_rings = 0;
6685 hw_resc->resv_hw_ring_grps = 0; 6692 hw_resc->resv_hw_ring_grps = 0;
6686 hw_resc->resv_vnics = 0; 6693 hw_resc->resv_vnics = 0;
6694 bp->tx_nr_rings = 0;
6695 bp->rx_nr_rings = 0;
6687 } 6696 }
6688 return rc; 6697 return rc;
6689} 6698}
@@ -7670,21 +7679,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
7670 bnxt_queue_sp_work(bp); 7679 bnxt_queue_sp_work(bp);
7671} 7680}
7672 7681
7673#ifdef CONFIG_NET_POLL_CONTROLLER
7674static void bnxt_poll_controller(struct net_device *dev)
7675{
7676 struct bnxt *bp = netdev_priv(dev);
7677 int i;
7678
7679 /* Only process tx rings/combined rings in netpoll mode. */
7680 for (i = 0; i < bp->tx_nr_rings; i++) {
7681 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7682
7683 napi_schedule(&txr->bnapi->napi);
7684 }
7685}
7686#endif
7687
7688static void bnxt_timer(struct timer_list *t) 7682static void bnxt_timer(struct timer_list *t)
7689{ 7683{
7690 struct bnxt *bp = from_timer(bp, t, timer); 7684 struct bnxt *bp = from_timer(bp, t, timer);
@@ -8025,7 +8019,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
8025 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 8019 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
8026 return 0; 8020 return 0;
8027 8021
8028 rc = bnxt_approve_mac(bp, addr->sa_data); 8022 rc = bnxt_approve_mac(bp, addr->sa_data, true);
8029 if (rc) 8023 if (rc)
8030 return rc; 8024 return rc;
8031 8025
@@ -8518,9 +8512,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
8518 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 8512 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
8519 .ndo_set_vf_trust = bnxt_set_vf_trust, 8513 .ndo_set_vf_trust = bnxt_set_vf_trust,
8520#endif 8514#endif
8521#ifdef CONFIG_NET_POLL_CONTROLLER
8522 .ndo_poll_controller = bnxt_poll_controller,
8523#endif
8524 .ndo_setup_tc = bnxt_setup_tc, 8515 .ndo_setup_tc = bnxt_setup_tc,
8525#ifdef CONFIG_RFS_ACCEL 8516#ifdef CONFIG_RFS_ACCEL
8526 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 8517 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
@@ -8629,7 +8620,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8629 8620
8630 *max_tx = hw_resc->max_tx_rings; 8621 *max_tx = hw_resc->max_tx_rings;
8631 *max_rx = hw_resc->max_rx_rings; 8622 *max_rx = hw_resc->max_rx_rings;
8632 *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings); 8623 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
8624 hw_resc->max_irqs);
8633 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs); 8625 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
8634 max_ring_grps = hw_resc->max_hw_ring_grps; 8626 max_ring_grps = hw_resc->max_hw_ring_grps;
8635 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 8627 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8761,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
8769 if (bp->tx_nr_rings) 8761 if (bp->tx_nr_rings)
8770 return 0; 8762 return 0;
8771 8763
8764 bnxt_ulp_irq_stop(bp);
8765 bnxt_clear_int_mode(bp);
8772 rc = bnxt_set_dflt_rings(bp, true); 8766 rc = bnxt_set_dflt_rings(bp, true);
8773 if (rc) { 8767 if (rc) {
8774 netdev_err(bp->dev, "Not enough rings available.\n"); 8768 netdev_err(bp->dev, "Not enough rings available.\n");
8775 return rc; 8769 goto init_dflt_ring_err;
8776 } 8770 }
8777 rc = bnxt_init_int_mode(bp); 8771 rc = bnxt_init_int_mode(bp);
8778 if (rc) 8772 if (rc)
8779 return rc; 8773 goto init_dflt_ring_err;
8774
8780 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 8775 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8781 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { 8776 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
8782 bp->flags |= BNXT_FLAG_RFS; 8777 bp->flags |= BNXT_FLAG_RFS;
8783 bp->dev->features |= NETIF_F_NTUPLE; 8778 bp->dev->features |= NETIF_F_NTUPLE;
8784 } 8779 }
8785 return 0; 8780init_dflt_ring_err:
8781 bnxt_ulp_irq_restart(bp, rc);
8782 return rc;
8786} 8783}
8787 8784
8788int bnxt_restore_pf_fw_resources(struct bnxt *bp) 8785int bnxt_restore_pf_fw_resources(struct bnxt *bp)
@@ -8819,14 +8816,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
8819 } else { 8816 } else {
8820#ifdef CONFIG_BNXT_SRIOV 8817#ifdef CONFIG_BNXT_SRIOV
8821 struct bnxt_vf_info *vf = &bp->vf; 8818 struct bnxt_vf_info *vf = &bp->vf;
8819 bool strict_approval = true;
8822 8820
8823 if (is_valid_ether_addr(vf->mac_addr)) { 8821 if (is_valid_ether_addr(vf->mac_addr)) {
8824 /* overwrite netdev dev_addr with admin VF MAC */ 8822 /* overwrite netdev dev_addr with admin VF MAC */
8825 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 8823 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
8824 /* Older PF driver or firmware may not approve this
8825 * correctly.
8826 */
8827 strict_approval = false;
8826 } else { 8828 } else {
8827 eth_hw_addr_random(bp->dev); 8829 eth_hw_addr_random(bp->dev);
8828 } 8830 }
8829 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 8831 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
8830#endif 8832#endif
8831 } 8833 }
8832 return rc; 8834 return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index fefa011320e0..bde384630a75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
1481unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); 1481unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
1482void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); 1482void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
1483unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); 1483unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
1484void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); 1484unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
1485unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
1486int bnxt_get_avail_msix(struct bnxt *bp, int num); 1485int bnxt_get_avail_msix(struct bnxt *bp, int num);
1487int bnxt_reserve_rings(struct bnxt *bp); 1486int bnxt_reserve_rings(struct bnxt *bp);
1488void bnxt_tx_disable(struct bnxt *bp); 1487void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f3b9fbcc705b..790c684f08ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
46 } 46 }
47 } 47 }
48 48
49 if (i == ARRAY_SIZE(nvm_params))
50 return -EOPNOTSUPP;
51
49 if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) 52 if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
50 idx = bp->pf.port_id; 53 idx = bp->pf.port_id;
51 else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) 54 else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 6d583bcd2a81..3962f6fd543c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
451 451
452 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); 452 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
453 453
454 vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings; 454 vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
455 vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 455 vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
456 if (bp->flags & BNXT_FLAG_AGG_RINGS) 456 if (bp->flags & BNXT_FLAG_AGG_RINGS)
457 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; 457 vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
549 max_stat_ctxs = hw_resc->max_stat_ctxs; 549 max_stat_ctxs = hw_resc->max_stat_ctxs;
550 550
551 /* Remaining rings are distributed equally amongs VF's for now */ 551 /* Remaining rings are distributed equally amongs VF's for now */
552 vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs; 552 vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
553 bp->cp_nr_rings) / num_vfs;
553 vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; 554 vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
554 if (bp->flags & BNXT_FLAG_AGG_RINGS) 555 if (bp->flags & BNXT_FLAG_AGG_RINGS)
555 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / 556 vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
643 */ 644 */
644 vfs_supported = *num_vfs; 645 vfs_supported = *num_vfs;
645 646
646 avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings; 647 avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
647 avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; 648 avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
648 avail_cp = min_t(int, avail_cp, avail_stat); 649 avail_cp = min_t(int, avail_cp, avail_stat);
649 650
@@ -1103,7 +1104,7 @@ update_vf_mac_exit:
1103 mutex_unlock(&bp->hwrm_cmd_lock); 1104 mutex_unlock(&bp->hwrm_cmd_lock);
1104} 1105}
1105 1106
1106int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1107int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1107{ 1108{
1108 struct hwrm_func_vf_cfg_input req = {0}; 1109 struct hwrm_func_vf_cfg_input req = {0};
1109 int rc = 0; 1110 int rc = 0;
@@ -1121,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
1121 memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 1122 memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
1122 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 1123 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1123mac_done: 1124mac_done:
1124 if (rc) { 1125 if (rc && strict) {
1125 rc = -EADDRNOTAVAIL; 1126 rc = -EADDRNOTAVAIL;
1126 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", 1127 netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
1127 mac); 1128 mac);
1129 return rc;
1128 } 1130 }
1129 return rc; 1131 return 0;
1130} 1132}
1131#else 1133#else
1132 1134
@@ -1143,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
1143{ 1145{
1144} 1146}
1145 1147
1146int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1148int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1147{ 1149{
1148 return 0; 1150 return 0;
1149} 1151}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index e9b20cd19881..2eed9eda1195 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
39void bnxt_sriov_disable(struct bnxt *); 39void bnxt_sriov_disable(struct bnxt *);
40void bnxt_hwrm_exec_fwd_req(struct bnxt *); 40void bnxt_hwrm_exec_fwd_req(struct bnxt *);
41void bnxt_update_vf_mac(struct bnxt *); 41void bnxt_update_vf_mac(struct bnxt *);
42int bnxt_approve_mac(struct bnxt *, u8 *); 42int bnxt_approve_mac(struct bnxt *, u8 *, bool);
43#endif 43#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 139d96c5a023..e1594c9df4c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
75 return 0; 75 return 0;
76} 76}
77 77
78static void bnxt_tc_parse_vlan(struct bnxt *bp, 78static int bnxt_tc_parse_vlan(struct bnxt *bp,
79 struct bnxt_tc_actions *actions, 79 struct bnxt_tc_actions *actions,
80 const struct tc_action *tc_act) 80 const struct tc_action *tc_act)
81{ 81{
82 if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { 82 switch (tcf_vlan_action(tc_act)) {
83 case TCA_VLAN_ACT_POP:
83 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; 84 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
84 } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { 85 break;
86 case TCA_VLAN_ACT_PUSH:
85 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; 87 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
86 actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); 88 actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
87 actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); 89 actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
90 break;
91 default:
92 return -EOPNOTSUPP;
88 } 93 }
94 return 0;
89} 95}
90 96
91static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, 97static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -110,16 +116,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
110 struct tcf_exts *tc_exts) 116 struct tcf_exts *tc_exts)
111{ 117{
112 const struct tc_action *tc_act; 118 const struct tc_action *tc_act;
113 LIST_HEAD(tc_actions); 119 int i, rc;
114 int rc;
115 120
116 if (!tcf_exts_has_actions(tc_exts)) { 121 if (!tcf_exts_has_actions(tc_exts)) {
117 netdev_info(bp->dev, "no actions"); 122 netdev_info(bp->dev, "no actions");
118 return -EINVAL; 123 return -EINVAL;
119 } 124 }
120 125
121 tcf_exts_to_list(tc_exts, &tc_actions); 126 tcf_exts_for_each_action(i, tc_act, tc_exts) {
122 list_for_each_entry(tc_act, &tc_actions, list) {
123 /* Drop action */ 127 /* Drop action */
124 if (is_tcf_gact_shot(tc_act)) { 128 if (is_tcf_gact_shot(tc_act)) {
125 actions->flags |= BNXT_TC_ACTION_FLAG_DROP; 129 actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
@@ -136,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
136 140
137 /* Push/pop VLAN */ 141 /* Push/pop VLAN */
138 if (is_tcf_vlan(tc_act)) { 142 if (is_tcf_vlan(tc_act)) {
139 bnxt_tc_parse_vlan(bp, actions, tc_act); 143 rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
144 if (rc)
145 return rc;
140 continue; 146 continue;
141 } 147 }
142 148
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index c37b2842f972..beee61292d5e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix; 169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
170 } 170 }
171 bnxt_fill_msix_vecs(bp, ent); 171 bnxt_fill_msix_vecs(bp, ent);
172 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
173 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; 172 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
174 return avail_msix; 173 return avail_msix;
175} 174}
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
178{ 177{
179 struct net_device *dev = edev->net; 178 struct net_device *dev = edev->net;
180 struct bnxt *bp = netdev_priv(dev); 179 struct bnxt *bp = netdev_priv(dev);
181 int max_cp_rings, msix_requested;
182 180
183 ASSERT_RTNL(); 181 ASSERT_RTNL();
184 if (ulp_id != BNXT_ROCE_ULP) 182 if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
187 if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) 185 if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
188 return 0; 186 return 0;
189 187
190 max_cp_rings = bnxt_get_max_func_cp_rings(bp);
191 msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
192 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
193 edev->ulp_tbl[ulp_id].msix_requested = 0; 188 edev->ulp_tbl[ulp_id].msix_requested = 0;
194 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; 189 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
195 if (netif_running(dev)) { 190 if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
220 return 0; 215 return 0;
221} 216}
222 217
223void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
224{
225 ASSERT_RTNL();
226 if (bnxt_ulp_registered(bp->edev, ulp_id)) {
227 struct bnxt_en_dev *edev = bp->edev;
228 unsigned int msix_req, max;
229
230 msix_req = edev->ulp_tbl[ulp_id].msix_requested;
231 max = bnxt_get_max_func_cp_rings(bp);
232 bnxt_set_max_func_cp_rings(bp, max - msix_req);
233 max = bnxt_get_max_func_stat_ctxs(bp);
234 bnxt_set_max_func_stat_ctxs(bp, max - 1);
235 }
236}
237
238static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, 218static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
239 struct bnxt_fw_msg *fw_msg) 219 struct bnxt_fw_msg *fw_msg)
240{ 220{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index df48ac71729f..d9bea37cd211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
90 90
91int bnxt_get_ulp_msix_num(struct bnxt *bp); 91int bnxt_get_ulp_msix_num(struct bnxt *bp);
92int bnxt_get_ulp_msix_base(struct bnxt *bp); 92int bnxt_get_ulp_msix_base(struct bnxt *bp);
93void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
94void bnxt_ulp_stop(struct bnxt *bp); 93void bnxt_ulp_stop(struct bnxt *bp);
95void bnxt_ulp_start(struct bnxt *bp); 94void bnxt_ulp_start(struct bnxt *bp);
96void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); 95void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b773bc07edf7..14b49612aa86 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
186#define UMAC_MAC1 0x010 186#define UMAC_MAC1 0x010
187#define UMAC_MAX_FRAME_LEN 0x014 187#define UMAC_MAX_FRAME_LEN 0x014
188 188
189#define UMAC_MODE 0x44
190#define MODE_LINK_STATUS (1 << 5)
191
189#define UMAC_EEE_CTRL 0x064 192#define UMAC_EEE_CTRL 0x064
190#define EN_LPI_RX_PAUSE (1 << 0) 193#define EN_LPI_RX_PAUSE (1 << 0)
191#define EN_LPI_TX_PFC (1 << 1) 194#define EN_LPI_TX_PFC (1 << 1)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5333274a283c..4241ae928d4a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
115static int bcmgenet_fixed_phy_link_update(struct net_device *dev, 115static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
116 struct fixed_phy_status *status) 116 struct fixed_phy_status *status)
117{ 117{
118 if (dev && dev->phydev && status) 118 struct bcmgenet_priv *priv;
119 status->link = dev->phydev->link; 119 u32 reg;
120
121 if (dev && dev->phydev && status) {
122 priv = netdev_priv(dev);
123 reg = bcmgenet_umac_readl(priv, UMAC_MODE);
124 status->link = !!(reg & MODE_LINK_STATUS);
125 }
120 126
121 return 0; 127 return 0;
122} 128}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index dc09f9a8a49b..58b9744c4058 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
482 482
483 if (np) { 483 if (np) {
484 if (of_phy_is_fixed_link(np)) { 484 if (of_phy_is_fixed_link(np)) {
485 if (of_phy_register_fixed_link(np) < 0) {
486 dev_err(&bp->pdev->dev,
487 "broken fixed-link specification\n");
488 return -ENODEV;
489 }
490 bp->phy_node = of_node_get(np); 485 bp->phy_node = of_node_get(np);
491 } else { 486 } else {
492 bp->phy_node = of_parse_phandle(np, "phy-handle", 0); 487 bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
569{ 564{
570 struct macb_platform_data *pdata; 565 struct macb_platform_data *pdata;
571 struct device_node *np; 566 struct device_node *np;
572 int err; 567 int err = -ENXIO;
573 568
574 /* Enable management port */ 569 /* Enable management port */
575 macb_writel(bp, NCR, MACB_BIT(MPE)); 570 macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
592 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); 587 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
593 588
594 np = bp->pdev->dev.of_node; 589 np = bp->pdev->dev.of_node;
595 if (pdata) 590 if (np && of_phy_is_fixed_link(np)) {
596 bp->mii_bus->phy_mask = pdata->phy_mask; 591 if (of_phy_register_fixed_link(np) < 0) {
592 dev_err(&bp->pdev->dev,
593 "broken fixed-link specification %pOF\n", np);
594 goto err_out_free_mdiobus;
595 }
596
597 err = mdiobus_register(bp->mii_bus);
598 } else {
599 if (pdata)
600 bp->mii_bus->phy_mask = pdata->phy_mask;
601
602 err = of_mdiobus_register(bp->mii_bus, np);
603 }
597 604
598 err = of_mdiobus_register(bp->mii_bus, np);
599 if (err) 605 if (err)
600 goto err_out_free_mdiobus; 606 goto err_out_free_fixed_link;
601 607
602 err = macb_mii_probe(bp->dev); 608 err = macb_mii_probe(bp->dev);
603 if (err) 609 if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
607 613
608err_out_unregister_bus: 614err_out_unregister_bus:
609 mdiobus_unregister(bp->mii_bus); 615 mdiobus_unregister(bp->mii_bus);
616err_out_free_fixed_link:
610 if (np && of_phy_is_fixed_link(np)) 617 if (np && of_phy_is_fixed_link(np))
611 of_phy_deregister_fixed_link(np); 618 of_phy_deregister_fixed_link(np);
612err_out_free_mdiobus: 619err_out_free_mdiobus:
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
642 if (!(status & MACB_BIT(TGO))) 649 if (!(status & MACB_BIT(TGO)))
643 return 0; 650 return 0;
644 651
645 usleep_range(10, 250); 652 udelay(250);
646 } while (time_before(halt_time, timeout)); 653 } while (time_before(halt_time, timeout));
647 654
648 return -ETIMEDOUT; 655 return -ETIMEDOUT;
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
2028{ 2035{
2029 struct macb_queue *queue; 2036 struct macb_queue *queue;
2030 unsigned int q; 2037 unsigned int q;
2038 u32 ctrl = macb_readl(bp, NCR);
2031 2039
2032 /* Disable RX and TX (XXX: Should we halt the transmission 2040 /* Disable RX and TX (XXX: Should we halt the transmission
2033 * more gracefully?) 2041 * more gracefully?)
2034 */ 2042 */
2035 macb_writel(bp, NCR, 0); 2043 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2036 2044
2037 /* Clear the stats registers (XXX: Update stats first?) */ 2045 /* Clear the stats registers (XXX: Update stats first?) */
2038 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 2046 ctrl |= MACB_BIT(CLRSTAT);
2047
2048 macb_writel(bp, NCR, ctrl);
2039 2049
2040 /* Clear all status flags */ 2050 /* Clear all status flags */
2041 macb_writel(bp, TSR, -1); 2051 macb_writel(bp, TSR, -1);
@@ -2150,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
2150 else 2160 else
2151 dmacfg &= ~GEM_BIT(TXCOEN); 2161 dmacfg &= ~GEM_BIT(TXCOEN);
2152 2162
2163 dmacfg &= ~GEM_BIT(ADDR64);
2153#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2164#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2154 if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2165 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2155 dmacfg |= GEM_BIT(ADDR64); 2166 dmacfg |= GEM_BIT(ADDR64);
@@ -2223,7 +2234,7 @@ static void macb_init_hw(struct macb *bp)
2223 } 2234 }
2224 2235
2225 /* Enable TX and RX */ 2236 /* Enable TX and RX */
2226 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 2237 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
2227} 2238}
2228 2239
2229/* The hash address register is 64 bits long and takes up two 2240/* The hash address register is 64 bits long and takes up two
@@ -3827,6 +3838,13 @@ static const struct macb_config at91sam9260_config = {
3827 .init = macb_init, 3838 .init = macb_init,
3828}; 3839};
3829 3840
3841static const struct macb_config sama5d3macb_config = {
3842 .caps = MACB_CAPS_SG_DISABLED
3843 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3844 .clk_init = macb_clk_init,
3845 .init = macb_init,
3846};
3847
3830static const struct macb_config pc302gem_config = { 3848static const struct macb_config pc302gem_config = {
3831 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3849 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3832 .dma_burst_length = 16, 3850 .dma_burst_length = 16,
@@ -3894,6 +3912,7 @@ static const struct of_device_id macb_dt_ids[] = {
3894 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3912 { .compatible = "cdns,gem", .data = &pc302gem_config },
3895 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3913 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3896 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3914 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3915 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
3897 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3916 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3898 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 3917 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3899 { .compatible = "cdns,emac", .data = &emac_config }, 3918 { .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 623f73dd7738..c116f96956fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
417 struct ch_filter_specification *fs) 417 struct ch_filter_specification *fs)
418{ 418{
419 const struct tc_action *a; 419 const struct tc_action *a;
420 LIST_HEAD(actions); 420 int i;
421 421
422 tcf_exts_to_list(cls->exts, &actions); 422 tcf_exts_for_each_action(i, a, cls->exts) {
423 list_for_each_entry(a, &actions, list) {
424 if (is_tcf_gact_ok(a)) { 423 if (is_tcf_gact_ok(a)) {
425 fs->action = FILTER_PASS; 424 fs->action = FILTER_PASS;
426 } else if (is_tcf_gact_shot(a)) { 425 } else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
591 bool act_redir = false; 590 bool act_redir = false;
592 bool act_pedit = false; 591 bool act_pedit = false;
593 bool act_vlan = false; 592 bool act_vlan = false;
594 LIST_HEAD(actions); 593 int i;
595 594
596 tcf_exts_to_list(cls->exts, &actions); 595 tcf_exts_for_each_action(i, a, cls->exts) {
597 list_for_each_entry(a, &actions, list) {
598 if (is_tcf_gact_ok(a)) { 596 if (is_tcf_gact_ok(a)) {
599 /* Do nothing */ 597 /* Do nothing */
600 } else if (is_tcf_gact_shot(a)) { 598 } else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 18eb2aedd4cb..c7d2b4dc7568 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
93 unsigned int num_actions = 0; 93 unsigned int num_actions = 0;
94 const struct tc_action *a; 94 const struct tc_action *a;
95 struct tcf_exts *exts; 95 struct tcf_exts *exts;
96 LIST_HEAD(actions); 96 int i;
97 97
98 exts = cls->knode.exts; 98 exts = cls->knode.exts;
99 if (!tcf_exts_has_actions(exts)) 99 if (!tcf_exts_has_actions(exts))
100 return -EINVAL; 100 return -EINVAL;
101 101
102 tcf_exts_to_list(exts, &actions); 102 tcf_exts_for_each_action(i, a, exts) {
103 list_for_each_entry(a, &actions, list) {
104 /* Don't allow more than one action per rule. */ 103 /* Don't allow more than one action per rule. */
105 if (num_actions) 104 if (num_actions)
106 return -EINVAL; 105 return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
753}; 753};
754 754
755struct cpl_abort_req_rss6 { 755struct cpl_abort_req_rss6 {
756 WR_HDR;
757 union opcode_tid ot; 756 union opcode_tid ot;
758 __be32 srqidx_status; 757 __be32 srqidx_status;
759}; 758};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e2a702996db4..13dfdfca49fc 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
332 return rx; 332 return rx;
333} 333}
334 334
335static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) 335static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
336{ 336{
337 struct ep93xx_priv *ep = netdev_priv(dev); 337 struct ep93xx_priv *ep = netdev_priv(dev);
338 struct ep93xx_tdesc *txd; 338 struct ep93xx_tdesc *txd;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 3f8fe8fd79cc..6324e80960c3 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -113,7 +113,7 @@ struct net_local {
113 113
114/* Index to functions, as function prototypes. */ 114/* Index to functions, as function prototypes. */
115static int net_open(struct net_device *dev); 115static int net_open(struct net_device *dev);
116static int net_send_packet(struct sk_buff *skb, struct net_device *dev); 116static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
117static irqreturn_t net_interrupt(int irq, void *dev_id); 117static irqreturn_t net_interrupt(int irq, void *dev_id);
118static void set_multicast_list(struct net_device *dev); 118static void set_multicast_list(struct net_device *dev);
119static void net_rx(struct net_device *dev); 119static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
324 return 0; 324 return 0;
325} 325}
326 326
327static int 327static netdev_tx_t
328net_send_packet(struct sk_buff *skb, struct net_device *dev) 328net_send_packet(struct sk_buff *skb, struct net_device *dev)
329{ 329{
330 struct net_local *lp = netdev_priv(dev); 330 struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ff92ab1daeb8..1e9d882c04ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4500 port_res->max_vfs += le16_to_cpu(pcie->num_vfs); 4500 port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
4501 } 4501 }
4502 } 4502 }
4503 return status; 4503 goto err;
4504 } 4504 }
4505 4505
4506 pcie = be_get_pcie_desc(resp->func_param, desc_count, 4506 pcie = be_get_pcie_desc(resp->func_param, desc_count,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2708297e7795..bf9b9fd6d2a0 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
1158 napi_disable(&fep->napi); 1158 napi_disable(&fep->napi);
1159 netif_tx_lock_bh(ndev); 1159 netif_tx_lock_bh(ndev);
1160 fec_restart(ndev); 1160 fec_restart(ndev);
1161 netif_wake_queue(ndev); 1161 netif_tx_wake_all_queues(ndev);
1162 netif_tx_unlock_bh(ndev); 1162 netif_tx_unlock_bh(ndev);
1163 napi_enable(&fep->napi); 1163 napi_enable(&fep->napi);
1164 } 1164 }
@@ -1273,7 +1273,7 @@ skb_done:
1273 1273
1274 /* Since we have freed up a buffer, the ring is no longer full 1274 /* Since we have freed up a buffer, the ring is no longer full
1275 */ 1275 */
1276 if (netif_queue_stopped(ndev)) { 1276 if (netif_tx_queue_stopped(nq)) {
1277 entries_free = fec_enet_get_free_txdesc_num(txq); 1277 entries_free = fec_enet_get_free_txdesc_num(txq);
1278 if (entries_free >= txq->tx_wake_threshold) 1278 if (entries_free >= txq->tx_wake_threshold)
1279 netif_tx_wake_queue(nq); 1279 netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1746 napi_disable(&fep->napi); 1746 napi_disable(&fep->napi);
1747 netif_tx_lock_bh(ndev); 1747 netif_tx_lock_bh(ndev);
1748 fec_restart(ndev); 1748 fec_restart(ndev);
1749 netif_wake_queue(ndev); 1749 netif_tx_wake_all_queues(ndev);
1750 netif_tx_unlock_bh(ndev); 1750 netif_tx_unlock_bh(ndev);
1751 napi_enable(&fep->napi); 1751 napi_enable(&fep->napi);
1752 } 1752 }
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
2247 napi_disable(&fep->napi); 2247 napi_disable(&fep->napi);
2248 netif_tx_lock_bh(ndev); 2248 netif_tx_lock_bh(ndev);
2249 fec_restart(ndev); 2249 fec_restart(ndev);
2250 netif_wake_queue(ndev); 2250 netif_tx_wake_all_queues(ndev);
2251 netif_tx_unlock_bh(ndev); 2251 netif_tx_unlock_bh(ndev);
2252 napi_enable(&fep->napi); 2252 napi_enable(&fep->napi);
2253 } 2253 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
84 if (cb->type == DESC_TYPE_SKB) 84 if (cb->type == DESC_TYPE_SKB)
85 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, 85 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
86 ring_to_dma_dir(ring)); 86 ring_to_dma_dir(ring));
87 else 87 else if (cb->length)
88 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, 88 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
89 ring_to_dma_dir(ring)); 89 ring_to_dma_dir(ring));
90} 90}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index fa5b30f547f6..08a750fb60c4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
220 220
221 /* priv data for the desc, e.g. skb when use with ip stack*/ 221 /* priv data for the desc, e.g. skb when use with ip stack*/
222 void *priv; 222 void *priv;
223 u16 page_offset; 223 u32 page_offset;
224 u16 reuse_flag; 224 u32 length; /* length of the buffer */
225 225
226 u16 length; /* length of the buffer */ 226 u16 reuse_flag;
227 227
228 /* desc type, used by the ring user to mark the type of the priv data */ 228 /* desc type, used by the ring user to mark the type of the priv data */
229 u16 type; 229 u16 type;
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
486 u8 *auto_neg, u16 *speed, u8 *duplex); 486 u8 *auto_neg, u16 *speed, u8 *duplex);
487 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); 487 void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
488 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); 488 void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
489 bool (*need_adjust_link)(struct hnae_handle *handle,
490 int speed, int duplex);
489 int (*set_loopback)(struct hnae_handle *handle, 491 int (*set_loopback)(struct hnae_handle *handle,
490 enum hnae_loop loop_mode, int en); 492 enum hnae_loop loop_mode, int en);
491 void (*get_ring_bdnum_limit)(struct hnae_queue *queue, 493 void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e6aad30e7e69..b52029e26d15 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; 155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
156} 156}
157 157
158static int hns_ae_wait_flow_down(struct hnae_handle *handle)
159{
160 struct dsaf_device *dsaf_dev;
161 struct hns_ppe_cb *ppe_cb;
162 struct hnae_vf_cb *vf_cb;
163 int ret;
164 int i;
165
166 for (i = 0; i < handle->q_num; i++) {
167 ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
168 if (ret)
169 return ret;
170 }
171
172 ppe_cb = hns_get_ppe_cb(handle);
173 ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
174 if (ret)
175 return ret;
176
177 dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
178 if (!dsaf_dev)
179 return -EINVAL;
180 ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
181 if (ret)
182 return ret;
183
184 vf_cb = hns_ae_get_vf_cb(handle);
185 ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
186 if (ret)
187 return ret;
188
189 mdelay(10);
190 return 0;
191}
192
158static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val) 193static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
159{ 194{
160 int q_num = handle->q_num; 195 int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
399 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex); 434 return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
400} 435}
401 436
437static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
438 int duplex)
439{
440 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
441
442 return hns_mac_need_adjust_link(mac_cb, speed, duplex);
443}
444
402static void hns_ae_adjust_link(struct hnae_handle *handle, int speed, 445static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
403 int duplex) 446 int duplex)
404{ 447{
405 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); 448 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
406 449
407 hns_mac_adjust_link(mac_cb, speed, duplex); 450 switch (mac_cb->dsaf_dev->dsaf_ver) {
451 case AE_VERSION_1:
452 hns_mac_adjust_link(mac_cb, speed, duplex);
453 break;
454
455 case AE_VERSION_2:
456 /* chip need to clear all pkt inside */
457 hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
458 if (hns_ae_wait_flow_down(handle)) {
459 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
460 break;
461 }
462
463 hns_mac_adjust_link(mac_cb, speed, duplex);
464 hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
465 break;
466
467 default:
468 break;
469 }
470
471 return;
408} 472}
409 473
410static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, 474static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
902 .get_status = hns_ae_get_link_status, 966 .get_status = hns_ae_get_link_status,
903 .get_info = hns_ae_get_mac_info, 967 .get_info = hns_ae_get_mac_info,
904 .adjust_link = hns_ae_adjust_link, 968 .adjust_link = hns_ae_adjust_link,
969 .need_adjust_link = hns_ae_need_adjust_link,
905 .set_loopback = hns_ae_config_loopback, 970 .set_loopback = hns_ae_config_loopback,
906 .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit, 971 .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
907 .get_pauseparam = hns_ae_get_pauseparam, 972 .get_pauseparam = hns_ae_get_pauseparam,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 5488c6e89f21..09e4061d1fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
257 *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B); 257 *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
258} 258}
259 259
260static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
261 int duplex)
262{
263 struct mac_driver *drv = (struct mac_driver *)mac_drv;
264 struct hns_mac_cb *mac_cb = drv->mac_cb;
265
266 return (mac_cb->speed != speed) ||
267 (mac_cb->half_duplex == duplex);
268}
269
260static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed, 270static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
261 u32 full_duplex) 271 u32 full_duplex)
262{ 272{
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
309 hns_gmac_set_uc_match(mac_drv, en); 319 hns_gmac_set_uc_match(mac_drv, en);
310} 320}
311 321
322int hns_gmac_wait_fifo_clean(void *mac_drv)
323{
324 struct mac_driver *drv = (struct mac_driver *)mac_drv;
325 int wait_cnt;
326 u32 val;
327
328 wait_cnt = 0;
329 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
330 val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
331 /* bit5~bit0 is not send complete pkts */
332 if ((val & 0x3f) == 0)
333 break;
334 usleep_range(100, 200);
335 }
336
337 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
338 dev_err(drv->dev,
339 "hns ge %d fifo was not idle.\n", drv->mac_id);
340 return -EBUSY;
341 }
342
343 return 0;
344}
345
312static void hns_gmac_init(void *mac_drv) 346static void hns_gmac_init(void *mac_drv)
313{ 347{
314 u32 port; 348 u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
690 mac_drv->mac_disable = hns_gmac_disable; 724 mac_drv->mac_disable = hns_gmac_disable;
691 mac_drv->mac_free = hns_gmac_free; 725 mac_drv->mac_free = hns_gmac_free;
692 mac_drv->adjust_link = hns_gmac_adjust_link; 726 mac_drv->adjust_link = hns_gmac_adjust_link;
727 mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
693 mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames; 728 mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
694 mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length; 729 mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
695 mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg; 730 mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
717 mac_drv->get_strings = hns_gmac_get_strings; 752 mac_drv->get_strings = hns_gmac_get_strings;
718 mac_drv->update_stats = hns_gmac_update_stats; 753 mac_drv->update_stats = hns_gmac_update_stats;
719 mac_drv->set_promiscuous = hns_gmac_set_promisc; 754 mac_drv->set_promiscuous = hns_gmac_set_promisc;
755 mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
720 756
721 return (void *)mac_drv; 757 return (void *)mac_drv;
722} 758}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 1c2326bd76e2..6ed6f142427e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
114 return 0; 114 return 0;
115} 115}
116 116
117/**
118 *hns_mac_is_adjust_link - check is need change mac speed and duplex register
119 *@mac_cb: mac device
120 *@speed: phy device speed
121 *@duplex:phy device duplex
122 *
123 */
124bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
125{
126 struct mac_driver *mac_ctrl_drv;
127
128 mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
129
130 if (mac_ctrl_drv->need_adjust_link)
131 return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
132 (enum mac_speed)speed, duplex);
133 else
134 return true;
135}
136
117void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) 137void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
118{ 138{
119 int ret; 139 int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
430 return 0; 450 return 0;
431} 451}
432 452
453int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
454{
455 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
456
457 if (drv->wait_fifo_clean)
458 return drv->wait_fifo_clean(drv);
459
460 return 0;
461}
462
433void hns_mac_reset(struct hns_mac_cb *mac_cb) 463void hns_mac_reset(struct hns_mac_cb *mac_cb)
434{ 464{
435 struct mac_driver *drv = hns_mac_get_drv(mac_cb); 465 struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
998 return DSAF_MAX_PORT_NUM; 1028 return DSAF_MAX_PORT_NUM;
999} 1029}
1000 1030
1031void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1032{
1033 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1034
1035 mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
1036}
1037
1038void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
1039{
1040 struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
1041
1042 mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
1043}
1044
1001/** 1045/**
1002 * hns_mac_init - init mac 1046 * hns_mac_init - init mac
1003 * @dsaf_dev: dsa fabric device struct pointer 1047 * @dsaf_dev: dsa fabric device struct pointer
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index bbc0a98e7ca3..fbc75341bef7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -356,6 +356,9 @@ struct mac_driver {
356 /*adjust mac mode of port,include speed and duplex*/ 356 /*adjust mac mode of port,include speed and duplex*/
357 int (*adjust_link)(void *mac_drv, enum mac_speed speed, 357 int (*adjust_link)(void *mac_drv, enum mac_speed speed,
358 u32 full_duplex); 358 u32 full_duplex);
359 /* need adjust link */
360 bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
361 int duplex);
359 /* config autoegotaite mode of port*/ 362 /* config autoegotaite mode of port*/
360 void (*set_an_mode)(void *mac_drv, u8 enable); 363 void (*set_an_mode)(void *mac_drv, u8 enable);
361 /* config loopbank mode */ 364 /* config loopbank mode */
@@ -394,6 +397,7 @@ struct mac_driver {
394 void (*get_info)(void *mac_drv, struct mac_info *mac_info); 397 void (*get_info)(void *mac_drv, struct mac_info *mac_info);
395 398
396 void (*update_stats)(void *mac_drv); 399 void (*update_stats)(void *mac_drv);
400 int (*wait_fifo_clean)(void *mac_drv);
397 401
398 enum mac_mode mac_mode; 402 enum mac_mode mac_mode;
399 u8 mac_id; 403 u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
427 431
428int hns_mac_init(struct dsaf_device *dsaf_dev); 432int hns_mac_init(struct dsaf_device *dsaf_dev);
429void mac_adjust_link(struct net_device *net_dev); 433void mac_adjust_link(struct net_device *net_dev);
434bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
430void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); 435void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
431int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); 436int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
432int hns_mac_set_multi(struct hns_mac_cb *mac_cb, 437int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
463int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id, 468int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
464 const unsigned char *addr); 469 const unsigned char *addr);
465int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn); 470int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
471void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
472void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
473int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
466 474
467#endif /* _HNS_DSAF_MAC_H */ 475#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ca50c2553a9c..e557a4ef5996 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
2727 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX; 2727 soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
2728} 2728}
2729 2729
2730int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
2731{
2732 u32 val, val_tmp;
2733 int wait_cnt;
2734
2735 if (port >= DSAF_SERVICE_NW_NUM)
2736 return 0;
2737
2738 wait_cnt = 0;
2739 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
2740 val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
2741 (port + DSAF_XGE_NUM) * 0x40);
2742 val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
2743 (port + DSAF_XGE_NUM) * 0x40);
2744 if (val == val_tmp)
2745 break;
2746
2747 usleep_range(100, 200);
2748 }
2749
2750 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
2751 dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
2752 val, val_tmp);
2753 return -EBUSY;
2754 }
2755
2756 return 0;
2757}
2758
2730/** 2759/**
2731 * dsaf_probe - probo dsaf dev 2760 * dsaf_probe - probo dsaf dev
2732 * @pdev: dasf platform device 2761 * @pdev: dasf platform device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 4507e8222683..0e1cd99831a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -44,6 +44,8 @@ struct hns_mac_cb;
44#define DSAF_ROCE_CREDIT_CHN 8 44#define DSAF_ROCE_CREDIT_CHN 8
45#define DSAF_ROCE_CHAN_MODE 3 45#define DSAF_ROCE_CHAN_MODE 3
46 46
47#define HNS_MAX_WAIT_CNT 10000
48
47enum dsaf_roce_port_mode { 49enum dsaf_roce_port_mode {
48 DSAF_ROCE_6PORT_MODE, 50 DSAF_ROCE_6PORT_MODE,
49 DSAF_ROCE_4PORT_MODE, 51 DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
463 465
464int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, 466int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
465 u8 mac_id, u8 port_num); 467 u8 mac_id, u8 port_num);
468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
466 469
467#endif /* __HNS_DSAF_MAIN_H__ */ 470#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index d160d8c9e45b..0942e4916d9d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
275 dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk); 275 dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
276} 276}
277 277
278int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
279{
280 int wait_cnt;
281 u32 val;
282
283 wait_cnt = 0;
284 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
285 val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
286 if (!val)
287 break;
288
289 usleep_range(100, 200);
290 }
291
292 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
293 dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
294 val);
295 return -EBUSY;
296 }
297
298 return 0;
299}
300
278/** 301/**
279 * ppe_init_hw - init ppe 302 * ppe_init_hw - init ppe
280 * @ppe_cb: ppe device 303 * @ppe_cb: ppe device
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 9d8e643e8aa6..f670e63a5a01 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -100,6 +100,7 @@ struct ppe_common_cb {
100 100
101}; 101};
102 102
103int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
103int hns_ppe_init(struct dsaf_device *dsaf_dev); 104int hns_ppe_init(struct dsaf_device *dsaf_dev);
104 105
105void hns_ppe_uninit(struct dsaf_device *dsaf_dev); 106void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 9d76e2e54f9d..5d64519b9b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
66 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num); 66 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
67} 67}
68 68
69int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
70{
71 u32 head, tail;
72 int wait_cnt;
73
74 tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
75 wait_cnt = 0;
76 while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
77 head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
78 if (tail == head)
79 break;
80
81 usleep_range(100, 200);
82 }
83
84 if (wait_cnt >= HNS_MAX_WAIT_CNT) {
85 dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
86 return -EBUSY;
87 }
88
89 return 0;
90}
91
69/** 92/**
70 *hns_rcb_reset_ring_hw - ring reset 93 *hns_rcb_reset_ring_hw - ring reset
71 *@q: ring struct pointer 94 *@q: ring struct pointer
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 602816498c8d..2319b772a271 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
136void hns_rcb_init_hw(struct ring_pair_cb *ring); 136void hns_rcb_init_hw(struct ring_pair_cb *ring);
137void hns_rcb_reset_ring_hw(struct hnae_queue *q); 137void hns_rcb_reset_ring_hw(struct hnae_queue *q);
138void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); 138void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
139int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
139u32 hns_rcb_get_rx_coalesced_frames( 140u32 hns_rcb_get_rx_coalesced_frames(
140 struct rcb_common_cb *rcb_common, u32 port_idx); 141 struct rcb_common_cb *rcb_common, u32 port_idx);
141u32 hns_rcb_get_tx_coalesced_frames( 142u32 hns_rcb_get_tx_coalesced_frames(
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 886cbbf25761..74d935d82cbc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -464,6 +464,7 @@
464#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 464#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
465#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8 465#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
466 466
467#define GMAC_FIFO_STATE_REG 0x0000UL
467#define GMAC_DUPLEX_TYPE_REG 0x0008UL 468#define GMAC_DUPLEX_TYPE_REG 0x0008UL
468#define GMAC_FD_FC_TYPE_REG 0x000CUL 469#define GMAC_FD_FC_TYPE_REG 0x000CUL
469#define GMAC_TX_WATER_LINE_REG 0x0010UL 470#define GMAC_TX_WATER_LINE_REG 0x0010UL
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 9f2b552aee33..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
40#define SKB_TMP_LEN(SKB) \ 40#define SKB_TMP_LEN(SKB) \
41 (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) 41 (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
42 42
43static void fill_v2_desc(struct hnae_ring *ring, void *priv, 43static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
44 int size, dma_addr_t dma, int frag_end, 44 int send_sz, dma_addr_t dma, int frag_end,
45 int buf_num, enum hns_desc_type type, int mtu) 45 int buf_num, enum hns_desc_type type, int mtu)
46{ 46{
47 struct hnae_desc *desc = &ring->desc[ring->next_to_use]; 47 struct hnae_desc *desc = &ring->desc[ring->next_to_use];
48 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; 48 struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
64 desc_cb->type = type; 64 desc_cb->type = type;
65 65
66 desc->addr = cpu_to_le64(dma); 66 desc->addr = cpu_to_le64(dma);
67 desc->tx.send_size = cpu_to_le16((u16)size); 67 desc->tx.send_size = cpu_to_le16((u16)send_sz);
68 68
69 /* config bd buffer end */ 69 /* config bd buffer end */
70 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); 70 hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
133 ring_ptr_move_fw(ring, next_to_use); 133 ring_ptr_move_fw(ring, next_to_use);
134} 134}
135 135
136static void fill_v2_desc(struct hnae_ring *ring, void *priv,
137 int size, dma_addr_t dma, int frag_end,
138 int buf_num, enum hns_desc_type type, int mtu)
139{
140 fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
141 buf_num, type, mtu);
142}
143
136static const struct acpi_device_id hns_enet_acpi_match[] = { 144static const struct acpi_device_id hns_enet_acpi_match[] = {
137 { "HISI00C1", 0 }, 145 { "HISI00C1", 0 },
138 { "HISI00C2", 0 }, 146 { "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
289 297
290 /* when the frag size is bigger than hardware, split this frag */ 298 /* when the frag size is bigger than hardware, split this frag */
291 for (k = 0; k < frag_buf_num; k++) 299 for (k = 0; k < frag_buf_num; k++)
292 fill_v2_desc(ring, priv, 300 fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
293 (k == frag_buf_num - 1) ? 301 (k == frag_buf_num - 1) ?
294 sizeoflast : BD_MAX_SEND_SIZE, 302 sizeoflast : BD_MAX_SEND_SIZE,
295 dma + BD_MAX_SEND_SIZE * k, 303 dma + BD_MAX_SEND_SIZE * k,
296 frag_end && (k == frag_buf_num - 1) ? 1 : 0, 304 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
297 buf_num, 305 buf_num,
298 (type == DESC_TYPE_SKB && !k) ? 306 (type == DESC_TYPE_SKB && !k) ?
299 DESC_TYPE_SKB : DESC_TYPE_PAGE, 307 DESC_TYPE_SKB : DESC_TYPE_PAGE,
300 mtu); 308 mtu);
301} 309}
302 310
303netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev, 311netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -406,113 +414,13 @@ out_net_tx_busy:
406 return NETDEV_TX_BUSY; 414 return NETDEV_TX_BUSY;
407} 415}
408 416
409/**
410 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
411 * @data: pointer to the start of the headers
412 * @max: total length of section to find headers in
413 *
414 * This function is meant to determine the length of headers that will
415 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
416 * motivation of doing this is to only perform one pull for IPv4 TCP
417 * packets so that we can do basic things like calculating the gso_size
418 * based on the average data per packet.
419 **/
420static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
421 unsigned int max_size)
422{
423 unsigned char *network;
424 u8 hlen;
425
426 /* this should never happen, but better safe than sorry */
427 if (max_size < ETH_HLEN)
428 return max_size;
429
430 /* initialize network frame pointer */
431 network = data;
432
433 /* set first protocol and move network header forward */
434 network += ETH_HLEN;
435
436 /* handle any vlan tag if present */
437 if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
438 == HNS_RX_FLAG_VLAN_PRESENT) {
439 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
440 return max_size;
441
442 network += VLAN_HLEN;
443 }
444
445 /* handle L3 protocols */
446 if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
447 == HNS_RX_FLAG_L3ID_IPV4) {
448 if ((typeof(max_size))(network - data) >
449 (max_size - sizeof(struct iphdr)))
450 return max_size;
451
452 /* access ihl as a u8 to avoid unaligned access on ia64 */
453 hlen = (network[0] & 0x0F) << 2;
454
455 /* verify hlen meets minimum size requirements */
456 if (hlen < sizeof(struct iphdr))
457 return network - data;
458
459 /* record next protocol if header is present */
460 } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
461 == HNS_RX_FLAG_L3ID_IPV6) {
462 if ((typeof(max_size))(network - data) >
463 (max_size - sizeof(struct ipv6hdr)))
464 return max_size;
465
466 /* record next protocol */
467 hlen = sizeof(struct ipv6hdr);
468 } else {
469 return network - data;
470 }
471
472 /* relocate pointer to start of L4 header */
473 network += hlen;
474
475 /* finally sort out TCP/UDP */
476 if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
477 == HNS_RX_FLAG_L4ID_TCP) {
478 if ((typeof(max_size))(network - data) >
479 (max_size - sizeof(struct tcphdr)))
480 return max_size;
481
482 /* access doff as a u8 to avoid unaligned access on ia64 */
483 hlen = (network[12] & 0xF0) >> 2;
484
485 /* verify hlen meets minimum size requirements */
486 if (hlen < sizeof(struct tcphdr))
487 return network - data;
488
489 network += hlen;
490 } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
491 == HNS_RX_FLAG_L4ID_UDP) {
492 if ((typeof(max_size))(network - data) >
493 (max_size - sizeof(struct udphdr)))
494 return max_size;
495
496 network += sizeof(struct udphdr);
497 }
498
499 /* If everything has gone correctly network should be the
500 * data section of the packet and will be the end of the header.
501 * If not then it probably represents the end of the last recognized
502 * header.
503 */
504 if ((typeof(max_size))(network - data) < max_size)
505 return network - data;
506 else
507 return max_size;
508}
509
510static void hns_nic_reuse_page(struct sk_buff *skb, int i, 417static void hns_nic_reuse_page(struct sk_buff *skb, int i,
511 struct hnae_ring *ring, int pull_len, 418 struct hnae_ring *ring, int pull_len,
512 struct hnae_desc_cb *desc_cb) 419 struct hnae_desc_cb *desc_cb)
513{ 420{
514 struct hnae_desc *desc; 421 struct hnae_desc *desc;
515 int truesize, size; 422 u32 truesize;
423 int size;
516 int last_offset; 424 int last_offset;
517 bool twobufs; 425 bool twobufs;
518 426
@@ -530,7 +438,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
530 } 438 }
531 439
532 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, 440 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
533 size - pull_len, truesize - pull_len); 441 size - pull_len, truesize);
534 442
535 /* avoid re-using remote pages,flag default unreuse */ 443 /* avoid re-using remote pages,flag default unreuse */
536 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) 444 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +603,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
695 } else { 603 } else {
696 ring->stats.seg_pkt_cnt++; 604 ring->stats.seg_pkt_cnt++;
697 605
698 pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE); 606 pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
699 memcpy(__skb_put(skb, pull_len), va, 607 memcpy(__skb_put(skb, pull_len), va,
700 ALIGN(pull_len, sizeof(long))); 608 ALIGN(pull_len, sizeof(long)));
701 609
@@ -1212,11 +1120,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
1212 struct hnae_handle *h = priv->ae_handle; 1120 struct hnae_handle *h = priv->ae_handle;
1213 int state = 1; 1121 int state = 1;
1214 1122
1123 /* If there is no phy, do not need adjust link */
1215 if (ndev->phydev) { 1124 if (ndev->phydev) {
1216 h->dev->ops->adjust_link(h, ndev->phydev->speed, 1125 /* When phy link down, do nothing */
1217 ndev->phydev->duplex); 1126 if (ndev->phydev->link == 0)
1218 state = ndev->phydev->link; 1127 return;
1128
1129 if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
1130 ndev->phydev->duplex)) {
1131 /* because Hi161X chip don't support to change gmac
1132 * speed and duplex with traffic. Delay 200ms to
1133 * make sure there is no more data in chip FIFO.
1134 */
1135 netif_carrier_off(ndev);
1136 msleep(200);
1137 h->dev->ops->adjust_link(h, ndev->phydev->speed,
1138 ndev->phydev->duplex);
1139 netif_carrier_on(ndev);
1140 }
1219 } 1141 }
1142
1220 state = state && h->dev->ops->get_status(h); 1143 state = state && h->dev->ops->get_status(h);
1221 1144
1222 if (state != priv->link) { 1145 if (state != priv->link) {
@@ -1580,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
1580 return phy_mii_ioctl(phy_dev, ifr, cmd); 1503 return phy_mii_ioctl(phy_dev, ifr, cmd);
1581} 1504}
1582 1505
1583/* use only for netconsole to poll with the device without interrupt */
1584#ifdef CONFIG_NET_POLL_CONTROLLER
1585static void hns_nic_poll_controller(struct net_device *ndev)
1586{
1587 struct hns_nic_priv *priv = netdev_priv(ndev);
1588 unsigned long flags;
1589 int i;
1590
1591 local_irq_save(flags);
1592 for (i = 0; i < priv->ae_handle->q_num * 2; i++)
1593 napi_schedule(&priv->ring_data[i].napi);
1594 local_irq_restore(flags);
1595}
1596#endif
1597
1598static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, 1506static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1599 struct net_device *ndev) 1507 struct net_device *ndev)
1600{ 1508{
@@ -2047,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
2047 .ndo_set_features = hns_nic_set_features, 1955 .ndo_set_features = hns_nic_set_features,
2048 .ndo_fix_features = hns_nic_fix_features, 1956 .ndo_fix_features = hns_nic_fix_features,
2049 .ndo_get_stats64 = hns_nic_get_stats64, 1957 .ndo_get_stats64 = hns_nic_get_stats64,
2050#ifdef CONFIG_NET_POLL_CONTROLLER
2051 .ndo_poll_controller = hns_nic_poll_controller,
2052#endif
2053 .ndo_set_rx_mode = hns_nic_set_rx_mode, 1958 .ndo_set_rx_mode = hns_nic_set_rx_mode,
2054 .ndo_select_queue = hns_nic_select_queue, 1959 .ndo_select_queue = hns_nic_select_queue,
2055}; 1960};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 08f3c4743f74..774beda040a1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
243 } 243 }
244 244
245 if (h->dev->ops->adjust_link) { 245 if (h->dev->ops->adjust_link) {
246 netif_carrier_off(net_dev);
246 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex); 247 h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
248 netif_carrier_on(net_dev);
247 return 0; 249 return 0;
248 } 250 }
249 251
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 3554dca7a680..955c4ab18b03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2019 struct hns3_desc_cb *desc_cb) 2019 struct hns3_desc_cb *desc_cb)
2020{ 2020{
2021 struct hns3_desc *desc; 2021 struct hns3_desc *desc;
2022 int truesize, size; 2022 u32 truesize;
2023 int size;
2023 int last_offset; 2024 int last_offset;
2024 bool twobufs; 2025 bool twobufs;
2025 2026
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index a02a96aee2a2..cb450d7ec8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
284 284
285 /* priv data for the desc, e.g. skb when use with ip stack*/ 285 /* priv data for the desc, e.g. skb when use with ip stack*/
286 void *priv; 286 void *priv;
287 u16 page_offset; 287 u32 page_offset;
288 u16 reuse_flag;
289
290 u32 length; /* length of the buffer */ 288 u32 length; /* length of the buffer */
291 289
290 u16 reuse_flag;
291
292 /* desc type, used by the ring user to mark the type of the priv data */ 292 /* desc type, used by the ring user to mark the type of the priv data */
293 u16 type; 293 u16 type;
294}; 294};
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index c8c7ad2eff77..9b5a68b65432 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2634 /* Wait for link to drop */ 2634 /* Wait for link to drop */
2635 time = jiffies + (HZ / 10); 2635 time = jiffies + (HZ / 10);
2636 do { 2636 do {
2637 if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) 2637 if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
2638 break; 2638 break;
2639 if (!in_interrupt()) 2639 if (!in_interrupt())
2640 schedule_timeout_interruptible(1); 2640 schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..4a8f82938ed5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
789 stats->tx_errors = nic_tx_stats->tx_dropped; 789 stats->tx_errors = nic_tx_stats->tx_dropped;
790} 790}
791 791
792#ifdef CONFIG_NET_POLL_CONTROLLER
793static void hinic_netpoll(struct net_device *netdev)
794{
795 struct hinic_dev *nic_dev = netdev_priv(netdev);
796 int i, num_qps;
797
798 num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
799 for (i = 0; i < num_qps; i++) {
800 struct hinic_txq *txq = &nic_dev->txqs[i];
801 struct hinic_rxq *rxq = &nic_dev->rxqs[i];
802
803 napi_schedule(&txq->napi);
804 napi_schedule(&rxq->napi);
805 }
806}
807#endif
808
809static const struct net_device_ops hinic_netdev_ops = { 792static const struct net_device_ops hinic_netdev_ops = {
810 .ndo_open = hinic_open, 793 .ndo_open = hinic_open,
811 .ndo_stop = hinic_close, 794 .ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
818 .ndo_start_xmit = hinic_xmit_frame, 801 .ndo_start_xmit = hinic_xmit_frame,
819 .ndo_tx_timeout = hinic_tx_timeout, 802 .ndo_tx_timeout = hinic_tx_timeout,
820 .ndo_get_stats64 = hinic_get_stats64, 803 .ndo_get_stats64 = hinic_get_stats64,
821#ifdef CONFIG_NET_POLL_CONTROLLER
822 .ndo_poll_controller = hinic_netpoll,
823#endif
824}; 804};
825 805
826static void netdev_features_init(struct net_device *netdev) 806static void netdev_features_init(struct net_device *netdev)
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index dc983450354b..35f6291a3672 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
64#define RX_AREA_END 0x0fc00 64#define RX_AREA_END 0x0fc00
65 65
66static int ether1_open(struct net_device *dev); 66static int ether1_open(struct net_device *dev);
67static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); 67static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
68 struct net_device *dev);
68static irqreturn_t ether1_interrupt(int irq, void *dev_id); 69static irqreturn_t ether1_interrupt(int irq, void *dev_id);
69static int ether1_close(struct net_device *dev); 70static int ether1_close(struct net_device *dev);
70static void ether1_setmulticastlist(struct net_device *dev); 71static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
667 netif_wake_queue(dev); 668 netif_wake_queue(dev);
668} 669}
669 670
670static int 671static netdev_tx_t
671ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) 672ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
672{ 673{
673 int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; 674 int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f00a1dc2128c..2f7ae118217f 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -347,7 +347,7 @@ static const char init_setup[] =
347 0x7f /* *multi IA */ }; 347 0x7f /* *multi IA */ };
348 348
349static int i596_open(struct net_device *dev); 349static int i596_open(struct net_device *dev);
350static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 350static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
351static irqreturn_t i596_interrupt(int irq, void *dev_id); 351static irqreturn_t i596_interrupt(int irq, void *dev_id);
352static int i596_close(struct net_device *dev); 352static int i596_close(struct net_device *dev);
353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); 353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
966} 966}
967 967
968 968
969static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 969static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
970{ 970{
971 struct i596_private *lp = netdev_priv(dev); 971 struct i596_private *lp = netdev_priv(dev);
972 struct tx_cmd *tx_cmd; 972 struct tx_cmd *tx_cmd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 8bb15a8c2a40..1a86184d44c0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
121static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); 121static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
122static int sun3_82586_open(struct net_device *dev); 122static int sun3_82586_open(struct net_device *dev);
123static int sun3_82586_close(struct net_device *dev); 123static int sun3_82586_close(struct net_device *dev);
124static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); 124static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
125 struct net_device *);
125static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); 126static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
126static void set_multicast_list(struct net_device *dev); 127static void set_multicast_list(struct net_device *dev);
127static void sun3_82586_timeout(struct net_device *dev); 128static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
1002 * send frame 1003 * send frame
1003 */ 1004 */
1004 1005
1005static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) 1006static netdev_tx_t
1007sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1006{ 1008{
1007 int len,i; 1009 int len,i;
1008#ifndef NO_NOPCOMMANDS 1010#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512..03f64f40b2a3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
921 return rx; 921 return rx;
922} 922}
923 923
924#ifdef CONFIG_NET_POLL_CONTROLLER
925static void ehea_netpoll(struct net_device *dev)
926{
927 struct ehea_port *port = netdev_priv(dev);
928 int i;
929
930 for (i = 0; i < port->num_def_qps; i++)
931 napi_schedule(&port->port_res[i].napi);
932}
933#endif
934
935static irqreturn_t ehea_recv_irq_handler(int irq, void *param) 924static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
936{ 925{
937 struct ehea_port_res *pr = param; 926 struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
2953 .ndo_open = ehea_open, 2942 .ndo_open = ehea_open,
2954 .ndo_stop = ehea_stop, 2943 .ndo_stop = ehea_stop,
2955 .ndo_start_xmit = ehea_start_xmit, 2944 .ndo_start_xmit = ehea_start_xmit,
2956#ifdef CONFIG_NET_POLL_CONTROLLER
2957 .ndo_poll_controller = ehea_netpoll,
2958#endif
2959 .ndo_get_stats64 = ehea_get_stats64, 2945 .ndo_get_stats64 = ehea_get_stats64,
2960 .ndo_set_mac_address = ehea_set_mac_addr, 2946 .ndo_set_mac_address = ehea_set_mac_addr,
2961 .ndo_validate_addr = eth_validate_addr, 2947 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 354c0982847b..129f4e9f38da 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
494 case 16384: 494 case 16384:
495 ret |= EMAC_MR1_RFS_16K; 495 ret |= EMAC_MR1_RFS_16K;
496 break; 496 break;
497 case 8192:
498 ret |= EMAC4_MR1_RFS_8K;
499 break;
500 case 4096: 497 case 4096:
501 ret |= EMAC_MR1_RFS_4K; 498 ret |= EMAC_MR1_RFS_4K;
502 break; 499 break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
537 case 16384: 534 case 16384:
538 ret |= EMAC4_MR1_RFS_16K; 535 ret |= EMAC4_MR1_RFS_16K;
539 break; 536 break;
537 case 8192:
538 ret |= EMAC4_MR1_RFS_8K;
539 break;
540 case 4096: 540 case 4096:
541 ret |= EMAC4_MR1_RFS_4K; 541 ret |= EMAC4_MR1_RFS_4K;
542 break; 542 break;
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
2677 if (of_phy_is_fixed_link(np)) { 2677 if (of_phy_is_fixed_link(np)) {
2678 int res = emac_dt_mdio_probe(dev); 2678 int res = emac_dt_mdio_probe(dev);
2679 2679
2680 if (!res) { 2680 if (res)
2681 res = of_phy_register_fixed_link(np); 2681 return res;
2682 if (res) 2682
2683 mdiobus_unregister(dev->mii_bus); 2683 res = of_phy_register_fixed_link(np);
2684 dev->phy_dev = of_phy_find_device(np);
2685 if (res || !dev->phy_dev) {
2686 mdiobus_unregister(dev->mii_bus);
2687 return res ? res : -EINVAL;
2684 } 2688 }
2685 return res; 2689 emac_adjust_link(dev->ndev);
2690 put_device(&dev->phy_dev->mdio.dev);
2686 } 2691 }
2687 return 0; 2692 return 0;
2688 } 2693 }
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index dafdd4ade705..699ef942b615 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1823 adapter->map_id = 1; 1823 adapter->map_id = 1;
1824 release_rx_pools(adapter); 1824 release_rx_pools(adapter);
1825 release_tx_pools(adapter); 1825 release_tx_pools(adapter);
1826 init_rx_pools(netdev); 1826 rc = init_rx_pools(netdev);
1827 init_tx_pools(netdev); 1827 if (rc)
1828 return rc;
1829 rc = init_tx_pools(netdev);
1830 if (rc)
1831 return rc;
1828 1832
1829 release_napi(adapter); 1833 release_napi(adapter);
1830 init_napi(adapter); 1834 rc = init_napi(adapter);
1835 if (rc)
1836 return rc;
1831 } else { 1837 } else {
1832 rc = reset_tx_pools(adapter); 1838 rc = reset_tx_pools(adapter);
1833 if (rc) 1839 if (rc)
@@ -2201,19 +2207,6 @@ restart_poll:
2201 return frames_processed; 2207 return frames_processed;
2202} 2208}
2203 2209
2204#ifdef CONFIG_NET_POLL_CONTROLLER
2205static void ibmvnic_netpoll_controller(struct net_device *dev)
2206{
2207 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2208 int i;
2209
2210 replenish_pools(netdev_priv(dev));
2211 for (i = 0; i < adapter->req_rx_queues; i++)
2212 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
2213 adapter->rx_scrq[i]);
2214}
2215#endif
2216
2217static int wait_for_reset(struct ibmvnic_adapter *adapter) 2210static int wait_for_reset(struct ibmvnic_adapter *adapter)
2218{ 2211{
2219 int rc, ret; 2212 int rc, ret;
@@ -2286,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
2286 .ndo_set_mac_address = ibmvnic_set_mac, 2279 .ndo_set_mac_address = ibmvnic_set_mac,
2287 .ndo_validate_addr = eth_validate_addr, 2280 .ndo_validate_addr = eth_validate_addr,
2288 .ndo_tx_timeout = ibmvnic_tx_timeout, 2281 .ndo_tx_timeout = ibmvnic_tx_timeout,
2289#ifdef CONFIG_NET_POLL_CONTROLLER
2290 .ndo_poll_controller = ibmvnic_netpoll_controller,
2291#endif
2292 .ndo_change_mtu = ibmvnic_change_mtu, 2282 .ndo_change_mtu = ibmvnic_change_mtu,
2293 .ndo_features_check = ibmvnic_features_check, 2283 .ndo_features_check = ibmvnic_features_check,
2294}; 2284};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index bdb3f8e65ed4..2569a168334c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
624 adapter->tx_ring = tx_old; 624 adapter->tx_ring = tx_old;
625 e1000_free_all_rx_resources(adapter); 625 e1000_free_all_rx_resources(adapter);
626 e1000_free_all_tx_resources(adapter); 626 e1000_free_all_tx_resources(adapter);
627 kfree(tx_old);
628 kfree(rx_old);
629 adapter->rx_ring = rxdr; 627 adapter->rx_ring = rxdr;
630 adapter->tx_ring = txdr; 628 adapter->tx_ring = txdr;
631 err = e1000_up(adapter); 629 err = e1000_up(adapter);
632 if (err) 630 if (err)
633 goto err_setup; 631 goto err_setup;
634 } 632 }
633 kfree(tx_old);
634 kfree(rx_old);
635 635
636 clear_bit(__E1000_RESETTING, &adapter->flags); 636 clear_bit(__E1000_RESETTING, &adapter->flags);
637 return 0; 637 return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
644err_alloc_rx: 644err_alloc_rx:
645 kfree(txdr); 645 kfree(txdr);
646err_alloc_tx: 646err_alloc_tx:
647 e1000_up(adapter); 647 if (netif_running(adapter->netdev))
648 e1000_up(adapter);
648err_setup: 649err_setup:
649 clear_bit(__E1000_RESETTING, &adapter->flags); 650 clear_bit(__E1000_RESETTING, &adapter->flags);
650 return err; 651 return err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a903a0ba45e1..7d42582ed48d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
504void fm10k_service_event_schedule(struct fm10k_intfc *interface); 504void fm10k_service_event_schedule(struct fm10k_intfc *interface);
505void fm10k_macvlan_schedule(struct fm10k_intfc *interface); 505void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
506void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); 506void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
507#ifdef CONFIG_NET_POLL_CONTROLLER
508void fm10k_netpoll(struct net_device *netdev);
509#endif
510 507
511/* Netdev */ 508/* Netdev */
512struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); 509struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 929f538d28bc..538a8467f434 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
1648 .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, 1648 .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
1649 .ndo_dfwd_add_station = fm10k_dfwd_add_station, 1649 .ndo_dfwd_add_station = fm10k_dfwd_add_station,
1650 .ndo_dfwd_del_station = fm10k_dfwd_del_station, 1650 .ndo_dfwd_del_station = fm10k_dfwd_del_station,
1651#ifdef CONFIG_NET_POLL_CONTROLLER
1652 .ndo_poll_controller = fm10k_netpoll,
1653#endif
1654 .ndo_features_check = fm10k_features_check, 1651 .ndo_features_check = fm10k_features_check,
1655}; 1652};
1656 1653
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 15071e4adb98..c859ababeed5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
1210 return IRQ_HANDLED; 1210 return IRQ_HANDLED;
1211} 1211}
1212 1212
1213#ifdef CONFIG_NET_POLL_CONTROLLER
1214/**
1215 * fm10k_netpoll - A Polling 'interrupt' handler
1216 * @netdev: network interface device structure
1217 *
1218 * This is used by netconsole to send skbs without having to re-enable
1219 * interrupts. It's not called while the normal interrupt routine is executing.
1220 **/
1221void fm10k_netpoll(struct net_device *netdev)
1222{
1223 struct fm10k_intfc *interface = netdev_priv(netdev);
1224 int i;
1225
1226 /* if interface is down do nothing */
1227 if (test_bit(__FM10K_DOWN, interface->state))
1228 return;
1229
1230 for (i = 0; i < interface->num_q_vectors; i++)
1231 fm10k_msix_clean_rings(0, interface->q_vector[i]);
1232}
1233
1234#endif
1235#define FM10K_ERR_MSG(type) case (type): error = #type; break 1213#define FM10K_ERR_MSG(type) case (type): error = #type; break
1236static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, 1214static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
1237 struct fm10k_fault *fault) 1215 struct fm10k_fault *fault)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index abcd096ede14..5ff6caa83948 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
2013 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) 2013 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
2014 i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i); 2014 i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
2015 2015
2016 WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, 2016 WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
2017 "stat strings count mismatch!"); 2017 "stat strings count mismatch!");
2018} 2018}
2019 2019
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f2c622e78802..ac685ad4d877 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5122 u8 *bw_share) 5122 u8 *bw_share)
5123{ 5123{
5124 struct i40e_aqc_configure_vsi_tc_bw_data bw_data; 5124 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5125 struct i40e_pf *pf = vsi->back;
5125 i40e_status ret; 5126 i40e_status ret;
5126 int i; 5127 int i;
5127 5128
5128 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) 5129 /* There is no need to reset BW when mqprio mode is on. */
5130 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5129 return 0; 5131 return 0;
5130 if (!vsi->mqprio_qopt.qopt.hw) { 5132 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5131 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); 5133 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5132 if (ret) 5134 if (ret)
5133 dev_info(&vsi->back->pdev->dev, 5135 dev_info(&pf->pdev->dev,
5134 "Failed to reset tx rate for vsi->seid %u\n", 5136 "Failed to reset tx rate for vsi->seid %u\n",
5135 vsi->seid); 5137 vsi->seid);
5136 return ret; 5138 return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5139 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) 5141 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5140 bw_data.tc_bw_credits[i] = bw_share[i]; 5142 bw_data.tc_bw_credits[i] = bw_share[i];
5141 5143
5142 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data, 5144 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5143 NULL);
5144 if (ret) { 5145 if (ret) {
5145 dev_info(&vsi->back->pdev->dev, 5146 dev_info(&pf->pdev->dev,
5146 "AQ command Config VSI BW allocation per TC failed = %d\n", 5147 "AQ command Config VSI BW allocation per TC failed = %d\n",
5147 vsi->back->hw.aq.asq_last_status); 5148 pf->hw.aq.asq_last_status);
5148 return -EINVAL; 5149 return -EINVAL;
5149 } 5150 }
5150 5151
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5906c1c1d19d..fef6d892ed4c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
396 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; 396 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
397} 397}
398 398
399#ifdef CONFIG_NET_POLL_CONTROLLER
400/**
401 * i40evf_netpoll - A Polling 'interrupt' handler
402 * @netdev: network interface device structure
403 *
404 * This is used by netconsole to send skbs without having to re-enable
405 * interrupts. It's not called while the normal interrupt routine is executing.
406 **/
407static void i40evf_netpoll(struct net_device *netdev)
408{
409 struct i40evf_adapter *adapter = netdev_priv(netdev);
410 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
411 int i;
412
413 /* if interface is down do nothing */
414 if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
415 return;
416
417 for (i = 0; i < q_vectors; i++)
418 i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
419}
420
421#endif
422/** 399/**
423 * i40evf_irq_affinity_notify - Callback for affinity changes 400 * i40evf_irq_affinity_notify - Callback for affinity changes
424 * @notify: context as to what irq was changed 401 * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
3229 .ndo_features_check = i40evf_features_check, 3206 .ndo_features_check = i40evf_features_check,
3230 .ndo_fix_features = i40evf_fix_features, 3207 .ndo_fix_features = i40evf_fix_features,
3231 .ndo_set_features = i40evf_set_features, 3208 .ndo_set_features = i40evf_set_features,
3232#ifdef CONFIG_NET_POLL_CONTROLLER
3233 .ndo_poll_controller = i40evf_netpoll,
3234#endif
3235 .ndo_setup_tc = i40evf_setup_tc, 3209 .ndo_setup_tc = i40evf_setup_tc,
3236}; 3210};
3237 3211
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d8b5fff581e7..868f4a1d0f72 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
89#define ice_for_each_rxq(vsi, i) \ 89#define ice_for_each_rxq(vsi, i) \
90 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) 90 for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
91 91
92/* Macros for each allocated tx/rx ring whether used or not in a VSI */
93#define ice_for_each_alloc_txq(vsi, i) \
94 for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
95
96#define ice_for_each_alloc_rxq(vsi, i) \
97 for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
98
92struct ice_tc_info { 99struct ice_tc_info {
93 u16 qoffset; 100 u16 qoffset;
94 u16 qcount; 101 u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
189 struct list_head tmp_sync_list; /* MAC filters to be synced */ 196 struct list_head tmp_sync_list; /* MAC filters to be synced */
190 struct list_head tmp_unsync_list; /* MAC filters to be unsynced */ 197 struct list_head tmp_unsync_list; /* MAC filters to be unsynced */
191 198
192 bool irqs_ready; 199 u8 irqs_ready;
193 bool current_isup; /* Sync 'link up' logging */ 200 u8 current_isup; /* Sync 'link up' logging */
194 bool stat_offsets_loaded; 201 u8 stat_offsets_loaded;
195 202
196 /* queue information */ 203 /* queue information */
197 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ 204 u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
262 struct ice_hw_port_stats stats; 269 struct ice_hw_port_stats stats;
263 struct ice_hw_port_stats stats_prev; 270 struct ice_hw_port_stats stats_prev;
264 struct ice_hw hw; 271 struct ice_hw hw;
265 bool stat_prev_loaded; /* has previous stats been loaded */ 272 u8 stat_prev_loaded; /* has previous stats been loaded */
266 char int_name[ICE_INT_NAME_STR_LEN]; 273 char int_name[ICE_INT_NAME_STR_LEN];
267}; 274};
268 275
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7541ec2270b3..a0614f472658 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
329 /* VLAN section */ 329 /* VLAN section */
330 __le16 pvid; /* VLANS include priority bits */ 330 __le16 pvid; /* VLANS include priority bits */
331 u8 pvlan_reserved[2]; 331 u8 pvlan_reserved[2];
332 u8 port_vlan_flags; 332 u8 vlan_flags;
333#define ICE_AQ_VSI_PVLAN_MODE_S 0 333#define ICE_AQ_VSI_VLAN_MODE_S 0
334#define ICE_AQ_VSI_PVLAN_MODE_M (0x3 << ICE_AQ_VSI_PVLAN_MODE_S) 334#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
335#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1 335#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
336#define ICE_AQ_VSI_PVLAN_MODE_TAGGED 0x2 336#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
337#define ICE_AQ_VSI_PVLAN_MODE_ALL 0x3 337#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
338#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) 338#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
339#define ICE_AQ_VSI_PVLAN_EMOD_S 3 339#define ICE_AQ_VSI_VLAN_EMOD_S 3
340#define ICE_AQ_VSI_PVLAN_EMOD_M (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) 340#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
341#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S) 341#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
342#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S) 342#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
343#define ICE_AQ_VSI_PVLAN_EMOD_STR (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S) 343#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
344#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S) 344#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
345 u8 pvlan_reserved2[3]; 345 u8 pvlan_reserved2[3];
346 /* ingress egress up sections */ 346 /* ingress egress up sections */
347 __le32 ingress_table; /* bitmap, 3 bits per up */ 347 __le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
594#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) 594#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
595#define ICE_LG_ACT_GENERIC_PRIORITY_S 22 595#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
596#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) 596#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
597#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
597 598
598 /* Action = 7 - Set Stat count */ 599 /* Action = 7 - Set Stat count */
599#define ICE_LG_ACT_STAT_COUNT 0x7 600#define ICE_LG_ACT_STAT_COUNT 0x7
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 71d032cc5fa7..661beea6af79 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
45/** 45/**
46 * ice_clear_pf_cfg - Clear PF configuration 46 * ice_clear_pf_cfg - Clear PF configuration
47 * @hw: pointer to the hardware structure 47 * @hw: pointer to the hardware structure
48 *
49 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
50 * configuration, flow director filters, etc.).
48 */ 51 */
49enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) 52enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
50{ 53{
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
1483 struct ice_phy_info *phy_info; 1486 struct ice_phy_info *phy_info;
1484 enum ice_status status = 0; 1487 enum ice_status status = 0;
1485 1488
1486 if (!pi) 1489 if (!pi || !link_up)
1487 return ICE_ERR_PARAM; 1490 return ICE_ERR_PARAM;
1488 1491
1489 phy_info = &pi->phy; 1492 phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
1619 } 1622 }
1620 1623
1621 /* LUT size is only valid for Global and PF table types */ 1624 /* LUT size is only valid for Global and PF table types */
1622 if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) { 1625 switch (lut_size) {
1623 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << 1626 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
1624 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1627 break;
1625 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1628 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
1626 } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
1627 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 1629 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
1628 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1630 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1629 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1631 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1630 } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) && 1632 break;
1631 (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) { 1633 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
1632 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 1634 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
1633 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 1635 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
1634 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 1636 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
1635 } else { 1637 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
1638 break;
1639 }
1640 /* fall-through */
1641 default:
1636 status = ICE_ERR_PARAM; 1642 status = ICE_ERR_PARAM;
1637 goto ice_aq_get_set_rss_lut_exit; 1643 goto ice_aq_get_set_rss_lut_exit;
1638 } 1644 }
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 7c511f144ed6..62be72fdc8f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
597 return 0; 597 return 0;
598 598
599init_ctrlq_free_rq: 599init_ctrlq_free_rq:
600 ice_shutdown_rq(hw, cq); 600 if (cq->rq.head) {
601 ice_shutdown_sq(hw, cq); 601 ice_shutdown_rq(hw, cq);
602 mutex_destroy(&cq->sq_lock); 602 mutex_destroy(&cq->rq_lock);
603 mutex_destroy(&cq->rq_lock); 603 }
604 if (cq->sq.head) {
605 ice_shutdown_sq(hw, cq);
606 mutex_destroy(&cq->sq_lock);
607 }
604 return status; 608 return status;
605} 609}
606 610
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
706 return; 710 return;
707 } 711 }
708 712
709 ice_shutdown_sq(hw, cq); 713 if (cq->sq.head) {
710 ice_shutdown_rq(hw, cq); 714 ice_shutdown_sq(hw, cq);
711 mutex_destroy(&cq->sq_lock); 715 mutex_destroy(&cq->sq_lock);
712 mutex_destroy(&cq->rq_lock); 716 }
717 if (cq->rq.head) {
718 ice_shutdown_rq(hw, cq);
719 mutex_destroy(&cq->rq_lock);
720 }
713} 721}
714 722
715/** 723/**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1057 1065
1058clean_rq_elem_out: 1066clean_rq_elem_out:
1059 /* Set pending if needed, unlock and return */ 1067 /* Set pending if needed, unlock and return */
1060 if (pending) 1068 if (pending) {
1069 /* re-read HW head to calculate actual pending messages */
1070 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1061 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); 1071 *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1072 }
1062clean_rq_elem_err: 1073clean_rq_elem_err:
1063 mutex_unlock(&cq->rq_lock); 1074 mutex_unlock(&cq->rq_lock);
1064 1075
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 1db304c01d10..c71a9b528d6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
26{ 26{
27 struct ice_netdev_priv *np = netdev_priv(netdev); 27 struct ice_netdev_priv *np = netdev_priv(netdev);
28 28
29 return ((np->vsi->num_txq + np->vsi->num_rxq) * 29 return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
30 (sizeof(struct ice_q_stats) / sizeof(u64))); 30 (sizeof(struct ice_q_stats) / sizeof(u64)));
31} 31}
32 32
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
218 p += ETH_GSTRING_LEN; 218 p += ETH_GSTRING_LEN;
219 } 219 }
220 220
221 ice_for_each_txq(vsi, i) { 221 ice_for_each_alloc_txq(vsi, i) {
222 snprintf(p, ETH_GSTRING_LEN, 222 snprintf(p, ETH_GSTRING_LEN,
223 "tx-queue-%u.tx_packets", i); 223 "tx-queue-%u.tx_packets", i);
224 p += ETH_GSTRING_LEN; 224 p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
226 p += ETH_GSTRING_LEN; 226 p += ETH_GSTRING_LEN;
227 } 227 }
228 228
229 ice_for_each_rxq(vsi, i) { 229 ice_for_each_alloc_rxq(vsi, i) {
230 snprintf(p, ETH_GSTRING_LEN, 230 snprintf(p, ETH_GSTRING_LEN,
231 "rx-queue-%u.rx_packets", i); 231 "rx-queue-%u.rx_packets", i);
232 p += ETH_GSTRING_LEN; 232 p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
253{ 253{
254 switch (sset) { 254 switch (sset) {
255 case ETH_SS_STATS: 255 case ETH_SS_STATS:
256 /* The number (and order) of strings reported *must* remain
257 * constant for a given netdevice. This function must not
258 * report a different number based on run time parameters
259 * (such as the number of queues in use, or the setting of
260 * a private ethtool flag). This is due to the nature of the
261 * ethtool stats API.
262 *
263 * User space programs such as ethtool must make 3 separate
264 * ioctl requests, one for size, one for the strings, and
265 * finally one for the stats. Since these cross into
266 * user space, changes to the number or size could result in
267 * undefined memory access or incorrect string<->value
268 * correlations for statistics.
269 *
270 * Even if it appears to be safe, changes to the size or
271 * order of strings will suffer from race conditions and are
272 * not safe.
273 */
256 return ICE_ALL_STATS_LEN(netdev); 274 return ICE_ALL_STATS_LEN(netdev);
257 default: 275 default:
258 return -EOPNOTSUPP; 276 return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
280 /* populate per queue stats */ 298 /* populate per queue stats */
281 rcu_read_lock(); 299 rcu_read_lock();
282 300
283 ice_for_each_txq(vsi, j) { 301 ice_for_each_alloc_txq(vsi, j) {
284 ring = READ_ONCE(vsi->tx_rings[j]); 302 ring = READ_ONCE(vsi->tx_rings[j]);
285 if (!ring) 303 if (ring) {
286 continue; 304 data[i++] = ring->stats.pkts;
287 data[i++] = ring->stats.pkts; 305 data[i++] = ring->stats.bytes;
288 data[i++] = ring->stats.bytes; 306 } else {
307 data[i++] = 0;
308 data[i++] = 0;
309 }
289 } 310 }
290 311
291 ice_for_each_rxq(vsi, j) { 312 ice_for_each_alloc_rxq(vsi, j) {
292 ring = READ_ONCE(vsi->rx_rings[j]); 313 ring = READ_ONCE(vsi->rx_rings[j]);
293 data[i++] = ring->stats.pkts; 314 if (ring) {
294 data[i++] = ring->stats.bytes; 315 data[i++] = ring->stats.pkts;
316 data[i++] = ring->stats.bytes;
317 } else {
318 data[i++] = 0;
319 data[i++] = 0;
320 }
295 } 321 }
296 322
297 rcu_read_unlock(); 323 rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
519 goto done; 545 goto done;
520 } 546 }
521 547
522 for (i = 0; i < vsi->num_txq; i++) { 548 for (i = 0; i < vsi->alloc_txq; i++) {
523 /* clone ring and setup updated count */ 549 /* clone ring and setup updated count */
524 tx_rings[i] = *vsi->tx_rings[i]; 550 tx_rings[i] = *vsi->tx_rings[i];
525 tx_rings[i].count = new_tx_cnt; 551 tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
551 goto done; 577 goto done;
552 } 578 }
553 579
554 for (i = 0; i < vsi->num_rxq; i++) { 580 for (i = 0; i < vsi->alloc_rxq; i++) {
555 /* clone ring and setup updated count */ 581 /* clone ring and setup updated count */
556 rx_rings[i] = *vsi->rx_rings[i]; 582 rx_rings[i] = *vsi->rx_rings[i];
557 rx_rings[i].count = new_rx_cnt; 583 rx_rings[i].count = new_rx_cnt;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 499904874b3f..6076fc87df9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -121,10 +121,6 @@
121#define PFINT_FW_CTL_CAUSE_ENA_S 30 121#define PFINT_FW_CTL_CAUSE_ENA_S 30
122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) 122#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S)
123#define PFINT_OICR 0x0016CA00 123#define PFINT_OICR 0x0016CA00
124#define PFINT_OICR_HLP_RDY_S 14
125#define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S)
126#define PFINT_OICR_CPM_RDY_S 15
127#define PFINT_OICR_CPM_RDY_M BIT(PFINT_OICR_CPM_RDY_S)
128#define PFINT_OICR_ECC_ERR_S 16 124#define PFINT_OICR_ECC_ERR_S 16
129#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) 125#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S)
130#define PFINT_OICR_MAL_DETECT_S 19 126#define PFINT_OICR_MAL_DETECT_S 19
@@ -133,10 +129,6 @@
133#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) 129#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S)
134#define PFINT_OICR_PCI_EXCEPTION_S 21 130#define PFINT_OICR_PCI_EXCEPTION_S 21
135#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) 131#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S)
136#define PFINT_OICR_GPIO_S 22
137#define PFINT_OICR_GPIO_M BIT(PFINT_OICR_GPIO_S)
138#define PFINT_OICR_STORM_DETECT_S 24
139#define PFINT_OICR_STORM_DETECT_M BIT(PFINT_OICR_STORM_DETECT_S)
140#define PFINT_OICR_HMC_ERR_S 26 132#define PFINT_OICR_HMC_ERR_S 26
141#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) 133#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S)
142#define PFINT_OICR_PE_CRITERR_S 28 134#define PFINT_OICR_PE_CRITERR_S 28
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d23a91665b46..068dbc740b76 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
265struct ice_rlan_ctx { 265struct ice_rlan_ctx {
266 u16 head; 266 u16 head;
267 u16 cpuid; /* bigger than needed, see above for reason */ 267 u16 cpuid; /* bigger than needed, see above for reason */
268#define ICE_RLAN_BASE_S 7
268 u64 base; 269 u64 base;
269 u16 qlen; 270 u16 qlen;
270#define ICE_RLAN_CTX_DBUF_S 7 271#define ICE_RLAN_CTX_DBUF_S 7
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5299caf55a7f..3f047bb43348 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
901 case ice_aqc_opc_get_link_status: 901 case ice_aqc_opc_get_link_status:
902 if (ice_handle_link_event(pf)) 902 if (ice_handle_link_event(pf))
903 dev_err(&pf->pdev->dev, 903 dev_err(&pf->pdev->dev,
904 "Could not handle link event"); 904 "Could not handle link event\n");
905 break; 905 break;
906 default: 906 default:
907 dev_dbg(&pf->pdev->dev, 907 dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
917} 917}
918 918
919/** 919/**
920 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
921 * @hw: pointer to hardware info
922 * @cq: control queue information
923 *
924 * returns true if there are pending messages in a queue, false if there aren't
925 */
926static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
927{
928 u16 ntu;
929
930 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
931 return cq->rq.next_to_clean != ntu;
932}
933
934/**
920 * ice_clean_adminq_subtask - clean the AdminQ rings 935 * ice_clean_adminq_subtask - clean the AdminQ rings
921 * @pf: board private structure 936 * @pf: board private structure
922 */ 937 */
923static void ice_clean_adminq_subtask(struct ice_pf *pf) 938static void ice_clean_adminq_subtask(struct ice_pf *pf)
924{ 939{
925 struct ice_hw *hw = &pf->hw; 940 struct ice_hw *hw = &pf->hw;
926 u32 val;
927 941
928 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) 942 if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
929 return; 943 return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
933 947
934 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state); 948 clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
935 949
936 /* re-enable Admin queue interrupt causes */ 950 /* There might be a situation where new messages arrive to a control
937 val = rd32(hw, PFINT_FW_CTL); 951 * queue between processing the last message and clearing the
938 wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M)); 952 * EVENT_PENDING bit. So before exiting, check queue head again (using
953 * ice_ctrlq_pending) and process new messages if any.
954 */
955 if (ice_ctrlq_pending(hw, &hw->adminq))
956 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
939 957
940 ice_flush(hw); 958 ice_flush(hw);
941} 959}
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1295 qcount = numq_tc; 1313 qcount = numq_tc;
1296 } 1314 }
1297 1315
1298 /* find higher power-of-2 of qcount */ 1316 /* find the (rounded up) power-of-2 of qcount */
1299 pow = ilog2(qcount); 1317 pow = order_base_2(qcount);
1300
1301 if (!is_power_of_2(qcount))
1302 pow++;
1303 1318
1304 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { 1319 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
1305 if (!(vsi->tc_cfg.ena_tc & BIT(i))) { 1320 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
1352 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; 1367 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
1353 /* Traffic from VSI can be sent to LAN */ 1368 /* Traffic from VSI can be sent to LAN */
1354 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; 1369 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
1355 /* Allow all packets untagged/tagged */ 1370
1356 ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL & 1371 /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
1357 ICE_AQ_VSI_PVLAN_MODE_M) >> 1372 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
1358 ICE_AQ_VSI_PVLAN_MODE_S); 1373 * packets untagged/tagged.
1359 /* Show VLAN/UP from packets in Rx descriptors */ 1374 */
1360 ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH & 1375 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
1361 ICE_AQ_VSI_PVLAN_EMOD_M) >> 1376 ICE_AQ_VSI_VLAN_MODE_M) >>
1362 ICE_AQ_VSI_PVLAN_EMOD_S); 1377 ICE_AQ_VSI_VLAN_MODE_S);
1378
1363 /* Have 1:1 UP mapping for both ingress/egress tables */ 1379 /* Have 1:1 UP mapping for both ingress/egress tables */
1364 table |= ICE_UP_TABLE_TRANSLATE(0, 0); 1380 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
1365 table |= ICE_UP_TABLE_TRANSLATE(1, 1); 1381 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
1688 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */ 1704 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
1689 rd32(hw, PFINT_OICR); /* read to clear */ 1705 rd32(hw, PFINT_OICR); /* read to clear */
1690 1706
1691 val = (PFINT_OICR_HLP_RDY_M | 1707 val = (PFINT_OICR_ECC_ERR_M |
1692 PFINT_OICR_CPM_RDY_M |
1693 PFINT_OICR_ECC_ERR_M |
1694 PFINT_OICR_MAL_DETECT_M | 1708 PFINT_OICR_MAL_DETECT_M |
1695 PFINT_OICR_GRST_M | 1709 PFINT_OICR_GRST_M |
1696 PFINT_OICR_PCI_EXCEPTION_M | 1710 PFINT_OICR_PCI_EXCEPTION_M |
1697 PFINT_OICR_GPIO_M | 1711 PFINT_OICR_HMC_ERR_M |
1698 PFINT_OICR_STORM_DETECT_M | 1712 PFINT_OICR_PE_CRITERR_M);
1699 PFINT_OICR_HMC_ERR_M);
1700 1713
1701 wr32(hw, PFINT_OICR_ENA, val); 1714 wr32(hw, PFINT_OICR_ENA, val);
1702 1715
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
2058skip_req_irq: 2071skip_req_irq:
2059 ice_ena_misc_vector(pf); 2072 ice_ena_misc_vector(pf);
2060 2073
2061 val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | 2074 val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2062 (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) | 2075 PFINT_OICR_CTL_CAUSE_ENA_M);
2063 PFINT_OICR_CTL_CAUSE_ENA_M;
2064 wr32(hw, PFINT_OICR_CTL, val); 2076 wr32(hw, PFINT_OICR_CTL, val);
2065 2077
2066 /* This enables Admin queue Interrupt causes */ 2078 /* This enables Admin queue Interrupt causes */
2067 val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | 2079 val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2068 (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) | 2080 PFINT_FW_CTL_CAUSE_ENA_M);
2069 PFINT_FW_CTL_CAUSE_ENA_M;
2070 wr32(hw, PFINT_FW_CTL, val); 2081 wr32(hw, PFINT_FW_CTL, val);
2071 2082
2072 itr_gran = hw->itr_gran_200; 2083 itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3246 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) 3257 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
3247 ice_dis_msix(pf); 3258 ice_dis_msix(pf);
3248 3259
3249 devm_kfree(&pf->pdev->dev, pf->irq_tracker); 3260 if (pf->irq_tracker) {
3250 pf->irq_tracker = NULL; 3261 devm_kfree(&pf->pdev->dev, pf->irq_tracker);
3262 pf->irq_tracker = NULL;
3263 }
3251} 3264}
3252 3265
3253/** 3266/**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
3271 3284
3272 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev)); 3285 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
3273 if (err) { 3286 if (err) {
3274 dev_err(&pdev->dev, "I/O map error %d\n", err); 3287 dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
3275 return err; 3288 return err;
3276 } 3289 }
3277 3290
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
3720 enum ice_status status; 3733 enum ice_status status;
3721 3734
3722 /* Here we are configuring the VSI to let the driver add VLAN tags by 3735 /* Here we are configuring the VSI to let the driver add VLAN tags by
3723 * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN 3736 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
3724 * tag insertion happens in the Tx hot path, in ice_tx_map. 3737 * insertion happens in the Tx hot path, in ice_tx_map.
3725 */ 3738 */
3726 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL; 3739 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
3727 3740
3728 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 3741 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3729 ctxt.vsi_num = vsi->vsi_num; 3742 ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
3735 return -EIO; 3748 return -EIO;
3736 } 3749 }
3737 3750
3738 vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; 3751 vsi->info.vlan_flags = ctxt.info.vlan_flags;
3739 return 0; 3752 return 0;
3740} 3753}
3741 3754
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
3757 */ 3770 */
3758 if (ena) { 3771 if (ena) {
3759 /* Strip VLAN tag from Rx packet and put it in the desc */ 3772 /* Strip VLAN tag from Rx packet and put it in the desc */
3760 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH; 3773 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
3761 } else { 3774 } else {
3762 /* Disable stripping. Leave tag in packet */ 3775 /* Disable stripping. Leave tag in packet */
3763 ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING; 3776 ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3764 } 3777 }
3765 3778
3779 /* Allow all packets untagged/tagged */
3780 ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
3781
3766 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); 3782 ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
3767 ctxt.vsi_num = vsi->vsi_num; 3783 ctxt.vsi_num = vsi->vsi_num;
3768 3784
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
3773 return -EIO; 3789 return -EIO;
3774 } 3790 }
3775 3791
3776 vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags; 3792 vsi->info.vlan_flags = ctxt.info.vlan_flags;
3777 return 0; 3793 return 0;
3778} 3794}
3779 3795
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
3986 /* clear the context structure first */ 4002 /* clear the context structure first */
3987 memset(&rlan_ctx, 0, sizeof(rlan_ctx)); 4003 memset(&rlan_ctx, 0, sizeof(rlan_ctx));
3988 4004
3989 rlan_ctx.base = ring->dma >> 7; 4005 rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
3990 4006
3991 rlan_ctx.qlen = ring->count; 4007 rlan_ctx.qlen = ring->count;
3992 4008
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
4098{ 4114{
4099 int err; 4115 int err;
4100 4116
4101 ice_set_rx_mode(vsi->netdev); 4117 if (vsi->netdev) {
4102 4118 ice_set_rx_mode(vsi->netdev);
4103 err = ice_restore_vlan(vsi); 4119 err = ice_restore_vlan(vsi);
4104 if (err) 4120 if (err)
4105 return err; 4121 return err;
4122 }
4106 4123
4107 err = ice_vsi_cfg_txqs(vsi); 4124 err = ice_vsi_cfg_txqs(vsi);
4108 if (!err) 4125 if (!err)
@@ -4789,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4789 stats->rx_length_errors = vsi_stats->rx_length_errors; 4806 stats->rx_length_errors = vsi_stats->rx_length_errors;
4790} 4807}
4791 4808
4792#ifdef CONFIG_NET_POLL_CONTROLLER
4793/**
4794 * ice_netpoll - polling "interrupt" handler
4795 * @netdev: network interface device structure
4796 *
4797 * Used by netconsole to send skbs without having to re-enable interrupts.
4798 * This is not called in the normal interrupt path.
4799 */
4800static void ice_netpoll(struct net_device *netdev)
4801{
4802 struct ice_netdev_priv *np = netdev_priv(netdev);
4803 struct ice_vsi *vsi = np->vsi;
4804 struct ice_pf *pf = vsi->back;
4805 int i;
4806
4807 if (test_bit(__ICE_DOWN, vsi->state) ||
4808 !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
4809 return;
4810
4811 for (i = 0; i < vsi->num_q_vectors; i++)
4812 ice_msix_clean_rings(0, vsi->q_vectors[i]);
4813}
4814#endif /* CONFIG_NET_POLL_CONTROLLER */
4815
4816/** 4809/**
4817 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4810 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4818 * @vsi: VSI having NAPI disabled 4811 * @vsi: VSI having NAPI disabled
@@ -4868,7 +4861,7 @@ int ice_down(struct ice_vsi *vsi)
4868 */ 4861 */
4869static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi) 4862static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
4870{ 4863{
4871 int i, err; 4864 int i, err = 0;
4872 4865
4873 if (!vsi->num_txq) { 4866 if (!vsi->num_txq) {
4874 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n", 4867 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4886,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
4893 */ 4886 */
4894static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi) 4887static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
4895{ 4888{
4896 int i, err; 4889 int i, err = 0;
4897 4890
4898 if (!vsi->num_rxq) { 4891 if (!vsi->num_rxq) {
4899 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n", 4892 dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5228,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
5235 u8 count = 0; 5228 u8 count = 0;
5236 5229
5237 if (new_mtu == netdev->mtu) { 5230 if (new_mtu == netdev->mtu) {
5238 netdev_warn(netdev, "mtu is already %d\n", netdev->mtu); 5231 netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
5239 return 0; 5232 return 0;
5240 } 5233 }
5241 5234
@@ -5480,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
5480 .ndo_validate_addr = eth_validate_addr, 5473 .ndo_validate_addr = eth_validate_addr,
5481 .ndo_change_mtu = ice_change_mtu, 5474 .ndo_change_mtu = ice_change_mtu,
5482 .ndo_get_stats64 = ice_get_stats64, 5475 .ndo_get_stats64 = ice_get_stats64,
5483#ifdef CONFIG_NET_POLL_CONTROLLER
5484 .ndo_poll_controller = ice_netpoll,
5485#endif /* CONFIG_NET_POLL_CONTROLLER */
5486 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 5476 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
5487 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 5477 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
5488 .ndo_set_features = ice_set_features, 5478 .ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 92da0a626ce0..295a8cd87fc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
131 * 131 *
132 * This function will request NVM ownership. 132 * This function will request NVM ownership.
133 */ 133 */
134static enum 134static enum ice_status
135ice_status ice_acquire_nvm(struct ice_hw *hw, 135ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
136 enum ice_aq_res_access_type access)
137{ 136{
138 if (hw->nvm.blank_nvm_mode) 137 if (hw->nvm.blank_nvm_mode)
139 return 0; 138 return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2e6c1d92cc88..eeae199469b6 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
1576 return status; 1576 return status;
1577 } 1577 }
1578 1578
1579 if (owner == ICE_SCHED_NODE_OWNER_LAN) 1579 vsi->max_lanq[tc] = new_numqs;
1580 vsi->max_lanq[tc] = new_numqs;
1581 1580
1582 return status; 1581 return status;
1583} 1582}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 723d15f1e90b..6b7ec2ae5ad6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
645 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; 645 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
646 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act); 646 lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
647 647
648 act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M; 648 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
649 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
649 650
650 /* Third action Marker value */ 651 /* Third action Marker value */
651 act |= ICE_LG_ACT_GENERIC; 652 act |= ICE_LG_ACT_GENERIC;
652 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & 653 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
653 ICE_LG_ACT_GENERIC_VALUE_M; 654 ICE_LG_ACT_GENERIC_VALUE_M;
654 655
655 act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
656 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); 656 lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
657 657
658 /* call the fill switch rule to fill the lookup tx rx structure */ 658 /* call the fill switch rule to fill the lookup tx rx structure */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 6f4a0d159dbf..9b8ec128ee31 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
17 u16 vsis_unallocated; 17 u16 vsis_unallocated;
18 u16 flags; 18 u16 flags;
19 struct ice_aqc_vsi_props info; 19 struct ice_aqc_vsi_props info;
20 bool alloc_from_pool; 20 u8 alloc_from_pool;
21}; 21};
22 22
23enum ice_sw_fwd_act_type { 23enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
94 u8 qgrp_size; 94 u8 qgrp_size;
95 95
96 /* Rule creations populate these indicators basing on the switch type */ 96 /* Rule creations populate these indicators basing on the switch type */
97 bool lb_en; /* Indicate if packet can be looped back */ 97 u8 lb_en; /* Indicate if packet can be looped back */
98 bool lan_en; /* Indicate if packet can be forwarded to the uplink */ 98 u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
99}; 99};
100 100
101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ 101/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 567067b650c4..31bc998fe200 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -143,7 +143,7 @@ struct ice_ring {
143 u16 next_to_use; 143 u16 next_to_use;
144 u16 next_to_clean; 144 u16 next_to_clean;
145 145
146 bool ring_active; /* is ring online or not */ 146 u8 ring_active; /* is ring online or not */
147 147
148 /* stats structs */ 148 /* stats structs */
149 struct ice_q_stats stats; 149 struct ice_q_stats stats;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 99c8a9a71b5e..97c366e0ca59 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -83,7 +83,7 @@ struct ice_link_status {
83 u64 phy_type_low; 83 u64 phy_type_low;
84 u16 max_frame_size; 84 u16 max_frame_size;
85 u16 link_speed; 85 u16 link_speed;
86 bool lse_ena; /* Link Status Event notification */ 86 u8 lse_ena; /* Link Status Event notification */
87 u8 link_info; 87 u8 link_info;
88 u8 an_info; 88 u8 an_info;
89 u8 ext_info; 89 u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
101 struct ice_link_status link_info_old; 101 struct ice_link_status link_info_old;
102 u64 phy_type_low; 102 u64 phy_type_low;
103 enum ice_media_type media_type; 103 enum ice_media_type media_type;
104 bool get_link_info; 104 u8 get_link_info;
105}; 105};
106 106
107/* Common HW capabilities for SW use */ 107/* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
167 u32 oem_ver; /* OEM version info */ 167 u32 oem_ver; /* OEM version info */
168 u16 sr_words; /* Shadow RAM size in words */ 168 u16 sr_words; /* Shadow RAM size in words */
169 u16 ver; /* NVM package version */ 169 u16 ver; /* NVM package version */
170 bool blank_nvm_mode; /* is NVM empty (no FW present) */ 170 u8 blank_nvm_mode; /* is NVM empty (no FW present) */
171}; 171};
172 172
173/* Max number of port to queue branches w.r.t topology */ 173/* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
181 struct ice_aqc_txsched_elem_data info; 181 struct ice_aqc_txsched_elem_data info;
182 u32 agg_id; /* aggregator group id */ 182 u32 agg_id; /* aggregator group id */
183 u16 vsi_id; 183 u16 vsi_id;
184 bool in_use; /* suspended or in use */ 184 u8 in_use; /* suspended or in use */
185 u8 tx_sched_layer; /* Logical Layer (1-9) */ 185 u8 tx_sched_layer; /* Logical Layer (1-9) */
186 u8 num_children; 186 u8 num_children;
187 u8 tc_num; 187 u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
218struct ice_sched_tx_policy { 218struct ice_sched_tx_policy {
219 u16 max_num_vsis; 219 u16 max_num_vsis;
220 u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS]; 220 u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
221 bool rdma_ena; 221 u8 rdma_ena;
222}; 222};
223 223
224struct ice_port_info { 224struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
243 struct list_head agg_list; /* lists all aggregator */ 243 struct list_head agg_list; /* lists all aggregator */
244 u8 lport; 244 u8 lport;
245#define ICE_LPORT_MASK 0xff 245#define ICE_LPORT_MASK 0xff
246 bool is_vf; 246 u8 is_vf;
247}; 247};
248 248
249struct ice_switch_info { 249struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
287 u8 max_cgds; 287 u8 max_cgds;
288 u8 sw_entry_point_layer; 288 u8 sw_entry_point_layer;
289 289
290 bool evb_veb; /* true for VEB, false for VEPA */ 290 u8 evb_veb; /* true for VEB, false for VEPA */
291 struct ice_bus_info bus; 291 struct ice_bus_info bus;
292 struct ice_nvm_info nvm; 292 struct ice_nvm_info nvm;
293 struct ice_hw_dev_caps dev_caps; /* device capabilities */ 293 struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
318 u8 itr_gran_100; 318 u8 itr_gran_100;
319 u8 itr_gran_50; 319 u8 itr_gran_50;
320 u8 itr_gran_25; 320 u8 itr_gran_25;
321 bool ucast_shared; /* true if VSIs can share unicast addr */ 321 u8 ucast_shared; /* true if VSIs can share unicast addr */
322 322
323}; 323};
324 324
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index f92f7918112d..5acf3b743876 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1649 if (hw->phy.type == e1000_phy_m88) 1649 if (hw->phy.type == e1000_phy_m88)
1650 igb_phy_disable_receiver(adapter); 1650 igb_phy_disable_receiver(adapter);
1651 1651
1652 mdelay(500); 1652 msleep(500);
1653 return 0; 1653 return 0;
1654} 1654}
1655 1655
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d03c2f0d7592..0796cef96fa3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
205 .priority = 0 205 .priority = 0
206}; 206};
207#endif 207#endif
208#ifdef CONFIG_NET_POLL_CONTROLLER
209/* for netdump / net console */
210static void igb_netpoll(struct net_device *);
211#endif
212#ifdef CONFIG_PCI_IOV 208#ifdef CONFIG_PCI_IOV
213static unsigned int max_vfs; 209static unsigned int max_vfs;
214module_param(max_vfs, uint, 0); 210module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
2881 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, 2877 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2882 .ndo_set_vf_trust = igb_ndo_set_vf_trust, 2878 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2883 .ndo_get_vf_config = igb_ndo_get_vf_config, 2879 .ndo_get_vf_config = igb_ndo_get_vf_config,
2884#ifdef CONFIG_NET_POLL_CONTROLLER
2885 .ndo_poll_controller = igb_netpoll,
2886#endif
2887 .ndo_fix_features = igb_fix_features, 2880 .ndo_fix_features = igb_fix_features,
2888 .ndo_set_features = igb_set_features, 2881 .ndo_set_features = igb_set_features,
2889 .ndo_fdb_add = igb_ndo_fdb_add, 2882 .ndo_fdb_add = igb_ndo_fdb_add,
@@ -3873,7 +3866,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3873 3866
3874 adapter->mac_table = kcalloc(hw->mac.rar_entry_count, 3867 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
3875 sizeof(struct igb_mac_addr), 3868 sizeof(struct igb_mac_addr),
3876 GFP_ATOMIC); 3869 GFP_KERNEL);
3877 if (!adapter->mac_table) 3870 if (!adapter->mac_table)
3878 return -ENOMEM; 3871 return -ENOMEM;
3879 3872
@@ -3883,7 +3876,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3883 3876
3884 /* Setup and initialize a copy of the hw vlan table array */ 3877 /* Setup and initialize a copy of the hw vlan table array */
3885 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), 3878 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3886 GFP_ATOMIC); 3879 GFP_KERNEL);
3887 if (!adapter->shadow_vfta) 3880 if (!adapter->shadow_vfta)
3888 return -ENOMEM; 3881 return -ENOMEM;
3889 3882
@@ -5816,7 +5809,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5816 5809
5817 if (skb->ip_summed != CHECKSUM_PARTIAL) { 5810 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5818csum_failed: 5811csum_failed:
5819 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) 5812 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
5813 !tx_ring->launchtime_enable)
5820 return; 5814 return;
5821 goto no_csum; 5815 goto no_csum;
5822 } 5816 }
@@ -9052,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9052 return 0; 9046 return 0;
9053} 9047}
9054 9048
9055#ifdef CONFIG_NET_POLL_CONTROLLER
9056/* Polling 'interrupt' - used by things like netconsole to send skbs
9057 * without having to re-enable interrupts. It's not called while
9058 * the interrupt routine is executing.
9059 */
9060static void igb_netpoll(struct net_device *netdev)
9061{
9062 struct igb_adapter *adapter = netdev_priv(netdev);
9063 struct e1000_hw *hw = &adapter->hw;
9064 struct igb_q_vector *q_vector;
9065 int i;
9066
9067 for (i = 0; i < adapter->num_q_vectors; i++) {
9068 q_vector = adapter->q_vector[i];
9069 if (adapter->flags & IGB_FLAG_HAS_MSIX)
9070 wr32(E1000_EIMC, q_vector->eims_value);
9071 else
9072 igb_irq_disable(adapter);
9073 napi_schedule(&q_vector->napi);
9074 }
9075}
9076#endif /* CONFIG_NET_POLL_CONTROLLER */
9077
9078/** 9049/**
9079 * igb_io_error_detected - called when PCI error is detected 9050 * igb_io_error_detected - called when PCI error is detected
9080 * @pdev: Pointer to PCI device 9051 * @pdev: Pointer to PCI device
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 43664adf7a3c..7722153c4ac2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
81 __be16 proto, u16 vid); 81 __be16 proto, u16 vid);
82static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 82static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
83 83
84#ifdef CONFIG_NET_POLL_CONTROLLER
85/* for netdump / net console */
86static void ixgb_netpoll(struct net_device *dev);
87#endif
88
89static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, 84static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
90 enum pci_channel_state state); 85 enum pci_channel_state state);
91static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); 86static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
348 .ndo_tx_timeout = ixgb_tx_timeout, 343 .ndo_tx_timeout = ixgb_tx_timeout,
349 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, 344 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
350 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, 345 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
351#ifdef CONFIG_NET_POLL_CONTROLLER
352 .ndo_poll_controller = ixgb_netpoll,
353#endif
354 .ndo_fix_features = ixgb_fix_features, 346 .ndo_fix_features = ixgb_fix_features,
355 .ndo_set_features = ixgb_set_features, 347 .ndo_set_features = ixgb_set_features,
356}; 348};
@@ -771,14 +763,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
771 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
772 rxdr->size = ALIGN(rxdr->size, 4096); 764 rxdr->size = ALIGN(rxdr->size, 4096);
773 765
774 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 766 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
775 GFP_KERNEL); 767 GFP_KERNEL);
776 768
777 if (!rxdr->desc) { 769 if (!rxdr->desc) {
778 vfree(rxdr->buffer_info); 770 vfree(rxdr->buffer_info);
779 return -ENOMEM; 771 return -ENOMEM;
780 } 772 }
781 memset(rxdr->desc, 0, rxdr->size);
782 773
783 rxdr->next_to_clean = 0; 774 rxdr->next_to_clean = 0;
784 rxdr->next_to_use = 0; 775 rxdr->next_to_use = 0;
@@ -2196,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2196 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 2187 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2197} 2188}
2198 2189
2199#ifdef CONFIG_NET_POLL_CONTROLLER
2200/*
2201 * Polling 'interrupt' - used by things like netconsole to send skbs
2202 * without having to re-enable interrupts. It's not called while
2203 * the interrupt routine is executing.
2204 */
2205
2206static void ixgb_netpoll(struct net_device *dev)
2207{
2208 struct ixgb_adapter *adapter = netdev_priv(dev);
2209
2210 disable_irq(adapter->pdev->irq);
2211 ixgb_intr(adapter->pdev->irq, dev);
2212 enable_irq(adapter->pdev->irq);
2213}
2214#endif
2215
2216/** 2190/**
2217 * ixgb_io_error_detected - called when PCI error is detected 2191 * ixgb_io_error_detected - called when PCI error is detected
2218 * @pdev: pointer to pci device with error 2192 * @pdev: pointer to pci device with error
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 94b3165ff543..ccd852ad62a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
192 } 192 }
193 193
194 /* alloc the udl from per cpu ddp pool */ 194 /* alloc the udl from per cpu ddp pool */
195 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); 195 ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
196 if (!ddp->udl) { 196 if (!ddp->udl) {
197 e_err(drv, "failed allocated ddp context\n"); 197 e_err(drv, "failed allocated ddp context\n");
198 goto out_noddp_unmap; 198 goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
760 return 0; 760 return 0;
761 761
762 /* Extra buffer to be shared by all DDPs for HW work around */ 762 /* Extra buffer to be shared by all DDPs for HW work around */
763 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); 763 buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
764 if (!buffer) 764 if (!buffer)
765 return -ENOMEM; 765 return -ENOMEM;
766 766
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 447098005490..6cdd58d9d461 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
3196 return budget; 3196 return budget;
3197 3197
3198 /* all work done, exit the polling mode */ 3198 /* all work done, exit the polling mode */
3199 napi_complete_done(napi, work_done); 3199 if (likely(napi_complete_done(napi, work_done))) {
3200 if (adapter->rx_itr_setting & 1) 3200 if (adapter->rx_itr_setting & 1)
3201 ixgbe_set_itr(q_vector); 3201 ixgbe_set_itr(q_vector);
3202 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 3202 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3203 ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); 3203 ixgbe_irq_enable_queues(adapter,
3204 BIT_ULL(q_vector->v_idx));
3205 }
3204 3206
3205 return min(work_done, budget - 1); 3207 return min(work_done, budget - 1);
3206} 3208}
@@ -6201,7 +6203,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
6201 6203
6202 adapter->mac_table = kcalloc(hw->mac.num_rar_entries, 6204 adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
6203 sizeof(struct ixgbe_mac_addr), 6205 sizeof(struct ixgbe_mac_addr),
6204 GFP_ATOMIC); 6206 GFP_KERNEL);
6205 if (!adapter->mac_table) 6207 if (!adapter->mac_table)
6206 return -ENOMEM; 6208 return -ENOMEM;
6207 6209
@@ -6620,8 +6622,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
6620 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6622 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6621 6623
6622 if (adapter->xdp_prog) { 6624 if (adapter->xdp_prog) {
6623 e_warn(probe, "MTU cannot be changed while XDP program is loaded\n"); 6625 int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
6624 return -EPERM; 6626 VLAN_HLEN;
6627 int i;
6628
6629 for (i = 0; i < adapter->num_rx_queues; i++) {
6630 struct ixgbe_ring *ring = adapter->rx_ring[i];
6631
6632 if (new_frame_size > ixgbe_rx_bufsz(ring)) {
6633 e_warn(probe, "Requested MTU size is not supported with XDP\n");
6634 return -EINVAL;
6635 }
6636 }
6625 } 6637 }
6626 6638
6627 /* 6639 /*
@@ -8758,28 +8770,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8758 return err; 8770 return err;
8759} 8771}
8760 8772
8761#ifdef CONFIG_NET_POLL_CONTROLLER
8762/*
8763 * Polling 'interrupt' - used by things like netconsole to send skbs
8764 * without having to re-enable interrupts. It's not called while
8765 * the interrupt routine is executing.
8766 */
8767static void ixgbe_netpoll(struct net_device *netdev)
8768{
8769 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8770 int i;
8771
8772 /* if interface is down do nothing */
8773 if (test_bit(__IXGBE_DOWN, &adapter->state))
8774 return;
8775
8776 /* loop through and schedule all active queues */
8777 for (i = 0; i < adapter->num_q_vectors; i++)
8778 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8779}
8780
8781#endif
8782
8783static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, 8773static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8784 struct ixgbe_ring *ring) 8774 struct ixgbe_ring *ring)
8785{ 8775{
@@ -8983,6 +8973,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
8983 8973
8984#ifdef CONFIG_IXGBE_DCB 8974#ifdef CONFIG_IXGBE_DCB
8985 if (tc) { 8975 if (tc) {
8976 if (adapter->xdp_prog) {
8977 e_warn(probe, "DCB is not supported with XDP\n");
8978
8979 ixgbe_init_interrupt_scheme(adapter);
8980 if (netif_running(dev))
8981 ixgbe_open(dev);
8982 return -EINVAL;
8983 }
8984
8986 netdev_set_num_tc(dev, tc); 8985 netdev_set_num_tc(dev, tc);
8987 ixgbe_set_prio_tc_map(adapter); 8986 ixgbe_set_prio_tc_map(adapter);
8988 8987
@@ -9171,14 +9170,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
9171 struct tcf_exts *exts, u64 *action, u8 *queue) 9170 struct tcf_exts *exts, u64 *action, u8 *queue)
9172{ 9171{
9173 const struct tc_action *a; 9172 const struct tc_action *a;
9174 LIST_HEAD(actions); 9173 int i;
9175 9174
9176 if (!tcf_exts_has_actions(exts)) 9175 if (!tcf_exts_has_actions(exts))
9177 return -EINVAL; 9176 return -EINVAL;
9178 9177
9179 tcf_exts_to_list(exts, &actions); 9178 tcf_exts_for_each_action(i, a, exts) {
9180 list_for_each_entry(a, &actions, list) {
9181
9182 /* Drop action */ 9179 /* Drop action */
9183 if (is_tcf_gact_shot(a)) { 9180 if (is_tcf_gact_shot(a)) {
9184 *action = IXGBE_FDIR_DROP_QUEUE; 9181 *action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9933,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
9936 int tcs = adapter->hw_tcs ? : 1; 9933 int tcs = adapter->hw_tcs ? : 1;
9937 int pool, err; 9934 int pool, err;
9938 9935
9936 if (adapter->xdp_prog) {
9937 e_warn(probe, "L2FW offload is not supported with XDP\n");
9938 return ERR_PTR(-EINVAL);
9939 }
9940
9939 /* The hardware supported by ixgbe only filters on the destination MAC 9941 /* The hardware supported by ixgbe only filters on the destination MAC
9940 * address. In order to avoid issues we only support offloading modes 9942 * address. In order to avoid issues we only support offloading modes
9941 * where the hardware can actually provide the functionality. 9943 * where the hardware can actually provide the functionality.
@@ -10229,9 +10231,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
10229 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 10231 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10230 .ndo_get_stats64 = ixgbe_get_stats64, 10232 .ndo_get_stats64 = ixgbe_get_stats64,
10231 .ndo_setup_tc = __ixgbe_setup_tc, 10233 .ndo_setup_tc = __ixgbe_setup_tc,
10232#ifdef CONFIG_NET_POLL_CONTROLLER
10233 .ndo_poll_controller = ixgbe_netpoll,
10234#endif
10235#ifdef IXGBE_FCOE 10234#ifdef IXGBE_FCOE
10236 .ndo_select_queue = ixgbe_select_queue, 10235 .ndo_select_queue = ixgbe_select_queue,
10237 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 10236 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 6f59933cdff7..3c6f01c41b78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
53 struct ixgbe_hw *hw = &adapter->hw; 53 struct ixgbe_hw *hw = &adapter->hw;
54 int i; 54 int i;
55 55
56 if (adapter->xdp_prog) {
57 e_warn(probe, "SRIOV is not supported with XDP\n");
58 return -EINVAL;
59 }
60
56 /* Enable VMDq flag so device will be set in VM mode */ 61 /* Enable VMDq flag so device will be set in VM mode */
57 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | 62 adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
58 IXGBE_FLAG_VMDQ_ENABLED; 63 IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
688static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 693static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
689{ 694{
690 struct ixgbe_hw *hw = &adapter->hw; 695 struct ixgbe_hw *hw = &adapter->hw;
696 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
691 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 697 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
698 u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
692 u8 num_tcs = adapter->hw_tcs; 699 u8 num_tcs = adapter->hw_tcs;
700 u32 reg_val;
701 u32 queue;
702 u32 word;
693 703
694 /* remove VLAN filters beloning to this VF */ 704 /* remove VLAN filters beloning to this VF */
695 ixgbe_clear_vf_vlans(adapter, vf); 705 ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
726 736
727 /* reset VF api back to unknown */ 737 /* reset VF api back to unknown */
728 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; 738 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
739
740 /* Restart each queue for given VF */
741 for (queue = 0; queue < q_per_pool; queue++) {
742 unsigned int reg_idx = (vf * q_per_pool) + queue;
743
744 reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
745
746 /* Re-enabling only configured queues */
747 if (reg_val) {
748 reg_val |= IXGBE_TXDCTL_ENABLE;
749 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
750 reg_val &= ~IXGBE_TXDCTL_ENABLE;
751 IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
752 }
753 }
754
755 /* Clear VF's mailbox memory */
756 for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
757 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
758
759 IXGBE_WRITE_FLUSH(hw);
729} 760}
730 761
731static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 762static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 44cfb2021145..41bcbb337e83 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2518,6 +2518,7 @@ enum {
2518/* Translated register #defines */ 2518/* Translated register #defines */
2519#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) 2519#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P)))
2520#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) 2520#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P)))
2521#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
2521#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) 2522#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
2522#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) 2523#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
2523 2524
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d86446d202d5..5a228582423b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4233 return 0; 4233 return 0;
4234} 4234}
4235 4235
4236#ifdef CONFIG_NET_POLL_CONTROLLER
4237/* Polling 'interrupt' - used by things like netconsole to send skbs
4238 * without having to re-enable interrupts. It's not called while
4239 * the interrupt routine is executing.
4240 */
4241static void ixgbevf_netpoll(struct net_device *netdev)
4242{
4243 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4244 int i;
4245
4246 /* if interface is down do nothing */
4247 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
4248 return;
4249 for (i = 0; i < adapter->num_rx_queues; i++)
4250 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
4251}
4252#endif /* CONFIG_NET_POLL_CONTROLLER */
4253
4254static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 4236static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4255{ 4237{
4256 struct net_device *netdev = pci_get_drvdata(pdev); 4238 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
4482 .ndo_tx_timeout = ixgbevf_tx_timeout, 4464 .ndo_tx_timeout = ixgbevf_tx_timeout,
4483 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 4465 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4484 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 4466 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4485#ifdef CONFIG_NET_POLL_CONTROLLER
4486 .ndo_poll_controller = ixgbevf_netpoll,
4487#endif
4488 .ndo_features_check = ixgbevf_features_check, 4467 .ndo_features_check = ixgbevf_features_check,
4489 .ndo_bpf = ixgbevf_xdp, 4468 .ndo_bpf = ixgbevf_xdp,
4490}; 4469};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7a637b51c7d2..e08301d833e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
274 struct ltq_etop_chan *ch = &priv->ch[i]; 274 struct ltq_etop_chan *ch = &priv->ch[i];
275 275
276 ch->idx = ch->dma.nr = i; 276 ch->idx = ch->dma.nr = i;
277 ch->dma.dev = &priv->pdev->dev;
277 278
278 if (IS_TX(i)) { 279 if (IS_TX(i)) {
279 ltq_dma_alloc_tx(&ch->dma); 280 ltq_dma_alloc_tx(&ch->dma);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc80a678abc3..b4ed7d394d07 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1890 if (!data || !(rx_desc->buf_phys_addr)) 1890 if (!data || !(rx_desc->buf_phys_addr))
1891 continue; 1891 continue;
1892 1892
1893 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1893 dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1894 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1894 PAGE_SIZE, DMA_FROM_DEVICE);
1895 __free_page(data); 1895 __free_page(data);
1896 } 1896 }
1897} 1897}
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
2008 skb_add_rx_frag(rxq->skb, frag_num, page, 2008 skb_add_rx_frag(rxq->skb, frag_num, page,
2009 frag_offset, frag_size, 2009 frag_offset, frag_size,
2010 PAGE_SIZE); 2010 PAGE_SIZE);
2011 dma_unmap_single(dev->dev.parent, phys_addr, 2011 dma_unmap_page(dev->dev.parent, phys_addr,
2012 PAGE_SIZE, DMA_FROM_DEVICE); 2012 PAGE_SIZE, DMA_FROM_DEVICE);
2013 rxq->left_size -= frag_size; 2013 rxq->left_size -= frag_size;
2014 } 2014 }
2015 } else { 2015 } else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
2039 frag_offset, frag_size, 2039 frag_offset, frag_size,
2040 PAGE_SIZE); 2040 PAGE_SIZE);
2041 2041
2042 dma_unmap_single(dev->dev.parent, phys_addr, 2042 dma_unmap_page(dev->dev.parent, phys_addr,
2043 PAGE_SIZE, 2043 PAGE_SIZE, DMA_FROM_DEVICE);
2044 DMA_FROM_DEVICE);
2045 2044
2046 rxq->left_size -= frag_size; 2045 rxq->left_size -= frag_size;
2047 } 2046 }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 32d785b616e1..38cc01beea79 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
58 */ 58 */
59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, 59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
60 const struct phylink_link_state *state); 60 const struct phylink_link_state *state);
61static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
62 phy_interface_t interface, struct phy_device *phy);
61 63
62/* Queue modes */ 64/* Queue modes */
63#define MVPP2_QDIST_SINGLE_MODE 0 65#define MVPP2_QDIST_SINGLE_MODE 0
@@ -3053,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
3053 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 3055 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
3054 } 3056 }
3055 3057
3056 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 3058 if (port->has_tx_irqs) {
3057 if (cause_tx) { 3059 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3058 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 3060 if (cause_tx) {
3059 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 3061 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
3062 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
3063 }
3060 } 3064 }
3061 3065
3062 /* Process RX packets */ 3066 /* Process RX packets */
@@ -3142,6 +3146,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3142 mvpp22_mode_reconfigure(port); 3146 mvpp22_mode_reconfigure(port);
3143 3147
3144 if (port->phylink) { 3148 if (port->phylink) {
3149 netif_carrier_off(port->dev);
3145 phylink_start(port->phylink); 3150 phylink_start(port->phylink);
3146 } else { 3151 } else {
3147 /* Phylink isn't used as of now for ACPI, so the MAC has to be 3152 /* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3155,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3150 */ 3155 */
3151 struct phylink_link_state state = { 3156 struct phylink_link_state state = {
3152 .interface = port->phy_interface, 3157 .interface = port->phy_interface,
3153 .link = 1,
3154 }; 3158 };
3155 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); 3159 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
3160 mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
3161 NULL);
3156 } 3162 }
3157 3163
3158 netif_tx_start_all_queues(port->dev); 3164 netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4501,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4495 return; 4501 return;
4496 } 4502 }
4497 4503
4498 netif_tx_stop_all_queues(port->dev);
4499 if (!port->has_phy)
4500 netif_carrier_off(port->dev);
4501
4502 /* Make sure the port is disabled when reconfiguring the mode */ 4504 /* Make sure the port is disabled when reconfiguring the mode */
4503 mvpp2_port_disable(port); 4505 mvpp2_port_disable(port);
4504 4506
@@ -4523,16 +4525,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4523 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 4525 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
4524 mvpp2_port_loopback_set(port, state); 4526 mvpp2_port_loopback_set(port, state);
4525 4527
4526 /* If the port already was up, make sure it's still in the same state */ 4528 mvpp2_port_enable(port);
4527 if (state->link || !port->has_phy) {
4528 mvpp2_port_enable(port);
4529
4530 mvpp2_egress_enable(port);
4531 mvpp2_ingress_enable(port);
4532 if (!port->has_phy)
4533 netif_carrier_on(dev);
4534 netif_tx_wake_all_queues(dev);
4535 }
4536} 4529}
4537 4530
4538static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, 4531static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4803,6 +4796,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
4803 dev->min_mtu = ETH_MIN_MTU; 4796 dev->min_mtu = ETH_MIN_MTU;
4804 /* 9704 == 9728 - 20 and rounding to 8 */ 4797 /* 9704 == 9728 - 20 and rounding to 8 */
4805 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; 4798 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
4799 dev->dev.of_node = port_node;
4806 4800
4807 /* Phylink isn't used w/ ACPI as of now */ 4801 /* Phylink isn't used w/ ACPI as of now */
4808 if (port_node) { 4802 if (port_node) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6785661d1a72..fe49384eba48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1286,20 +1286,6 @@ out:
1286 mutex_unlock(&mdev->state_lock); 1286 mutex_unlock(&mdev->state_lock);
1287} 1287}
1288 1288
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290static void mlx4_en_netpoll(struct net_device *dev)
1291{
1292 struct mlx4_en_priv *priv = netdev_priv(dev);
1293 struct mlx4_en_cq *cq;
1294 int i;
1295
1296 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1297 cq = priv->tx_cq[TX][i];
1298 napi_schedule(&cq->napi);
1299 }
1300}
1301#endif
1302
1303static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1289static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
1304{ 1290{
1305 u64 reg_id; 1291 u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
2946 .ndo_tx_timeout = mlx4_en_tx_timeout, 2932 .ndo_tx_timeout = mlx4_en_tx_timeout,
2947 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2933 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2948 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2934 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2949#ifdef CONFIG_NET_POLL_CONTROLLER
2950 .ndo_poll_controller = mlx4_en_netpoll,
2951#endif
2952 .ndo_set_features = mlx4_en_set_features, 2935 .ndo_set_features = mlx4_en_set_features,
2953 .ndo_fix_features = mlx4_en_fix_features, 2936 .ndo_fix_features = mlx4_en_fix_features,
2954 .ndo_setup_tc = __mlx4_en_setup_tc, 2937 .ndo_setup_tc = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2983 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2966 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2984 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2967 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2985 .ndo_get_vf_config = mlx4_en_get_vf_config, 2968 .ndo_get_vf_config = mlx4_en_get_vf_config,
2986#ifdef CONFIG_NET_POLL_CONTROLLER
2987 .ndo_poll_controller = mlx4_en_netpoll,
2988#endif
2989 .ndo_set_features = mlx4_en_set_features, 2969 .ndo_set_features = mlx4_en_set_features,
2990 .ndo_fix_features = mlx4_en_fix_features, 2970 .ndo_fix_features = mlx4_en_fix_features,
2991 .ndo_setup_tc = __mlx4_en_setup_tc, 2971 .ndo_setup_tc = __mlx4_en_setup_tc,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1f3372c1802e..2df92dbd38e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
240 struct mlx4_dev *dev = &priv->dev; 240 struct mlx4_dev *dev = &priv->dev;
241 struct mlx4_eq *eq = &priv->eq_table.eq[vec]; 241 struct mlx4_eq *eq = &priv->eq_table.eq[vec];
242 242
243 if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) 243 if (!cpumask_available(eq->affinity_mask) ||
244 cpumask_empty(eq->affinity_mask))
244 return; 245 return;
245 246
246 hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); 247 hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ce14d42ddc8..a53736c26c0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
206 u8 own; 206 u8 own;
207 207
208 do { 208 do {
209 own = ent->lay->status_own; 209 own = READ_ONCE(ent->lay->status_own);
210 if (!(own & CMD_OWNER_HW)) { 210 if (!(own & CMD_OWNER_HW)) {
211 ent->ret = 0; 211 ent->ret = 0;
212 return; 212 return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index b994b80d5714..37ba7c78859d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
132 delayed_event_start(priv); 132 delayed_event_start(priv);
133 133
134 dev_ctx->context = intf->add(dev); 134 dev_ctx->context = intf->add(dev);
135 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
136 if (intf->attach)
137 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
138
139 if (dev_ctx->context) { 135 if (dev_ctx->context) {
136 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
137 if (intf->attach)
138 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
139
140 spin_lock_irq(&priv->ctx_lock); 140 spin_lock_irq(&priv->ctx_lock);
141 list_add_tail(&dev_ctx->list, &priv->ctx_list); 141 list_add_tail(&dev_ctx->list, &priv->ctx_list);
142 142
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
211 if (intf->attach) { 211 if (intf->attach) {
212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) 212 if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
213 goto out; 213 goto out;
214 intf->attach(dev, dev_ctx->context); 214 if (intf->attach(dev, dev_ctx->context))
215 goto out;
216
215 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); 217 set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
216 } else { 218 } else {
217 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) 219 if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
218 goto out; 220 goto out;
219 dev_ctx->context = intf->add(dev); 221 dev_ctx->context = intf->add(dev);
222 if (!dev_ctx->context)
223 goto out;
224
220 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); 225 set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
221 } 226 }
222 227
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
391 } 396 }
392} 397}
393 398
394static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev) 399static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
395{ 400{
396 return (u16)((dev->pdev->bus->number << 8) | 401 return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
402 (dev->pdev->bus->number << 8) |
397 PCI_SLOT(dev->pdev->devfn)); 403 PCI_SLOT(dev->pdev->devfn));
398} 404}
399 405
400/* Must be called with intf_mutex held */ 406/* Must be called with intf_mutex held */
401struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) 407struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
402{ 408{
403 u16 pci_id = mlx5_gen_pci_id(dev); 409 u32 pci_id = mlx5_gen_pci_id(dev);
404 struct mlx5_core_dev *res = NULL; 410 struct mlx5_core_dev *res = NULL;
405 struct mlx5_core_dev *tmp_dev; 411 struct mlx5_core_dev *tmp_dev;
406 struct mlx5_priv *priv; 412 struct mlx5_priv *priv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index db2cfcd21d43..0f189f873859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
54#include "en_stats.h" 54#include "en_stats.h"
55#include "en/fs.h" 55#include "en/fs.h"
56 56
57extern const struct net_device_ops mlx5e_netdev_ops;
57struct page_pool; 58struct page_pool;
58 59
59#define MLX5E_METADATA_ETHER_TYPE (0x8CE4) 60#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
16 16
17 DECLARE_HASHTABLE(mod_hdr_tbl, 8); 17 DECLARE_HASHTABLE(mod_hdr_tbl, 8);
18 DECLARE_HASHTABLE(hairpin_tbl, 8); 18 DECLARE_HASHTABLE(hairpin_tbl, 8);
19
20 struct notifier_block netdevice_nb;
19}; 21};
20 22
21struct mlx5e_flow_table { 23struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index eddd7702680b..e88340e196f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
183 183
184void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) 184void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
185{ 185{
186 u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
187 struct net_device *netdev = priv->netdev; 186 struct net_device *netdev = priv->netdev;
187 u32 caps;
188 188
189 if (!mlx5_accel_is_tls_device(priv->mdev)) 189 if (!mlx5_accel_is_tls_device(priv->mdev))
190 return; 190 return;
191 191
192 caps = mlx5_accel_tls_device_caps(priv->mdev);
192 if (caps & MLX5_ACCEL_TLS_TX) { 193 if (caps & MLX5_ACCEL_TLS_TX) {
193 netdev->features |= NETIF_F_HW_TLS_TX; 194 netdev->features |= NETIF_F_HW_TLS_TX;
194 netdev->hw_features |= NETIF_F_HW_TLS_TX; 195 netdev->hw_features |= NETIF_F_HW_TLS_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 75bb981e00b7..41cde926cdab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
191{ 191{
192 if (psrc_m) { 192 if (psrc_m) {
193 MLX5E_FTE_SET(headers_c, udp_sport, 0xffff); 193 MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
194 MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v)); 194 MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
195 } 195 }
196 196
197 if (pdst_m) { 197 if (pdst_m) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a7939e70190..f291d1bf1558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4315,23 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4315 } 4315 }
4316} 4316}
4317 4317
4318#ifdef CONFIG_NET_POLL_CONTROLLER 4318const struct net_device_ops mlx5e_netdev_ops = {
4319/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
4320 * reenabling interrupts.
4321 */
4322static void mlx5e_netpoll(struct net_device *dev)
4323{
4324 struct mlx5e_priv *priv = netdev_priv(dev);
4325 struct mlx5e_channels *chs = &priv->channels;
4326
4327 int i;
4328
4329 for (i = 0; i < chs->num; i++)
4330 napi_schedule(&chs->c[i]->napi);
4331}
4332#endif
4333
4334static const struct net_device_ops mlx5e_netdev_ops = {
4335 .ndo_open = mlx5e_open, 4319 .ndo_open = mlx5e_open,
4336 .ndo_stop = mlx5e_close, 4320 .ndo_stop = mlx5e_close,
4337 .ndo_start_xmit = mlx5e_xmit, 4321 .ndo_start_xmit = mlx5e_xmit,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
4356#ifdef CONFIG_MLX5_EN_ARFS 4340#ifdef CONFIG_MLX5_EN_ARFS
4357 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 4341 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4358#endif 4342#endif
4359#ifdef CONFIG_NET_POLL_CONTROLLER
4360 .ndo_poll_controller = mlx5e_netpoll,
4361#endif
4362#ifdef CONFIG_MLX5_ESWITCH 4343#ifdef CONFIG_MLX5_ESWITCH
4363 /* SRIOV E-Switch NDOs */ 4344 /* SRIOV E-Switch NDOs */
4364 .ndo_set_vf_mac = mlx5e_set_vf_mac, 4345 .ndo_set_vf_mac = mlx5e_set_vf_mac,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9131a1376e7d..85796727093e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1368 1368
1369 *match_level = MLX5_MATCH_L2; 1369 *match_level = MLX5_MATCH_L2;
1370 } 1370 }
1371 } else {
1372 MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
1373 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1371 } 1374 }
1372 1375
1373 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { 1376 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1982,14 +1985,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1982 goto out_ok; 1985 goto out_ok;
1983 1986
1984 modify_ip_header = false; 1987 modify_ip_header = false;
1985 tcf_exts_to_list(exts, &actions); 1988 tcf_exts_for_each_action(i, a, exts) {
1986 list_for_each_entry(a, &actions, list) { 1989 int k;
1990
1987 if (!is_tcf_pedit(a)) 1991 if (!is_tcf_pedit(a))
1988 continue; 1992 continue;
1989 1993
1990 nkeys = tcf_pedit_nkeys(a); 1994 nkeys = tcf_pedit_nkeys(a);
1991 for (i = 0; i < nkeys; i++) { 1995 for (k = 0; k < nkeys; k++) {
1992 htype = tcf_pedit_htype(a, i); 1996 htype = tcf_pedit_htype(a, k);
1993 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || 1997 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1994 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { 1998 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1995 modify_ip_header = true; 1999 modify_ip_header = true;
@@ -2053,15 +2057,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2053 const struct tc_action *a; 2057 const struct tc_action *a;
2054 LIST_HEAD(actions); 2058 LIST_HEAD(actions);
2055 u32 action = 0; 2059 u32 action = 0;
2056 int err; 2060 int err, i;
2057 2061
2058 if (!tcf_exts_has_actions(exts)) 2062 if (!tcf_exts_has_actions(exts))
2059 return -EINVAL; 2063 return -EINVAL;
2060 2064
2061 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 2065 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2062 2066
2063 tcf_exts_to_list(exts, &actions); 2067 tcf_exts_for_each_action(i, a, exts) {
2064 list_for_each_entry(a, &actions, list) {
2065 if (is_tcf_gact_shot(a)) { 2068 if (is_tcf_gact_shot(a)) {
2066 action |= MLX5_FLOW_CONTEXT_ACTION_DROP; 2069 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2067 if (MLX5_CAP_FLOWTABLE(priv->mdev, 2070 if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2669,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2666 LIST_HEAD(actions); 2669 LIST_HEAD(actions);
2667 bool encap = false; 2670 bool encap = false;
2668 u32 action = 0; 2671 u32 action = 0;
2669 int err; 2672 int err, i;
2670 2673
2671 if (!tcf_exts_has_actions(exts)) 2674 if (!tcf_exts_has_actions(exts))
2672 return -EINVAL; 2675 return -EINVAL;
@@ -2674,8 +2677,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2674 attr->in_rep = rpriv->rep; 2677 attr->in_rep = rpriv->rep;
2675 attr->in_mdev = priv->mdev; 2678 attr->in_mdev = priv->mdev;
2676 2679
2677 tcf_exts_to_list(exts, &actions); 2680 tcf_exts_for_each_action(i, a, exts) {
2678 list_for_each_entry(a, &actions, list) {
2679 if (is_tcf_gact_shot(a)) { 2681 if (is_tcf_gact_shot(a)) {
2680 action |= MLX5_FLOW_CONTEXT_ACTION_DROP | 2682 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2681 MLX5_FLOW_CONTEXT_ACTION_COUNT; 2683 MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -2947,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
2947 return 0; 2949 return 0;
2948} 2950}
2949 2951
2952static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
2953 struct mlx5e_priv *peer_priv)
2954{
2955 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
2956 struct mlx5e_hairpin_entry *hpe;
2957 u16 peer_vhca_id;
2958 int bkt;
2959
2960 if (!same_hw_devs(priv, peer_priv))
2961 return;
2962
2963 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
2964
2965 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
2966 if (hpe->peer_vhca_id == peer_vhca_id)
2967 hpe->hp->pair->peer_gone = true;
2968 }
2969}
2970
2971static int mlx5e_tc_netdev_event(struct notifier_block *this,
2972 unsigned long event, void *ptr)
2973{
2974 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
2975 struct mlx5e_flow_steering *fs;
2976 struct mlx5e_priv *peer_priv;
2977 struct mlx5e_tc_table *tc;
2978 struct mlx5e_priv *priv;
2979
2980 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
2981 event != NETDEV_UNREGISTER ||
2982 ndev->reg_state == NETREG_REGISTERED)
2983 return NOTIFY_DONE;
2984
2985 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
2986 fs = container_of(tc, struct mlx5e_flow_steering, tc);
2987 priv = container_of(fs, struct mlx5e_priv, fs);
2988 peer_priv = netdev_priv(ndev);
2989 if (priv == peer_priv ||
2990 !(priv->netdev->features & NETIF_F_HW_TC))
2991 return NOTIFY_DONE;
2992
2993 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
2994
2995 return NOTIFY_DONE;
2996}
2997
2950int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 2998int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
2951{ 2999{
2952 struct mlx5e_tc_table *tc = &priv->fs.tc; 3000 struct mlx5e_tc_table *tc = &priv->fs.tc;
3001 int err;
2953 3002
2954 hash_init(tc->mod_hdr_tbl); 3003 hash_init(tc->mod_hdr_tbl);
2955 hash_init(tc->hairpin_tbl); 3004 hash_init(tc->hairpin_tbl);
2956 3005
2957 return rhashtable_init(&tc->ht, &tc_ht_params); 3006 err = rhashtable_init(&tc->ht, &tc_ht_params);
3007 if (err)
3008 return err;
3009
3010 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
3011 if (register_netdevice_notifier(&tc->netdevice_nb)) {
3012 tc->netdevice_nb.notifier_call = NULL;
3013 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
3014 }
3015
3016 return err;
2958} 3017}
2959 3018
2960static void _mlx5e_tc_del_flow(void *ptr, void *arg) 3019static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2970,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
2970{ 3029{
2971 struct mlx5e_tc_table *tc = &priv->fs.tc; 3030 struct mlx5e_tc_table *tc = &priv->fs.tc;
2972 3031
3032 if (tc->netdevice_nb.notifier_call)
3033 unregister_netdevice_notifier(&tc->netdevice_nb);
3034
2973 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); 3035 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
2974 3036
2975 if (!IS_ERR_OR_NULL(tc->t)) { 3037 if (!IS_ERR_OR_NULL(tc->t)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..ea7dedc2d5ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
2000 u32 max_guarantee = 0; 2000 u32 max_guarantee = 0;
2001 int i; 2001 int i;
2002 2002
2003 for (i = 0; i <= esw->total_vports; i++) { 2003 for (i = 0; i < esw->total_vports; i++) {
2004 evport = &esw->vports[i]; 2004 evport = &esw->vports[i];
2005 if (!evport->enabled || evport->info.min_rate < max_guarantee) 2005 if (!evport->enabled || evport->info.min_rate < max_guarantee)
2006 continue; 2006 continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2020 int err; 2020 int err;
2021 int i; 2021 int i;
2022 2022
2023 for (i = 0; i <= esw->total_vports; i++) { 2023 for (i = 0; i < esw->total_vports; i++) {
2024 evport = &esw->vports[i]; 2024 evport = &esw->vports[i];
2025 if (!evport->enabled) 2025 if (!evport->enabled)
2026 continue; 2026 continue;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f72b5c9dcfe9..3028e8d90920 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
663 if (err) 663 if (err)
664 goto miss_rule_err; 664 goto miss_rule_err;
665 665
666 kvfree(flow_group_in);
666 return 0; 667 return 0;
667 668
668miss_rule_err: 669miss_rule_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f418541af7cf..37d114c668b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
1578 return version; 1578 return version;
1579} 1579}
1580 1580
1581static struct fs_fte *
1582lookup_fte_locked(struct mlx5_flow_group *g,
1583 u32 *match_value,
1584 bool take_write)
1585{
1586 struct fs_fte *fte_tmp;
1587
1588 if (take_write)
1589 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1590 else
1591 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1592 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1593 rhash_fte);
1594 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1595 fte_tmp = NULL;
1596 goto out;
1597 }
1598
1599 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1600out:
1601 if (take_write)
1602 up_write_ref_node(&g->node);
1603 else
1604 up_read_ref_node(&g->node);
1605 return fte_tmp;
1606}
1607
1581static struct mlx5_flow_handle * 1608static struct mlx5_flow_handle *
1582try_add_to_existing_fg(struct mlx5_flow_table *ft, 1609try_add_to_existing_fg(struct mlx5_flow_table *ft,
1583 struct list_head *match_head, 1610 struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
1600 if (IS_ERR(fte)) 1627 if (IS_ERR(fte))
1601 return ERR_PTR(-ENOMEM); 1628 return ERR_PTR(-ENOMEM);
1602 1629
1603 list_for_each_entry(iter, match_head, list) {
1604 nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
1605 }
1606
1607search_again_locked: 1630search_again_locked:
1608 version = matched_fgs_get_version(match_head); 1631 version = matched_fgs_get_version(match_head);
1609 /* Try to find a fg that already contains a matching fte */ 1632 /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
1611 struct fs_fte *fte_tmp; 1634 struct fs_fte *fte_tmp;
1612 1635
1613 g = iter->g; 1636 g = iter->g;
1614 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, 1637 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1615 rhash_fte); 1638 if (!fte_tmp)
1616 if (!fte_tmp || !tree_get_node(&fte_tmp->node))
1617 continue; 1639 continue;
1618
1619 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1620 if (!take_write) {
1621 list_for_each_entry(iter, match_head, list)
1622 up_read_ref_node(&iter->g->node);
1623 } else {
1624 list_for_each_entry(iter, match_head, list)
1625 up_write_ref_node(&iter->g->node);
1626 }
1627
1628 rule = add_rule_fg(g, spec->match_value, 1640 rule = add_rule_fg(g, spec->match_value,
1629 flow_act, dest, dest_num, fte_tmp); 1641 flow_act, dest, dest_num, fte_tmp);
1630 up_write_ref_node(&fte_tmp->node); 1642 up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
1633 return rule; 1645 return rule;
1634 } 1646 }
1635 1647
1636 /* No group with matching fte found. Try to add a new fte to any
1637 * matching fg.
1638 */
1639
1640 if (!take_write) {
1641 list_for_each_entry(iter, match_head, list)
1642 up_read_ref_node(&iter->g->node);
1643 list_for_each_entry(iter, match_head, list)
1644 nested_down_write_ref_node(&iter->g->node,
1645 FS_LOCK_PARENT);
1646 take_write = true;
1647 }
1648
1649 /* Check the ft version, for case that new flow group 1648 /* Check the ft version, for case that new flow group
1650 * was added while the fgs weren't locked 1649 * was added while the fgs weren't locked
1651 */ 1650 */
@@ -1657,27 +1656,30 @@ search_again_locked:
1657 /* Check the fgs version, for case the new FTE with the 1656 /* Check the fgs version, for case the new FTE with the
1658 * same values was added while the fgs weren't locked 1657 * same values was added while the fgs weren't locked
1659 */ 1658 */
1660 if (version != matched_fgs_get_version(match_head)) 1659 if (version != matched_fgs_get_version(match_head)) {
1660 take_write = true;
1661 goto search_again_locked; 1661 goto search_again_locked;
1662 }
1662 1663
1663 list_for_each_entry(iter, match_head, list) { 1664 list_for_each_entry(iter, match_head, list) {
1664 g = iter->g; 1665 g = iter->g;
1665 1666
1666 if (!g->node.active) 1667 if (!g->node.active)
1667 continue; 1668 continue;
1669
1670 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1671
1668 err = insert_fte(g, fte); 1672 err = insert_fte(g, fte);
1669 if (err) { 1673 if (err) {
1674 up_write_ref_node(&g->node);
1670 if (err == -ENOSPC) 1675 if (err == -ENOSPC)
1671 continue; 1676 continue;
1672 list_for_each_entry(iter, match_head, list)
1673 up_write_ref_node(&iter->g->node);
1674 kmem_cache_free(steering->ftes_cache, fte); 1677 kmem_cache_free(steering->ftes_cache, fte);
1675 return ERR_PTR(err); 1678 return ERR_PTR(err);
1676 } 1679 }
1677 1680
1678 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); 1681 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1679 list_for_each_entry(iter, match_head, list) 1682 up_write_ref_node(&g->node);
1680 up_write_ref_node(&iter->g->node);
1681 rule = add_rule_fg(g, spec->match_value, 1683 rule = add_rule_fg(g, spec->match_value,
1682 flow_act, dest, dest_num, fte); 1684 flow_act, dest, dest_num, fte);
1683 up_write_ref_node(&fte->node); 1685 up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
1686 } 1688 }
1687 rule = ERR_PTR(-ENOENT); 1689 rule = ERR_PTR(-ENOENT);
1688out: 1690out:
1689 list_for_each_entry(iter, match_head, list)
1690 up_write_ref_node(&iter->g->node);
1691 kmem_cache_free(steering->ftes_cache, fte); 1691 kmem_cache_free(steering->ftes_cache, fte);
1692 return rule; 1692 return rule;
1693} 1693}
@@ -1726,6 +1726,8 @@ search_again_locked:
1726 if (err) { 1726 if (err) {
1727 if (take_write) 1727 if (take_write)
1728 up_write_ref_node(&ft->node); 1728 up_write_ref_node(&ft->node);
1729 else
1730 up_read_ref_node(&ft->node);
1729 return ERR_PTR(err); 1731 return ERR_PTR(err);
1730 } 1732 }
1731 1733
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d39b0b7011b2..9f39aeca863f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
331 add_timer(&health->timer); 331 add_timer(&health->timer);
332} 332}
333 333
334void mlx5_stop_health_poll(struct mlx5_core_dev *dev) 334void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
335{ 335{
336 struct mlx5_core_health *health = &dev->priv.health; 336 struct mlx5_core_health *health = &dev->priv.health;
337 unsigned long flags;
338
339 if (disable_health) {
340 spin_lock_irqsave(&health->wq_lock, flags);
341 set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
342 set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
343 spin_unlock_irqrestore(&health->wq_lock, flags);
344 }
337 345
338 del_timer_sync(&health->timer); 346 del_timer_sync(&health->timer);
339} 347}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf3e4a659052..b5e9f664fc66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
878 priv->numa_node = dev_to_node(&dev->pdev->dev); 878 priv->numa_node = dev_to_node(&dev->pdev->dev);
879 879
880 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root); 880 priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
881 if (!priv->dbg_root) 881 if (!priv->dbg_root) {
882 dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
882 return -ENOMEM; 883 return -ENOMEM;
884 }
883 885
884 err = mlx5_pci_enable_device(dev); 886 err = mlx5_pci_enable_device(dev);
885 if (err) { 887 if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
928 pci_clear_master(dev->pdev); 930 pci_clear_master(dev->pdev);
929 release_bar(dev->pdev); 931 release_bar(dev->pdev);
930 mlx5_pci_disable_device(dev); 932 mlx5_pci_disable_device(dev);
931 debugfs_remove(priv->dbg_root); 933 debugfs_remove_recursive(priv->dbg_root);
932} 934}
933 935
934static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) 936static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
1286 mlx5_cleanup_once(dev); 1288 mlx5_cleanup_once(dev);
1287 1289
1288err_stop_poll: 1290err_stop_poll:
1289 mlx5_stop_health_poll(dev); 1291 mlx5_stop_health_poll(dev, boot);
1290 if (mlx5_cmd_teardown_hca(dev)) { 1292 if (mlx5_cmd_teardown_hca(dev)) {
1291 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); 1293 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
1292 goto out_err; 1294 goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
1346 mlx5_free_irq_vectors(dev); 1348 mlx5_free_irq_vectors(dev);
1347 if (cleanup) 1349 if (cleanup)
1348 mlx5_cleanup_once(dev); 1350 mlx5_cleanup_once(dev);
1349 mlx5_stop_health_poll(dev); 1351 mlx5_stop_health_poll(dev, cleanup);
1350 err = mlx5_cmd_teardown_hca(dev); 1352 err = mlx5_cmd_teardown_hca(dev);
1351 if (err) { 1353 if (err) {
1352 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); 1354 dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
1608 * with the HCA, so the health polll is no longer needed. 1610 * with the HCA, so the health polll is no longer needed.
1609 */ 1611 */
1610 mlx5_drain_health_wq(dev); 1612 mlx5_drain_health_wq(dev);
1611 mlx5_stop_health_poll(dev); 1613 mlx5_stop_health_poll(dev, false);
1612 1614
1613 ret = mlx5_cmd_force_teardown_hca(dev); 1615 ret = mlx5_cmd_force_teardown_hca(dev);
1614 if (ret) { 1616 if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index dae1c5c5d27c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
475 475
476 for (i = 0; i < hp->num_channels; i++) { 476 for (i = 0; i < hp->num_channels; i++) {
477 mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]); 477 mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
478 mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]); 478 if (!hp->peer_gone)
479 mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
479 } 480 }
480} 481}
481 482
@@ -509,7 +510,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
509 510
510 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 511 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
511 512
512 if (next_state == MLX5_RQC_STATE_RDY) { 513 if (next_state == MLX5_SQC_STATE_RDY) {
513 MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); 514 MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
514 MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); 515 MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
515 } 516 }
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
567 MLX5_RQC_STATE_RST, 0, 0); 568 MLX5_RQC_STATE_RST, 0, 0);
568 569
569 /* unset peer SQs */ 570 /* unset peer SQs */
571 if (hp->peer_gone)
572 return;
570 for (i = 0; i < hp->num_channels; i++) 573 for (i = 0; i < hp->num_channels; i++)
571 mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY, 574 mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
572 MLX5_SQC_STATE_RST, 0, 0); 575 MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 86478a6b99c5..68e7f8df2a6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
39 return (u32)wq->fbc.sz_m1 + 1; 39 return (u32)wq->fbc.sz_m1 + 1;
40} 40}
41 41
42u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) 42u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
43{ 43{
44 return (u32)wq->fbc.frag_sz_m1 + 1; 44 return wq->fbc.frag_sz_m1 + 1;
45} 45}
46 46
47u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) 47u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
138 void *qpc, struct mlx5_wq_qp *wq, 138 void *qpc, struct mlx5_wq_qp *wq,
139 struct mlx5_wq_ctrl *wq_ctrl) 139 struct mlx5_wq_ctrl *wq_ctrl)
140{ 140{
141 u32 sq_strides_offset; 141 u16 sq_strides_offset;
142 u32 rq_pg_remainder;
142 int err; 143 int err;
143 144
144 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, 145 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
145 MLX5_GET(qpc, qpc, log_rq_size), 146 MLX5_GET(qpc, qpc, log_rq_size),
146 &wq->rq.fbc); 147 &wq->rq.fbc);
147 148
148 sq_strides_offset = 149 rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
149 ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB; 150 sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
150 151
151 mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB), 152 mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
152 MLX5_GET(qpc, qpc, log_sq_size), 153 MLX5_GET(qpc, qpc, log_sq_size),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 2bd4c3184eba..3a1a170bb2d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
80 void *wqc, struct mlx5_wq_cyc *wq, 80 void *wqc, struct mlx5_wq_cyc *wq,
81 struct mlx5_wq_ctrl *wq_ctrl); 81 struct mlx5_wq_ctrl *wq_ctrl);
82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 82u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
83u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); 83u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
84 84
85int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 85int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
86 void *qpc, struct mlx5_wq_qp *wq, 86 void *qpc, struct mlx5_wq_qp *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6070d1591d1e..b492152c8881 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -44,8 +44,8 @@
44#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 44#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
45 45
46#define MLXSW_SP1_FWREV_MAJOR 13 46#define MLXSW_SP1_FWREV_MAJOR 13
47#define MLXSW_SP1_FWREV_MINOR 1702 47#define MLXSW_SP1_FWREV_MINOR 1703
48#define MLXSW_SP1_FWREV_SUBMINOR 6 48#define MLXSW_SP1_FWREV_SUBMINOR 4
49#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 49#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
50 50
51static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 51static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1346 return -ENOMEM; 1346 return -ENOMEM;
1347 mall_tc_entry->cookie = f->cookie; 1347 mall_tc_entry->cookie = f->cookie;
1348 1348
1349 tcf_exts_to_list(f->exts, &actions); 1349 a = tcf_exts_first_action(f->exts);
1350 a = list_first_entry(&actions, struct tc_action, list);
1351 1350
1352 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { 1351 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1353 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror; 1352 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3ae930196741..3cdb7aca90b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
414void 414void
415mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); 415mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
416void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); 416void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
417void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
418 struct net_device *dev);
417 419
418/* spectrum_kvdl.c */ 420/* spectrum_kvdl.c */
419enum mlxsw_sp_kvdl_entry_type { 421enum mlxsw_sp_kvdl_entry_type {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 4327487553c5..3589432d1643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
337 MLXSW_SP_SB_CM(1500, 9, 0), 337 MLXSW_SP_SB_CM(1500, 9, 0),
338 MLXSW_SP_SB_CM(1500, 9, 0), 338 MLXSW_SP_SB_CM(1500, 9, 0),
339 MLXSW_SP_SB_CM(1500, 9, 0), 339 MLXSW_SP_SB_CM(1500, 9, 0),
340 MLXSW_SP_SB_CM(0, 0, 0), 340 MLXSW_SP_SB_CM(0, 140000, 15),
341 MLXSW_SP_SB_CM(0, 0, 0), 341 MLXSW_SP_SB_CM(0, 140000, 15),
342 MLXSW_SP_SB_CM(0, 0, 0), 342 MLXSW_SP_SB_CM(0, 140000, 15),
343 MLXSW_SP_SB_CM(0, 0, 0), 343 MLXSW_SP_SB_CM(0, 140000, 15),
344 MLXSW_SP_SB_CM(0, 0, 0), 344 MLXSW_SP_SB_CM(0, 140000, 15),
345 MLXSW_SP_SB_CM(0, 0, 0), 345 MLXSW_SP_SB_CM(0, 140000, 15),
346 MLXSW_SP_SB_CM(0, 0, 0), 346 MLXSW_SP_SB_CM(0, 140000, 15),
347 MLXSW_SP_SB_CM(0, 0, 0), 347 MLXSW_SP_SB_CM(0, 140000, 15),
348 MLXSW_SP_SB_CM(1, 0xff, 0), 348 MLXSW_SP_SB_CM(1, 0xff, 0),
349}; 349};
350 350
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index ebd1b24ebaa5..8d211972c5e9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
21 struct netlink_ext_ack *extack) 21 struct netlink_ext_ack *extack)
22{ 22{
23 const struct tc_action *a; 23 const struct tc_action *a;
24 LIST_HEAD(actions); 24 int err, i;
25 int err;
26 25
27 if (!tcf_exts_has_actions(exts)) 26 if (!tcf_exts_has_actions(exts))
28 return 0; 27 return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
32 if (err) 31 if (err)
33 return err; 32 return err;
34 33
35 tcf_exts_to_list(exts, &actions); 34 tcf_exts_for_each_action(i, a, exts) {
36 list_for_each_entry(a, &actions, list) {
37 if (is_tcf_gact_ok(a)) { 35 if (is_tcf_gact_ok(a)) {
38 err = mlxsw_sp_acl_rulei_act_terminate(rulei); 36 err = mlxsw_sp_acl_rulei_act_terminate(rulei);
39 if (err) { 37 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 3a96307f51b0..2ab9cf25a08a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6234 mlxsw_sp_vr_put(mlxsw_sp, vr); 6234 mlxsw_sp_vr_put(mlxsw_sp, vr);
6235} 6235}
6236 6236
6237void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6238 struct net_device *dev)
6239{
6240 struct mlxsw_sp_rif *rif;
6241
6242 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6243 if (!rif)
6244 return;
6245 mlxsw_sp_rif_destroy(rif);
6246}
6247
6237static void 6248static void
6238mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, 6249mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6239 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) 6250 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d8444aaba01..db715da7bab7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
127 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 127 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
128} 128}
129 129
130static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
131 void *data)
132{
133 struct mlxsw_sp *mlxsw_sp = data;
134
135 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
136 return 0;
137}
138
139static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
140 struct net_device *dev)
141{
142 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
143 netdev_walk_all_upper_dev_rcu(dev,
144 mlxsw_sp_bridge_device_upper_rif_destroy,
145 mlxsw_sp);
146}
147
130static struct mlxsw_sp_bridge_device * 148static struct mlxsw_sp_bridge_device *
131mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, 149mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
132 struct net_device *br_dev) 150 struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
165mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, 183mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
166 struct mlxsw_sp_bridge_device *bridge_device) 184 struct mlxsw_sp_bridge_device *bridge_device)
167{ 185{
186 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
187 bridge_device->dev);
168 list_del(&bridge_device->list); 188 list_del(&bridge_device->list);
169 if (bridge_device->vlan_enabled) 189 if (bridge_device->vlan_enabled)
170 bridge->vlan_enabled_exists = false; 190 bridge->vlan_enabled_exists = false;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7dce79ff2c9..001b5f714c1b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2850 lan743x_hardware_cleanup(adapter); 2850 lan743x_hardware_cleanup(adapter);
2851} 2851}
2852 2852
2853#ifdef CONFIG_PM 2853#ifdef CONFIG_PM_SLEEP
2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) 2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2855{ 2855{
2856 return bitrev16(crc16(0xFFFF, buf, len)); 2856 return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
3016static const struct dev_pm_ops lan743x_pm_ops = { 3016static const struct dev_pm_ops lan743x_pm_ops = {
3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) 3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3018}; 3018};
3019#endif /*CONFIG_PM */ 3019#endif /* CONFIG_PM_SLEEP */
3020 3020
3021static const struct pci_device_id lan743x_pcidev_tbl[] = { 3021static const struct pci_device_id lan743x_pcidev_tbl[] = {
3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
3028 .id_table = lan743x_pcidev_tbl, 3028 .id_table = lan743x_pcidev_tbl,
3029 .probe = lan743x_pcidev_probe, 3029 .probe = lan743x_pcidev_probe,
3030 .remove = lan743x_pcidev_remove, 3030 .remove = lan743x_pcidev_remove,
3031#ifdef CONFIG_PM 3031#ifdef CONFIG_PM_SLEEP
3032 .driver.pm = &lan743x_pm_ops, 3032 .driver.pm = &lan743x_pm_ops,
3033#endif 3033#endif
3034 .shutdown = lan743x_pcidev_shutdown, 3034 .shutdown = lan743x_pcidev_shutdown,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 26bb3b18f3be..3cdf63e35b53 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
91 struct sk_buff *skb; 91 struct sk_buff *skb;
92 struct net_device *dev; 92 struct net_device *dev;
93 u32 *buf; 93 u32 *buf;
94 int sz, len; 94 int sz, len, buf_len;
95 u32 ifh[4]; 95 u32 ifh[4];
96 u32 val; 96 u32 val;
97 struct frame_info info; 97 struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
116 err = -ENOMEM; 116 err = -ENOMEM;
117 break; 117 break;
118 } 118 }
119 buf = (u32 *)skb_put(skb, info.len); 119 buf_len = info.len - ETH_FCS_LEN;
120 buf = (u32 *)skb_put(skb, buf_len);
120 121
121 len = 0; 122 len = 0;
122 do { 123 do {
123 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 124 sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
124 *buf++ = val; 125 *buf++ = val;
125 len += sz; 126 len += sz;
126 } while ((sz == 4) && (len < info.len)); 127 } while (len < buf_len);
128
129 /* Read the FCS and discard it */
130 sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
131 /* Update the statistics if part of the FCS was read before */
132 len -= ETH_FCS_LEN - sz;
127 133
128 if (sz < 0) { 134 if (sz < 0) {
129 err = sz; 135 err = sz;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 0ba0356ec4e6..46ba0cf257c6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -52,6 +52,7 @@
52#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01) 52#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
53#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04) 53#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
54#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800) 54#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
55#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
55#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \ 56#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
56 NFP_FL_TUNNEL_KEY | \ 57 NFP_FL_TUNNEL_KEY | \
57 NFP_FL_TUNNEL_GENEVE_OPT) 58 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
741 nfp_fl_push_vlan(psh_v, a); 742 nfp_fl_push_vlan(psh_v, a);
742 *a_len += sizeof(struct nfp_fl_push_vlan); 743 *a_len += sizeof(struct nfp_fl_push_vlan);
743 } else if (is_tcf_tunnel_set(a)) { 744 } else if (is_tcf_tunnel_set(a)) {
745 struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
744 struct nfp_repr *repr = netdev_priv(netdev); 746 struct nfp_repr *repr = netdev_priv(netdev);
747
745 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); 748 *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
746 if (*tun_type == NFP_FL_TUNNEL_NONE) 749 if (*tun_type == NFP_FL_TUNNEL_NONE)
747 return -EOPNOTSUPP; 750 return -EOPNOTSUPP;
748 751
752 if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
753 return -EOPNOTSUPP;
754
749 /* Pre-tunnel action is required for tunnel encap. 755 /* Pre-tunnel action is required for tunnel encap.
750 * This checks for next hop entries on NFP. 756 * This checks for next hop entries on NFP.
751 * If none, the packet falls back before applying other actions. 757 * If none, the packet falls back before applying other actions.
@@ -796,11 +802,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
796 struct net_device *netdev, 802 struct net_device *netdev,
797 struct nfp_fl_payload *nfp_flow) 803 struct nfp_fl_payload *nfp_flow)
798{ 804{
799 int act_len, act_cnt, err, tun_out_cnt, out_cnt; 805 int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
800 enum nfp_flower_tun_type tun_type; 806 enum nfp_flower_tun_type tun_type;
801 const struct tc_action *a; 807 const struct tc_action *a;
802 u32 csum_updated = 0; 808 u32 csum_updated = 0;
803 LIST_HEAD(actions);
804 809
805 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); 810 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
806 nfp_flow->meta.act_len = 0; 811 nfp_flow->meta.act_len = 0;
@@ -810,8 +815,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
810 tun_out_cnt = 0; 815 tun_out_cnt = 0;
811 out_cnt = 0; 816 out_cnt = 0;
812 817
813 tcf_exts_to_list(flow->exts, &actions); 818 tcf_exts_for_each_action(i, a, flow->exts) {
814 list_for_each_entry(a, &actions, list) {
815 err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, 819 err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
816 netdev, &tun_type, &tun_out_cnt, 820 netdev, &tun_type, &tun_out_cnt,
817 &out_cnt, &csum_updated); 821 &out_cnt, &csum_updated);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 85f8209bf007..81d941ab895c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -70,6 +70,7 @@ struct nfp_app;
70#define NFP_FL_FEATS_GENEVE BIT(0) 70#define NFP_FL_FEATS_GENEVE BIT(0)
71#define NFP_FL_NBI_MTU_SETTING BIT(1) 71#define NFP_FL_NBI_MTU_SETTING BIT(1)
72#define NFP_FL_FEATS_GENEVE_OPT BIT(2) 72#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
73#define NFP_FL_FEATS_VLAN_PCP BIT(3)
73#define NFP_FL_FEATS_LAG BIT(31) 74#define NFP_FL_FEATS_LAG BIT(31)
74 75
75struct nfp_fl_mask_id { 76struct nfp_fl_mask_id {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index a0c72f277faa..17acb8cc6044 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
56 FLOW_DISSECTOR_KEY_VLAN, 56 FLOW_DISSECTOR_KEY_VLAN,
57 target); 57 target);
58 /* Populate the tci field. */ 58 /* Populate the tci field. */
59 if (flow_vlan->vlan_id) { 59 if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 60 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
61 flow_vlan->vlan_priority) | 61 flow_vlan->vlan_priority) |
62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 62 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2edab01c3beb..bd19624f10cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
192 key_size += sizeof(struct nfp_flower_mac_mpls); 192 key_size += sizeof(struct nfp_flower_mac_mpls);
193 } 193 }
194 194
195 if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
196 struct flow_dissector_key_vlan *flow_vlan;
197
198 flow_vlan = skb_flow_dissector_target(flow->dissector,
199 FLOW_DISSECTOR_KEY_VLAN,
200 flow->mask);
201 if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
202 flow_vlan->vlan_priority)
203 return -EOPNOTSUPP;
204 }
205
195 if (dissector_uses_key(flow->dissector, 206 if (dissector_uses_key(flow->dissector,
196 FLOW_DISSECTOR_KEY_ENC_CONTROL)) { 207 FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
197 struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; 208 struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index a8b9fbab5f73..c6d29fdbb880 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -229,29 +229,16 @@ done:
229 spin_unlock_bh(&nn->reconfig_lock); 229 spin_unlock_bh(&nn->reconfig_lock);
230} 230}
231 231
232/** 232static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
233 * nfp_net_reconfig() - Reconfigure the firmware
234 * @nn: NFP Net device to reconfigure
235 * @update: The value for the update field in the BAR config
236 *
237 * Write the update word to the BAR and ping the reconfig queue. The
238 * poll until the firmware has acknowledged the update by zeroing the
239 * update word.
240 *
241 * Return: Negative errno on error, 0 on success
242 */
243int nfp_net_reconfig(struct nfp_net *nn, u32 update)
244{ 233{
245 bool cancelled_timer = false; 234 bool cancelled_timer = false;
246 u32 pre_posted_requests; 235 u32 pre_posted_requests;
247 int ret;
248 236
249 spin_lock_bh(&nn->reconfig_lock); 237 spin_lock_bh(&nn->reconfig_lock);
250 238
251 nn->reconfig_sync_present = true; 239 nn->reconfig_sync_present = true;
252 240
253 if (nn->reconfig_timer_active) { 241 if (nn->reconfig_timer_active) {
254 del_timer(&nn->reconfig_timer);
255 nn->reconfig_timer_active = false; 242 nn->reconfig_timer_active = false;
256 cancelled_timer = true; 243 cancelled_timer = true;
257 } 244 }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
260 247
261 spin_unlock_bh(&nn->reconfig_lock); 248 spin_unlock_bh(&nn->reconfig_lock);
262 249
263 if (cancelled_timer) 250 if (cancelled_timer) {
251 del_timer_sync(&nn->reconfig_timer);
264 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); 252 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
253 }
265 254
266 /* Run the posted reconfigs which were issued before we started */ 255 /* Run the posted reconfigs which were issued before we started */
267 if (pre_posted_requests) { 256 if (pre_posted_requests) {
268 nfp_net_reconfig_start(nn, pre_posted_requests); 257 nfp_net_reconfig_start(nn, pre_posted_requests);
269 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); 258 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
270 } 259 }
260}
261
262static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
263{
264 nfp_net_reconfig_sync_enter(nn);
265
266 spin_lock_bh(&nn->reconfig_lock);
267 nn->reconfig_sync_present = false;
268 spin_unlock_bh(&nn->reconfig_lock);
269}
270
271/**
272 * nfp_net_reconfig() - Reconfigure the firmware
273 * @nn: NFP Net device to reconfigure
274 * @update: The value for the update field in the BAR config
275 *
276 * Write the update word to the BAR and ping the reconfig queue. The
277 * poll until the firmware has acknowledged the update by zeroing the
278 * update word.
279 *
280 * Return: Negative errno on error, 0 on success
281 */
282int nfp_net_reconfig(struct nfp_net *nn, u32 update)
283{
284 int ret;
285
286 nfp_net_reconfig_sync_enter(nn);
271 287
272 nfp_net_reconfig_start(nn, update); 288 nfp_net_reconfig_start(nn, update);
273 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); 289 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -2061,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
2061 return true; 2077 return true;
2062} 2078}
2063 2079
2064static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec) 2080static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2065{ 2081{
2066 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring; 2082 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2067 struct nfp_net *nn = r_vec->nfp_net; 2083 struct nfp_net *nn = r_vec->nfp_net;
2068 struct nfp_net_dp *dp = &nn->dp; 2084 struct nfp_net_dp *dp = &nn->dp;
2085 unsigned int budget = 512;
2069 2086
2070 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring)) 2087 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
2071 continue; 2088 continue;
2089
2090 return budget;
2072} 2091}
2073 2092
2074static void nfp_ctrl_poll(unsigned long arg) 2093static void nfp_ctrl_poll(unsigned long arg)
@@ -2080,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
2080 __nfp_ctrl_tx_queued(r_vec); 2099 __nfp_ctrl_tx_queued(r_vec);
2081 spin_unlock_bh(&r_vec->lock); 2100 spin_unlock_bh(&r_vec->lock);
2082 2101
2083 nfp_ctrl_rx(r_vec); 2102 if (nfp_ctrl_rx(r_vec)) {
2084 2103 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2085 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); 2104 } else {
2105 tasklet_schedule(&r_vec->tasklet);
2106 nn_dp_warn(&r_vec->nfp_net->dp,
2107 "control message budget exceeded!\n");
2108 }
2086} 2109}
2087 2110
2088/* Setup and Configuration 2111/* Setup and Configuration
@@ -3130,21 +3153,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3130 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); 3153 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3131} 3154}
3132 3155
3133#ifdef CONFIG_NET_POLL_CONTROLLER
3134static void nfp_net_netpoll(struct net_device *netdev)
3135{
3136 struct nfp_net *nn = netdev_priv(netdev);
3137 int i;
3138
3139 /* nfp_net's NAPIs are statically allocated so even if there is a race
3140 * with reconfig path this will simply try to schedule some disabled
3141 * NAPI instances.
3142 */
3143 for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
3144 napi_schedule_irqoff(&nn->r_vecs[i].napi);
3145}
3146#endif
3147
3148static void nfp_net_stat64(struct net_device *netdev, 3156static void nfp_net_stat64(struct net_device *netdev,
3149 struct rtnl_link_stats64 *stats) 3157 struct rtnl_link_stats64 *stats)
3150{ 3158{
@@ -3503,9 +3511,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
3503 .ndo_get_stats64 = nfp_net_stat64, 3511 .ndo_get_stats64 = nfp_net_stat64,
3504 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, 3512 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3505 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, 3513 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3506#ifdef CONFIG_NET_POLL_CONTROLLER
3507 .ndo_poll_controller = nfp_net_netpoll,
3508#endif
3509 .ndo_set_vf_mac = nfp_app_set_vf_mac, 3514 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3510 .ndo_set_vf_vlan = nfp_app_set_vf_vlan, 3515 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3511 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, 3516 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
@@ -3633,6 +3638,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3633 */ 3638 */
3634void nfp_net_free(struct nfp_net *nn) 3639void nfp_net_free(struct nfp_net *nn)
3635{ 3640{
3641 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3636 if (nn->dp.netdev) 3642 if (nn->dp.netdev)
3637 free_netdev(nn->dp.netdev); 3643 free_netdev(nn->dp.netdev);
3638 else 3644 else
@@ -3920,4 +3926,5 @@ void nfp_net_clean(struct nfp_net *nn)
3920 return; 3926 return;
3921 3927
3922 unregister_netdev(nn->dp.netdev); 3928 unregister_netdev(nn->dp.netdev);
3929 nfp_net_reconfig_wait_posted(nn);
3923} 3930}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..59c70be22a84 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
72 work_func_t func, int delay); 72 work_func_t func, int delay);
73static void netxen_cancel_fw_work(struct netxen_adapter *adapter); 73static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
74static int netxen_nic_poll(struct napi_struct *napi, int budget); 74static int netxen_nic_poll(struct napi_struct *napi, int budget);
75#ifdef CONFIG_NET_POLL_CONTROLLER
76static void netxen_nic_poll_controller(struct net_device *netdev);
77#endif
78 75
79static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); 76static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
80static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); 77static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
581 .ndo_tx_timeout = netxen_tx_timeout, 578 .ndo_tx_timeout = netxen_tx_timeout,
582 .ndo_fix_features = netxen_fix_features, 579 .ndo_fix_features = netxen_fix_features,
583 .ndo_set_features = netxen_set_features, 580 .ndo_set_features = netxen_set_features,
584#ifdef CONFIG_NET_POLL_CONTROLLER
585 .ndo_poll_controller = netxen_nic_poll_controller,
586#endif
587}; 581};
588 582
589static inline bool netxen_function_zero(struct pci_dev *pdev) 583static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
2402 return work_done; 2396 return work_done;
2403} 2397}
2404 2398
2405#ifdef CONFIG_NET_POLL_CONTROLLER
2406static void netxen_nic_poll_controller(struct net_device *netdev)
2407{
2408 int ring;
2409 struct nx_host_sds_ring *sds_ring;
2410 struct netxen_adapter *adapter = netdev_priv(netdev);
2411 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
2412
2413 disable_irq(adapter->irq);
2414 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2415 sds_ring = &recv_ctx->sds_rings[ring];
2416 netxen_intr(adapter->irq, sds_ring);
2417 }
2418 enable_irq(adapter->irq);
2419}
2420#endif
2421
2422static int 2399static int
2423nx_incr_dev_ref_cnt(struct netxen_adapter *adapter) 2400nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
2424{ 2401{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6bb76e6d3c14..f5459de6d60a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
190 190
191static void 191static void
192qed_dcbx_set_params(struct qed_dcbx_results *p_data, 192qed_dcbx_set_params(struct qed_dcbx_results *p_data,
193 struct qed_hw_info *p_info, 193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
194 bool enable, 194 bool enable, u8 prio, u8 tc,
195 u8 prio,
196 u8 tc,
197 enum dcbx_protocol_type type, 195 enum dcbx_protocol_type type,
198 enum qed_pci_personality personality) 196 enum qed_pci_personality personality)
199{ 197{
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
206 else 204 else
207 p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; 205 p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
208 206
207 /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
208 if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
209 test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
210 p_data->arr[type].dont_add_vlan0 = true;
211
209 /* QM reconf data */ 212 /* QM reconf data */
210 if (p_info->personality == personality) 213 if (p_hwfn->hw_info.personality == personality)
211 qed_hw_info_set_offload_tc(p_info, tc); 214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
215
216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */
217 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
218 type == DCBX_PROTOCOL_ROCE) {
219 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
220 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
221 }
212} 222}
213 223
214/* Update app protocol data and hw_info fields with the TLV info */ 224/* Update app protocol data and hw_info fields with the TLV info */
215static void 225static void
216qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, 226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
217 struct qed_hwfn *p_hwfn, 227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
218 bool enable, 228 bool enable, u8 prio, u8 tc,
219 u8 prio, u8 tc, enum dcbx_protocol_type type) 229 enum dcbx_protocol_type type)
220{ 230{
221 struct qed_hw_info *p_info = &p_hwfn->hw_info;
222 enum qed_pci_personality personality; 231 enum qed_pci_personality personality;
223 enum dcbx_protocol_type id; 232 enum dcbx_protocol_type id;
224 int i; 233 int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
231 240
232 personality = qed_dcbx_app_update[i].personality; 241 personality = qed_dcbx_app_update[i].personality;
233 242
234 qed_dcbx_set_params(p_data, p_info, enable, 243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
235 prio, tc, type, personality); 244 prio, tc, type, personality);
236 } 245 }
237} 246}
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
265 * reconfiguring QM. Get protocol specific data for PF update ramrod command. 274 * reconfiguring QM. Get protocol specific data for PF update ramrod command.
266 */ 275 */
267static int 276static int
268qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, 277qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
269 struct qed_dcbx_results *p_data, 278 struct qed_dcbx_results *p_data,
270 struct dcbx_app_priority_entry *p_tbl, 279 struct dcbx_app_priority_entry *p_tbl,
271 u32 pri_tc_tbl, int count, u8 dcbx_version) 280 u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
309 enable = true; 318 enable = true;
310 } 319 }
311 320
312 qed_dcbx_update_app_info(p_data, p_hwfn, enable, 321 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
313 priority, tc, type); 322 priority, tc, type);
314 } 323 }
315 } 324 }
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
331 continue; 340 continue;
332 341
333 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; 342 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
334 qed_dcbx_update_app_info(p_data, p_hwfn, enable, 343 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
335 priority, tc, type); 344 priority, tc, type);
336 } 345 }
337 346
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
341/* Parse app TLV's to update TC information in hw_info structure for 350/* Parse app TLV's to update TC information in hw_info structure for
342 * reconfiguring QM. Get protocol specific data for PF update ramrod command. 351 * reconfiguring QM. Get protocol specific data for PF update ramrod command.
343 */ 352 */
344static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) 353static int
354qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
345{ 355{
346 struct dcbx_app_priority_feature *p_app; 356 struct dcbx_app_priority_feature *p_app;
347 struct dcbx_app_priority_entry *p_tbl; 357 struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
365 p_info = &p_hwfn->hw_info; 375 p_info = &p_hwfn->hw_info;
366 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); 376 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
367 377
368 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, 378 rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
369 num_entries, dcbx_version); 379 num_entries, dcbx_version);
370 if (rc) 380 if (rc)
371 return rc; 381 return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
891 return rc; 901 return rc;
892 902
893 if (type == QED_DCBX_OPERATIONAL_MIB) { 903 if (type == QED_DCBX_OPERATIONAL_MIB) {
894 rc = qed_dcbx_process_mib_info(p_hwfn); 904 rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
895 if (!rc) { 905 if (!rc) {
896 /* reconfigure tcs of QM queues according 906 /* reconfigure tcs of QM queues according
897 * to negotiation results 907 * to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
954 p_data->dcb_enable_flag = p_src->arr[type].enable; 964 p_data->dcb_enable_flag = p_src->arr[type].enable;
955 p_data->dcb_priority = p_src->arr[type].priority; 965 p_data->dcb_priority = p_src->arr[type].priority;
956 p_data->dcb_tc = p_src->arr[type].tc; 966 p_data->dcb_tc = p_src->arr[type].tc;
967 p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
957} 968}
958 969
959/* Set pf update ramrod command params */ 970/* Set pf update ramrod command params */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index a4d688c04e18..01f253ea4b22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
55 u8 update; /* Update indication */ 55 u8 update; /* Update indication */
56 u8 priority; /* Priority */ 56 u8 priority; /* Priority */
57 u8 tc; /* Traffic Class */ 57 u8 tc; /* Traffic Class */
58 bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
58}; 59};
59 60
60#define QED_DCBX_VERSION_DISABLED 0 61#define QED_DCBX_VERSION_DISABLED 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 016ca8a7ec8a..97f073fd3725 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
1706int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1706int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1707{ 1707{
1708 struct qed_load_req_params load_req_params; 1708 struct qed_load_req_params load_req_params;
1709 u32 load_code, param, drv_mb_param; 1709 u32 load_code, resp, param, drv_mb_param;
1710 bool b_default_mtu = true; 1710 bool b_default_mtu = true;
1711 struct qed_hwfn *p_hwfn; 1711 struct qed_hwfn *p_hwfn;
1712 int rc = 0, mfw_rc, i; 1712 int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1852 1852
1853 if (IS_PF(cdev)) { 1853 if (IS_PF(cdev)) {
1854 p_hwfn = QED_LEADING_HWFN(cdev); 1854 p_hwfn = QED_LEADING_HWFN(cdev);
1855
1856 /* Get pre-negotiated values for stag, bandwidth etc. */
1857 DP_VERBOSE(p_hwfn,
1858 QED_MSG_SPQ,
1859 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
1860 drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
1861 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1862 DRV_MSG_CODE_GET_OEM_UPDATES,
1863 drv_mb_param, &resp, &param);
1864 if (rc)
1865 DP_NOTICE(p_hwfn,
1866 "Failed to send GET_OEM_UPDATES attention request\n");
1867
1855 drv_mb_param = STORM_FW_VERSION; 1868 drv_mb_param = STORM_FW_VERSION;
1856 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1869 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1857 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 1870 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 8faceb691657..a71382687ef2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11987,6 +11987,7 @@ struct public_global {
11987 u32 running_bundle_id; 11987 u32 running_bundle_id;
11988 s32 external_temperature; 11988 s32 external_temperature;
11989 u32 mdump_reason; 11989 u32 mdump_reason;
11990 u64 reserved;
11990 u32 data_ptr; 11991 u32 data_ptr;
11991 u32 data_size; 11992 u32 data_size;
11992}; 11993};
@@ -12414,6 +12415,7 @@ struct public_drv_mb {
12414#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 12415#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
12415#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 12416#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
12416#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 12417#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
12418#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
12417 12419
12418#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 12420#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
12419#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 12421#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
@@ -12541,6 +12543,9 @@ struct public_drv_mb {
12541#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 12543#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
12542#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 12544#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
12543 12545
12546#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
12547#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
12548
12544#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 12549#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
12545#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 12550#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
12546#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 12551#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index d9ab5add27a8..34193c2f1699 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
407 407
408 if (i == QED_INIT_MAX_POLL_COUNT) { 408 if (i == QED_INIT_MAX_POLL_COUNT) {
409 DP_ERR(p_hwfn, 409 DP_ERR(p_hwfn,
410 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", 410 "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
411 addr, le32_to_cpu(cmd->expected_val), 411 addr, le32_to_cpu(cmd->expected_val),
412 val, le32_to_cpu(cmd->op_data)); 412 val, le32_to_cpu(cmd->op_data));
413 } 413 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 17f3dfa2cc94..e860bdf0f752 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1710 1710
1711 cm_info->local_ip[0] = ntohl(iph->daddr); 1711 cm_info->local_ip[0] = ntohl(iph->daddr);
1712 cm_info->remote_ip[0] = ntohl(iph->saddr); 1712 cm_info->remote_ip[0] = ntohl(iph->saddr);
1713 cm_info->ip_version = TCP_IPV4; 1713 cm_info->ip_version = QED_TCP_IPV4;
1714 1714
1715 ip_hlen = (iph->ihl) * sizeof(u32); 1715 ip_hlen = (iph->ihl) * sizeof(u32);
1716 *payload_len = ntohs(iph->tot_len) - ip_hlen; 1716 *payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1730 cm_info->remote_ip[i] = 1730 cm_info->remote_ip[i] =
1731 ntohl(ip6h->saddr.in6_u.u6_addr32[i]); 1731 ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1732 } 1732 }
1733 cm_info->ip_version = TCP_IPV6; 1733 cm_info->ip_version = QED_TCP_IPV6;
1734 1734
1735 ip_hlen = sizeof(*ip6h); 1735 ip_hlen = sizeof(*ip6h);
1736 *payload_len = ntohs(ip6h->payload_len); 1736 *payload_len = ntohs(ip6h->payload_len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index d89a0e22f6e4..58c7eb9d8e1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -48,7 +48,7 @@
48#include "qed_reg_addr.h" 48#include "qed_reg_addr.h"
49#include "qed_sriov.h" 49#include "qed_sriov.h"
50 50
51#define CHIP_MCP_RESP_ITER_US 10 51#define QED_MCP_RESP_ITER_US 10
52 52
53#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ 53#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
54#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ 54#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
183 return 0; 183 return 0;
184} 184}
185 185
186/* Maximum of 1 sec to wait for the SHMEM ready indication */
187#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
188#define QED_MCP_SHMEM_RDY_ITER_MS 50
189
186static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 190static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
187{ 191{
188 struct qed_mcp_info *p_info = p_hwfn->mcp_info; 192 struct qed_mcp_info *p_info = p_hwfn->mcp_info;
193 u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
194 u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
189 u32 drv_mb_offsize, mfw_mb_offsize; 195 u32 drv_mb_offsize, mfw_mb_offsize;
190 u32 mcp_pf_id = MCP_PF_ID(p_hwfn); 196 u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
191 197
192 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); 198 p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
193 if (!p_info->public_base) 199 if (!p_info->public_base) {
194 return 0; 200 DP_NOTICE(p_hwfn,
201 "The address of the MCP scratch-pad is not configured\n");
202 return -EINVAL;
203 }
195 204
196 p_info->public_base |= GRCBASE_MCP; 205 p_info->public_base |= GRCBASE_MCP;
197 206
207 /* Get the MFW MB address and number of supported messages */
208 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
209 SECTION_OFFSIZE_ADDR(p_info->public_base,
210 PUBLIC_MFW_MB));
211 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
213 p_info->mfw_mb_addr +
214 offsetof(struct public_mfw_mb,
215 sup_msgs));
216
217 /* The driver can notify that there was an MCP reset, and might read the
218 * SHMEM values before the MFW has completed initializing them.
219 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
220 * data ready indication.
221 */
222 while (!p_info->mfw_mb_length && --cnt) {
223 msleep(msec);
224 p_info->mfw_mb_length =
225 (u16)qed_rd(p_hwfn, p_ptt,
226 p_info->mfw_mb_addr +
227 offsetof(struct public_mfw_mb, sup_msgs));
228 }
229
230 if (!cnt) {
231 DP_NOTICE(p_hwfn,
232 "Failed to get the SHMEM ready notification after %d msec\n",
233 QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
234 return -EBUSY;
235 }
236
198 /* Calculate the driver and MFW mailbox address */ 237 /* Calculate the driver and MFW mailbox address */
199 drv_mb_offsize = qed_rd(p_hwfn, p_ptt, 238 drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
200 SECTION_OFFSIZE_ADDR(p_info->public_base, 239 SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
204 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", 243 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
205 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); 244 drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
206 245
207 /* Set the MFW MB address */
208 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
209 SECTION_OFFSIZE_ADDR(p_info->public_base,
210 PUBLIC_MFW_MB));
211 p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212 p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
213
214 /* Get the current driver mailbox sequence before sending 246 /* Get the current driver mailbox sequence before sending
215 * the first command 247 * the first command
216 */ 248 */
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
285 317
286int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 318int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
287{ 319{
288 u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0; 320 u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
289 int rc = 0; 321 int rc = 0;
290 322
323 if (p_hwfn->mcp_info->b_block_cmd) {
324 DP_NOTICE(p_hwfn,
325 "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
326 return -EBUSY;
327 }
328
291 /* Ensure that only a single thread is accessing the mailbox */ 329 /* Ensure that only a single thread is accessing the mailbox */
292 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 330 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
293 331
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
413 (p_mb_params->cmd | seq_num), p_mb_params->param); 451 (p_mb_params->cmd | seq_num), p_mb_params->param);
414} 452}
415 453
454static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
455{
456 p_hwfn->mcp_info->b_block_cmd = block_cmd;
457
458 DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
459 block_cmd ? "Block" : "Unblock");
460}
461
462static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
463 struct qed_ptt *p_ptt)
464{
465 u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
466 u32 delay = QED_MCP_RESP_ITER_US;
467
468 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
469 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
470 cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
471 udelay(delay);
472 cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
473 udelay(delay);
474 cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
475
476 DP_NOTICE(p_hwfn,
477 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
478 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
479}
480
416static int 481static int
417_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, 482_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
418 struct qed_ptt *p_ptt, 483 struct qed_ptt *p_ptt,
419 struct qed_mcp_mb_params *p_mb_params, 484 struct qed_mcp_mb_params *p_mb_params,
420 u32 max_retries, u32 delay) 485 u32 max_retries, u32 usecs)
421{ 486{
487 u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
422 struct qed_mcp_cmd_elem *p_cmd_elem; 488 struct qed_mcp_cmd_elem *p_cmd_elem;
423 u32 cnt = 0;
424 u16 seq_num; 489 u16 seq_num;
425 int rc = 0; 490 int rc = 0;
426 491
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
443 goto err; 508 goto err;
444 509
445 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 510 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
446 udelay(delay); 511
512 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
513 msleep(msecs);
514 else
515 udelay(usecs);
447 } while (++cnt < max_retries); 516 } while (++cnt < max_retries);
448 517
449 if (cnt >= max_retries) { 518 if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
472 * The spinlock stays locked until the list element is removed. 541 * The spinlock stays locked until the list element is removed.
473 */ 542 */
474 543
475 udelay(delay); 544 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
545 msleep(msecs);
546 else
547 udelay(usecs);
548
476 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 549 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
477 550
478 if (p_cmd_elem->b_is_completed) 551 if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
491 DP_NOTICE(p_hwfn, 564 DP_NOTICE(p_hwfn,
492 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", 565 "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
493 p_mb_params->cmd, p_mb_params->param); 566 p_mb_params->cmd, p_mb_params->param);
567 qed_mcp_print_cpu_info(p_hwfn, p_ptt);
494 568
495 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); 569 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
496 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); 570 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
497 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); 571 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
498 572
573 if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
574 qed_mcp_cmd_set_blocking(p_hwfn, true);
575
499 return -EAGAIN; 576 return -EAGAIN;
500 } 577 }
501 578
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
507 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", 584 "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
508 p_mb_params->mcp_resp, 585 p_mb_params->mcp_resp,
509 p_mb_params->mcp_param, 586 p_mb_params->mcp_param,
510 (cnt * delay) / 1000, (cnt * delay) % 1000); 587 (cnt * usecs) / 1000, (cnt * usecs) % 1000);
511 588
512 /* Clear the sequence number from the MFW response */ 589 /* Clear the sequence number from the MFW response */
513 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; 590 p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
525{ 602{
526 size_t union_data_size = sizeof(union drv_union_data); 603 size_t union_data_size = sizeof(union drv_union_data);
527 u32 max_retries = QED_DRV_MB_MAX_RETRIES; 604 u32 max_retries = QED_DRV_MB_MAX_RETRIES;
528 u32 delay = CHIP_MCP_RESP_ITER_US; 605 u32 usecs = QED_MCP_RESP_ITER_US;
529 606
530 /* MCP not initialized */ 607 /* MCP not initialized */
531 if (!qed_mcp_is_init(p_hwfn)) { 608 if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
533 return -EBUSY; 610 return -EBUSY;
534 } 611 }
535 612
613 if (p_hwfn->mcp_info->b_block_cmd) {
614 DP_NOTICE(p_hwfn,
615 "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
616 p_mb_params->cmd, p_mb_params->param);
617 return -EBUSY;
618 }
619
536 if (p_mb_params->data_src_size > union_data_size || 620 if (p_mb_params->data_src_size > union_data_size ||
537 p_mb_params->data_dst_size > union_data_size) { 621 p_mb_params->data_dst_size > union_data_size) {
538 DP_ERR(p_hwfn, 622 DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
542 return -EINVAL; 626 return -EINVAL;
543 } 627 }
544 628
629 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
630 max_retries = DIV_ROUND_UP(max_retries, 1000);
631 usecs *= 1000;
632 }
633
545 return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, 634 return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
546 delay); 635 usecs);
547} 636}
548 637
549int qed_mcp_cmd(struct qed_hwfn *p_hwfn, 638int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
761 mb_params.data_src_size = sizeof(load_req); 850 mb_params.data_src_size = sizeof(load_req);
762 mb_params.p_data_dst = &load_rsp; 851 mb_params.p_data_dst = &load_rsp;
763 mb_params.data_dst_size = sizeof(load_rsp); 852 mb_params.data_dst_size = sizeof(load_rsp);
853 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
764 854
765 DP_VERBOSE(p_hwfn, QED_MSG_SP, 855 DP_VERBOSE(p_hwfn, QED_MSG_SP,
766 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", 856 "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
982 1072
983int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1073int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
984{ 1074{
985 u32 wol_param, mcp_resp, mcp_param; 1075 struct qed_mcp_mb_params mb_params;
1076 u32 wol_param;
986 1077
987 switch (p_hwfn->cdev->wol_config) { 1078 switch (p_hwfn->cdev->wol_config) {
988 case QED_OV_WOL_DISABLED: 1079 case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1000 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; 1091 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1001 } 1092 }
1002 1093
1003 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param, 1094 memset(&mb_params, 0, sizeof(mb_params));
1004 &mcp_resp, &mcp_param); 1095 mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1096 mb_params.param = wol_param;
1097 mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1098
1099 return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1005} 1100}
1006 1101
1007int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1102int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1486,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1486 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & 1581 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1487 FUNC_MF_CFG_OV_STAG_MASK; 1582 FUNC_MF_CFG_OV_STAG_MASK;
1488 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; 1583 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1489 if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && 1584 if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1490 (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { 1585 if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1491 qed_wr(p_hwfn, p_ptt, 1586 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1492 NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); 1587 p_hwfn->hw_info.ovlan);
1588 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1589
1590 /* Configure DB to add external vlan to EDPM packets */
1591 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1592 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1593 p_hwfn->hw_info.ovlan);
1594 } else {
1595 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1596 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1597 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1598 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1599 }
1600
1493 qed_sp_pf_update_stag(p_hwfn); 1601 qed_sp_pf_update_stag(p_hwfn);
1494 } 1602 }
1495 1603
1604 DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1605 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1606
1496 /* Acknowledge the MFW */ 1607 /* Acknowledge the MFW */
1497 qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, 1608 qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1498 &resp, &param); 1609 &resp, &param);
@@ -2077,31 +2188,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2077 return rc; 2188 return rc;
2078} 2189}
2079 2190
2191/* A maximal 100 msec waiting time for the MCP to halt */
2192#define QED_MCP_HALT_SLEEP_MS 10
2193#define QED_MCP_HALT_MAX_RETRIES 10
2194
2080int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2195int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2081{ 2196{
2082 u32 resp = 0, param = 0; 2197 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2083 int rc; 2198 int rc;
2084 2199
2085 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, 2200 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2086 &param); 2201 &param);
2087 if (rc) 2202 if (rc) {
2088 DP_ERR(p_hwfn, "MCP response failure, aborting\n"); 2203 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2204 return rc;
2205 }
2089 2206
2090 return rc; 2207 do {
2208 msleep(QED_MCP_HALT_SLEEP_MS);
2209 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2210 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2211 break;
2212 } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2213
2214 if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2215 DP_NOTICE(p_hwfn,
2216 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2217 qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2218 return -EBUSY;
2219 }
2220
2221 qed_mcp_cmd_set_blocking(p_hwfn, true);
2222
2223 return 0;
2091} 2224}
2092 2225
2226#define QED_MCP_RESUME_SLEEP_MS 10
2227
2093int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 2228int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2094{ 2229{
2095 u32 value, cpu_mode; 2230 u32 cpu_mode, cpu_state;
2096 2231
2097 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); 2232 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2098 2233
2099 value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2100 value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2101 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
2102 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); 2234 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2235 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2236 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2237 msleep(QED_MCP_RESUME_SLEEP_MS);
2238 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2239
2240 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2241 DP_NOTICE(p_hwfn,
2242 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2243 cpu_mode, cpu_state);
2244 return -EBUSY;
2245 }
2103 2246
2104 return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; 2247 qed_mcp_cmd_set_blocking(p_hwfn, false);
2248
2249 return 0;
2105} 2250}
2106 2251
2107int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, 2252int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 047976d5c6e9..85e6b3989e7a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -635,11 +635,14 @@ struct qed_mcp_info {
635 */ 635 */
636 spinlock_t cmd_lock; 636 spinlock_t cmd_lock;
637 637
638 /* Flag to indicate whether sending a MFW mailbox command is blocked */
639 bool b_block_cmd;
640
638 /* Spinlock used for syncing SW link-changes and link-changes 641 /* Spinlock used for syncing SW link-changes and link-changes
639 * originating from attention context. 642 * originating from attention context.
640 */ 643 */
641 spinlock_t link_lock; 644 spinlock_t link_lock;
642 bool block_mb_sending; 645
643 u32 public_base; 646 u32 public_base;
644 u32 drv_mb_addr; 647 u32 drv_mb_addr;
645 u32 mfw_mb_addr; 648 u32 mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
660}; 663};
661 664
662struct qed_mcp_mb_params { 665struct qed_mcp_mb_params {
663 u32 cmd; 666 u32 cmd;
664 u32 param; 667 u32 param;
665 void *p_data_src; 668 void *p_data_src;
666 u8 data_src_size; 669 void *p_data_dst;
667 void *p_data_dst; 670 u8 data_src_size;
668 u8 data_dst_size; 671 u8 data_dst_size;
669 u32 mcp_resp; 672 u32 mcp_resp;
670 u32 mcp_param; 673 u32 mcp_param;
674 u32 flags;
675#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
676#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
677#define QED_MB_FLAGS_IS_SET(params, flag) \
678 ({ typeof(params) __params = (params); \
679 (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
671}; 680};
672 681
673struct qed_drv_tlv_hdr { 682struct qed_drv_tlv_hdr {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
228 num_cons, "Toggle"); 228 num_cons, "Toggle");
229 if (rc) { 229 if (rc) {
230 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 230 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
231 "Failed to allocate toogle bits, rc = %d\n", rc); 231 "Failed to allocate toggle bits, rc = %d\n", rc);
232 goto free_cq_map; 232 goto free_cq_map;
233 } 233 }
234 234
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d8ad2dcad8d5..2440970882c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -216,6 +216,12 @@
216 0x00c000UL 216 0x00c000UL
217#define DORQ_REG_IFEN \ 217#define DORQ_REG_IFEN \
218 0x100040UL 218 0x100040UL
219#define DORQ_REG_TAG1_OVRD_MODE \
220 0x1008b4UL
221#define DORQ_REG_PF_PCP_BB_K2 \
222 0x1008c4UL
223#define DORQ_REG_PF_EXT_VID_BB_K2 \
224 0x1008c8UL
219#define DORQ_REG_DB_DROP_REASON \ 225#define DORQ_REG_DB_DROP_REASON \
220 0x100a2cUL 226 0x100a2cUL
221#define DORQ_REG_DB_DROP_DETAILS \ 227#define DORQ_REG_DB_DROP_DETAILS \
@@ -562,8 +568,10 @@
562 0 568 0
563#define MCP_REG_CPU_STATE \ 569#define MCP_REG_CPU_STATE \
564 0xe05004UL 570 0xe05004UL
571#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
565#define MCP_REG_CPU_EVENT_MASK \ 572#define MCP_REG_CPU_EVENT_MASK \
566 0xe05008UL 573 0xe05008UL
574#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
567#define PGLUE_B_REG_PF_BAR0_SIZE \ 575#define PGLUE_B_REG_PF_BAR0_SIZE \
568 0x2aae60UL 576 0x2aae60UL
569#define PGLUE_B_REG_PF_BAR1_SIZE \ 577#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
140 140
141static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) 141static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
142{ 142{
143 enum roce_flavor flavor;
144
145 switch (roce_mode) { 143 switch (roce_mode) {
146 case ROCE_V1: 144 case ROCE_V1:
147 flavor = PLAIN_ROCE; 145 return PLAIN_ROCE;
148 break;
149 case ROCE_V2_IPV4: 146 case ROCE_V2_IPV4:
150 flavor = RROCE_IPV4; 147 return RROCE_IPV4;
151 break;
152 case ROCE_V2_IPV6: 148 case ROCE_V2_IPV6:
153 flavor = ROCE_V2_IPV6; 149 return RROCE_IPV6;
154 break;
155 default: 150 default:
156 flavor = MAX_ROCE_MODE; 151 return MAX_ROCE_FLAVOR;
157 break;
158 } 152 }
159 return flavor;
160} 153}
161 154
162static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) 155static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
154static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, 154static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
155 struct qed_tunnel_info *p_src) 155 struct qed_tunnel_info *p_src)
156{ 156{
157 enum tunnel_clss type; 157 int type;
158 158
159 p_tun->b_update_rx_cls = p_src->b_update_rx_cls; 159 p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
160 p_tun->b_update_tx_cls = p_src->b_update_tx_cls; 160 p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..be118d057b92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
413 } 413 }
414 414
415 if (!p_iov->b_pre_fp_hsi && 415 if (!p_iov->b_pre_fp_hsi &&
416 ETH_HSI_VER_MINOR &&
417 (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { 416 (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
418 DP_INFO(p_hwfn, 417 DP_INFO(p_hwfn,
419 "PF is using older fastpath HSI; %02x.%02x is configured\n", 418 "PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
572static void 571static void
573__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 572__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
574 struct qed_tunn_update_type *p_src, 573 struct qed_tunn_update_type *p_src,
575 enum qed_tunn_clss mask, u8 *p_cls) 574 enum qed_tunn_mode mask, u8 *p_cls)
576{ 575{
577 if (p_src->b_update_mode) { 576 if (p_src->b_update_mode) {
578 p_req->tun_mode_update_mask |= BIT(mask); 577 p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
587static void 586static void
588qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 587qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
589 struct qed_tunn_update_type *p_src, 588 struct qed_tunn_update_type *p_src,
590 enum qed_tunn_clss mask, 589 enum qed_tunn_mode mask,
591 u8 *p_cls, struct qed_tunn_update_udp_port *p_port, 590 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
592 u8 *p_update_port, u16 *p_udp_port) 591 u8 *p_update_port, u16 *p_udp_port)
593{ 592{
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 9673d19308e6..b16ce7d93caf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -2006,18 +2006,16 @@ unlock:
2006static int qede_parse_actions(struct qede_dev *edev, 2006static int qede_parse_actions(struct qede_dev *edev,
2007 struct tcf_exts *exts) 2007 struct tcf_exts *exts)
2008{ 2008{
2009 int rc = -EINVAL, num_act = 0; 2009 int rc = -EINVAL, num_act = 0, i;
2010 const struct tc_action *a; 2010 const struct tc_action *a;
2011 bool is_drop = false; 2011 bool is_drop = false;
2012 LIST_HEAD(actions);
2013 2012
2014 if (!tcf_exts_has_actions(exts)) { 2013 if (!tcf_exts_has_actions(exts)) {
2015 DP_NOTICE(edev, "No tc actions received\n"); 2014 DP_NOTICE(edev, "No tc actions received\n");
2016 return rc; 2015 return rc;
2017 } 2016 }
2018 2017
2019 tcf_exts_to_list(exts, &actions); 2018 tcf_exts_for_each_action(i, a, exts) {
2020 list_for_each_entry(a, &actions, list) {
2021 num_act++; 2019 num_act++;
2022 2020
2023 if (is_tcf_gact_shot(a)) 2021 if (is_tcf_gact_shot(a))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
1800 int (*config_loopback) (struct qlcnic_adapter *, u8); 1800 int (*config_loopback) (struct qlcnic_adapter *, u8);
1801 int (*clear_loopback) (struct qlcnic_adapter *, u8); 1801 int (*clear_loopback) (struct qlcnic_adapter *, u8);
1802 int (*config_promisc_mode) (struct qlcnic_adapter *, u32); 1802 int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
1803 void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16); 1803 void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
1804 u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
1804 int (*get_board_info) (struct qlcnic_adapter *); 1805 int (*get_board_info) (struct qlcnic_adapter *);
1805 void (*set_mac_filter_count) (struct qlcnic_adapter *); 1806 void (*set_mac_filter_count) (struct qlcnic_adapter *);
1806 void (*free_mac_list) (struct qlcnic_adapter *); 1807 void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
2064} 2065}
2065 2066
2066static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter, 2067static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
2067 u64 *addr, u16 id) 2068 u64 *addr, u16 vlan,
2069 struct qlcnic_host_tx_ring *tx_ring)
2068{ 2070{
2069 adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id); 2071 adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
2070} 2072}
2071 2073
2072static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter) 2074static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..a79d84f99102 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
2135} 2135}
2136 2136
2137void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr, 2137void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
2138 u16 vlan_id) 2138 u16 vlan_id,
2139 struct qlcnic_host_tx_ring *tx_ring)
2139{ 2140{
2140 u8 mac[ETH_ALEN]; 2141 u8 mac[ETH_ALEN];
2141 memcpy(&mac, addr, ETH_ALEN); 2142 memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
550int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32); 550int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
551int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int); 551int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
552int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int); 552int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
553void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16); 553void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
554 u16 vlan, struct qlcnic_host_tx_ring *ring);
554int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *); 555int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
555int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 556int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
556void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int); 557void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
173 struct net_device *netdev); 173 struct net_device *netdev);
174void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *); 174void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
175void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, 175void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
176 u64 *uaddr, u16 vlan_id); 176 u64 *uaddr, u16 vlan_id,
177 struct qlcnic_host_tx_ring *tx_ring);
177int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *, 178int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
178 struct ethtool_coalesce *); 179 struct ethtool_coalesce *);
179int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *); 180int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
268} 268}
269 269
270void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr, 270void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
271 u16 vlan_id) 271 u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
272{ 272{
273 struct cmd_desc_type0 *hwdesc; 273 struct cmd_desc_type0 *hwdesc;
274 struct qlcnic_nic_req *req; 274 struct qlcnic_nic_req *req;
275 struct qlcnic_mac_req *mac_req; 275 struct qlcnic_mac_req *mac_req;
276 struct qlcnic_vlan_req *vlan_req; 276 struct qlcnic_vlan_req *vlan_req;
277 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
278 u32 producer; 277 u32 producer;
279 u64 word; 278 u64 word;
280 279
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
301 300
302static void qlcnic_send_filter(struct qlcnic_adapter *adapter, 301static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
303 struct cmd_desc_type0 *first_desc, 302 struct cmd_desc_type0 *first_desc,
304 struct sk_buff *skb) 303 struct sk_buff *skb,
304 struct qlcnic_host_tx_ring *tx_ring)
305{ 305{
306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data); 306 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
307 struct ethhdr *phdr = (struct ethhdr *)(skb->data); 307 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
335 tmp_fil->vlan_id == vlan_id) { 335 tmp_fil->vlan_id == vlan_id) {
336 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime)) 336 if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
337 qlcnic_change_filter(adapter, &src_addr, 337 qlcnic_change_filter(adapter, &src_addr,
338 vlan_id); 338 vlan_id, tx_ring);
339 tmp_fil->ftime = jiffies; 339 tmp_fil->ftime = jiffies;
340 return; 340 return;
341 } 341 }
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
350 if (!fil) 350 if (!fil)
351 return; 351 return;
352 352
353 qlcnic_change_filter(adapter, &src_addr, vlan_id); 353 qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
354 fil->ftime = jiffies; 354 fil->ftime = jiffies;
355 fil->vlan_id = vlan_id; 355 fil->vlan_id = vlan_id;
356 memcpy(fil->faddr, &src_addr, ETH_ALEN); 356 memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
766 } 766 }
767 767
768 if (adapter->drv_mac_learn) 768 if (adapter->drv_mac_learn)
769 qlcnic_send_filter(adapter, first_desc, skb); 769 qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
770 770
771 tx_ring->tx_stats.tx_bytes += skb->len; 771 tx_ring->tx_stats.tx_bytes += skb->len;
772 tx_ring->tx_stats.xmit_called++; 772 tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..dbd48012224f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
59static void qlcnic_tx_timeout(struct net_device *netdev); 59static void qlcnic_tx_timeout(struct net_device *netdev);
60static void qlcnic_attach_work(struct work_struct *work); 60static void qlcnic_attach_work(struct work_struct *work);
61static void qlcnic_fwinit_work(struct work_struct *work); 61static void qlcnic_fwinit_work(struct work_struct *work);
62#ifdef CONFIG_NET_POLL_CONTROLLER
63static void qlcnic_poll_controller(struct net_device *netdev);
64#endif
65 62
66static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); 63static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
67static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 64static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
545 .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, 542 .ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
546 .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, 543 .ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
547 .ndo_features_check = qlcnic_features_check, 544 .ndo_features_check = qlcnic_features_check,
548#ifdef CONFIG_NET_POLL_CONTROLLER
549 .ndo_poll_controller = qlcnic_poll_controller,
550#endif
551#ifdef CONFIG_QLCNIC_SRIOV 545#ifdef CONFIG_QLCNIC_SRIOV
552 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, 546 .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
553 .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate, 547 .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
3200 return IRQ_HANDLED; 3194 return IRQ_HANDLED;
3201} 3195}
3202 3196
3203#ifdef CONFIG_NET_POLL_CONTROLLER
3204static void qlcnic_poll_controller(struct net_device *netdev)
3205{
3206 struct qlcnic_adapter *adapter = netdev_priv(netdev);
3207 struct qlcnic_host_sds_ring *sds_ring;
3208 struct qlcnic_recv_context *recv_ctx;
3209 struct qlcnic_host_tx_ring *tx_ring;
3210 int ring;
3211
3212 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3213 return;
3214
3215 recv_ctx = adapter->recv_ctx;
3216
3217 for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
3218 sds_ring = &recv_ctx->sds_rings[ring];
3219 qlcnic_disable_sds_intr(adapter, sds_ring);
3220 napi_schedule(&sds_ring->napi);
3221 }
3222
3223 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
3224 /* Only Multi-Tx queue capable devices need to
3225 * schedule NAPI for TX rings
3226 */
3227 if ((qlcnic_83xx_check(adapter) &&
3228 (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
3229 (qlcnic_82xx_check(adapter) &&
3230 !qlcnic_check_multi_tx(adapter)))
3231 return;
3232
3233 for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
3234 tx_ring = &adapter->tx_ring[ring];
3235 qlcnic_disable_tx_intr(adapter, tx_ring);
3236 napi_schedule(&tx_ring->napi);
3237 }
3238 }
3239}
3240#endif
3241
3242static void 3197static void
3243qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding) 3198qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
3244{ 3199{
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 353f1c129af1..059ba9429e51 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
2384 return status; 2384 return status;
2385} 2385}
2386 2386
2387static netdev_features_t qlge_fix_features(struct net_device *ndev,
2388 netdev_features_t features)
2389{
2390 int err;
2391
2392 /* Update the behavior of vlan accel in the adapter */
2393 err = qlge_update_hw_vlan_features(ndev, features);
2394 if (err)
2395 return err;
2396
2397 return features;
2398}
2399
2400static int qlge_set_features(struct net_device *ndev, 2387static int qlge_set_features(struct net_device *ndev,
2401 netdev_features_t features) 2388 netdev_features_t features)
2402{ 2389{
2403 netdev_features_t changed = ndev->features ^ features; 2390 netdev_features_t changed = ndev->features ^ features;
2391 int err;
2392
2393 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2394 /* Update the behavior of vlan accel in the adapter */
2395 err = qlge_update_hw_vlan_features(ndev, features);
2396 if (err)
2397 return err;
2404 2398
2405 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2406 qlge_vlan_mode(ndev, features); 2399 qlge_vlan_mode(ndev, features);
2400 }
2407 2401
2408 return 0; 2402 return 0;
2409} 2403}
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
4719 .ndo_set_mac_address = qlge_set_mac_address, 4713 .ndo_set_mac_address = qlge_set_mac_address,
4720 .ndo_validate_addr = eth_validate_addr, 4714 .ndo_validate_addr = eth_validate_addr,
4721 .ndo_tx_timeout = qlge_tx_timeout, 4715 .ndo_tx_timeout = qlge_tx_timeout,
4722 .ndo_fix_features = qlge_fix_features,
4723 .ndo_set_features = qlge_set_features, 4716 .ndo_set_features = qlge_set_features,
4724 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, 4717 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4725 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, 4718 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index ffe7a16bdfc8..6c8543fb90c0 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
45{ 45{
46 __be16 rx_data; 46 __be16 rx_data;
47 __be16 tx_data; 47 __be16 tx_data;
48 struct spi_transfer *transfer; 48 struct spi_transfer transfer[2];
49 struct spi_message *msg; 49 struct spi_message msg;
50 int ret; 50 int ret;
51 51
52 memset(transfer, 0, sizeof(transfer));
53
54 spi_message_init(&msg);
55
52 tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg); 56 tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
57 *result = 0;
58
59 transfer[0].tx_buf = &tx_data;
60 transfer[0].len = QCASPI_CMD_LEN;
61 transfer[1].rx_buf = &rx_data;
62 transfer[1].len = QCASPI_CMD_LEN;
63
64 spi_message_add_tail(&transfer[0], &msg);
53 65
54 if (qca->legacy_mode) { 66 if (qca->legacy_mode) {
55 msg = &qca->spi_msg1; 67 spi_sync(qca->spi_dev, &msg);
56 transfer = &qca->spi_xfer1; 68 spi_message_init(&msg);
57 transfer->tx_buf = &tx_data;
58 transfer->rx_buf = NULL;
59 transfer->len = QCASPI_CMD_LEN;
60 spi_sync(qca->spi_dev, msg);
61 } else {
62 msg = &qca->spi_msg2;
63 transfer = &qca->spi_xfer2[0];
64 transfer->tx_buf = &tx_data;
65 transfer->rx_buf = NULL;
66 transfer->len = QCASPI_CMD_LEN;
67 transfer = &qca->spi_xfer2[1];
68 } 69 }
69 transfer->tx_buf = NULL; 70 spi_message_add_tail(&transfer[1], &msg);
70 transfer->rx_buf = &rx_data; 71 ret = spi_sync(qca->spi_dev, &msg);
71 transfer->len = QCASPI_CMD_LEN;
72 ret = spi_sync(qca->spi_dev, msg);
73 72
74 if (!ret) 73 if (!ret)
75 ret = msg->status; 74 ret = msg.status;
76 75
77 if (ret) 76 if (ret)
78 qcaspi_spi_error(qca); 77 qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
86qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value) 85qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
87{ 86{
88 __be16 tx_data[2]; 87 __be16 tx_data[2];
89 struct spi_transfer *transfer; 88 struct spi_transfer transfer[2];
90 struct spi_message *msg; 89 struct spi_message msg;
91 int ret; 90 int ret;
92 91
92 memset(&transfer, 0, sizeof(transfer));
93
94 spi_message_init(&msg);
95
93 tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg); 96 tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
94 tx_data[1] = cpu_to_be16(value); 97 tx_data[1] = cpu_to_be16(value);
95 98
99 transfer[0].tx_buf = &tx_data[0];
100 transfer[0].len = QCASPI_CMD_LEN;
101 transfer[1].tx_buf = &tx_data[1];
102 transfer[1].len = QCASPI_CMD_LEN;
103
104 spi_message_add_tail(&transfer[0], &msg);
96 if (qca->legacy_mode) { 105 if (qca->legacy_mode) {
97 msg = &qca->spi_msg1; 106 spi_sync(qca->spi_dev, &msg);
98 transfer = &qca->spi_xfer1; 107 spi_message_init(&msg);
99 transfer->tx_buf = &tx_data[0];
100 transfer->rx_buf = NULL;
101 transfer->len = QCASPI_CMD_LEN;
102 spi_sync(qca->spi_dev, msg);
103 } else {
104 msg = &qca->spi_msg2;
105 transfer = &qca->spi_xfer2[0];
106 transfer->tx_buf = &tx_data[0];
107 transfer->rx_buf = NULL;
108 transfer->len = QCASPI_CMD_LEN;
109 transfer = &qca->spi_xfer2[1];
110 } 108 }
111 transfer->tx_buf = &tx_data[1]; 109 spi_message_add_tail(&transfer[1], &msg);
112 transfer->rx_buf = NULL; 110 ret = spi_sync(qca->spi_dev, &msg);
113 transfer->len = QCASPI_CMD_LEN;
114 ret = spi_sync(qca->spi_dev, msg);
115 111
116 if (!ret) 112 if (!ret)
117 ret = msg->status; 113 ret = msg.status;
118 114
119 if (ret) 115 if (ret)
120 qcaspi_spi_error(qca); 116 qcaspi_spi_error(qca);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 206f0266463e..66b775d462fd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -99,22 +99,24 @@ static u32
99qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len) 99qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
100{ 100{
101 __be16 cmd; 101 __be16 cmd;
102 struct spi_message *msg = &qca->spi_msg2; 102 struct spi_message msg;
103 struct spi_transfer *transfer = &qca->spi_xfer2[0]; 103 struct spi_transfer transfer[2];
104 int ret; 104 int ret;
105 105
106 memset(&transfer, 0, sizeof(transfer));
107 spi_message_init(&msg);
108
106 cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL); 109 cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
107 transfer->tx_buf = &cmd; 110 transfer[0].tx_buf = &cmd;
108 transfer->rx_buf = NULL; 111 transfer[0].len = QCASPI_CMD_LEN;
109 transfer->len = QCASPI_CMD_LEN; 112 transfer[1].tx_buf = src;
110 transfer = &qca->spi_xfer2[1]; 113 transfer[1].len = len;
111 transfer->tx_buf = src;
112 transfer->rx_buf = NULL;
113 transfer->len = len;
114 114
115 ret = spi_sync(qca->spi_dev, msg); 115 spi_message_add_tail(&transfer[0], &msg);
116 spi_message_add_tail(&transfer[1], &msg);
117 ret = spi_sync(qca->spi_dev, &msg);
116 118
117 if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { 119 if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
118 qcaspi_spi_error(qca); 120 qcaspi_spi_error(qca);
119 return 0; 121 return 0;
120 } 122 }
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
125static u32 127static u32
126qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len) 128qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
127{ 129{
128 struct spi_message *msg = &qca->spi_msg1; 130 struct spi_message msg;
129 struct spi_transfer *transfer = &qca->spi_xfer1; 131 struct spi_transfer transfer;
130 int ret; 132 int ret;
131 133
132 transfer->tx_buf = src; 134 memset(&transfer, 0, sizeof(transfer));
133 transfer->rx_buf = NULL; 135 spi_message_init(&msg);
134 transfer->len = len; 136
137 transfer.tx_buf = src;
138 transfer.len = len;
135 139
136 ret = spi_sync(qca->spi_dev, msg); 140 spi_message_add_tail(&transfer, &msg);
141 ret = spi_sync(qca->spi_dev, &msg);
137 142
138 if (ret || (msg->actual_length != len)) { 143 if (ret || (msg.actual_length != len)) {
139 qcaspi_spi_error(qca); 144 qcaspi_spi_error(qca);
140 return 0; 145 return 0;
141 } 146 }
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
146static u32 151static u32
147qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len) 152qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
148{ 153{
149 struct spi_message *msg = &qca->spi_msg2; 154 struct spi_message msg;
150 __be16 cmd; 155 __be16 cmd;
151 struct spi_transfer *transfer = &qca->spi_xfer2[0]; 156 struct spi_transfer transfer[2];
152 int ret; 157 int ret;
153 158
159 memset(&transfer, 0, sizeof(transfer));
160 spi_message_init(&msg);
161
154 cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL); 162 cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
155 transfer->tx_buf = &cmd; 163 transfer[0].tx_buf = &cmd;
156 transfer->rx_buf = NULL; 164 transfer[0].len = QCASPI_CMD_LEN;
157 transfer->len = QCASPI_CMD_LEN; 165 transfer[1].rx_buf = dst;
158 transfer = &qca->spi_xfer2[1]; 166 transfer[1].len = len;
159 transfer->tx_buf = NULL;
160 transfer->rx_buf = dst;
161 transfer->len = len;
162 167
163 ret = spi_sync(qca->spi_dev, msg); 168 spi_message_add_tail(&transfer[0], &msg);
169 spi_message_add_tail(&transfer[1], &msg);
170 ret = spi_sync(qca->spi_dev, &msg);
164 171
165 if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) { 172 if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
166 qcaspi_spi_error(qca); 173 qcaspi_spi_error(qca);
167 return 0; 174 return 0;
168 } 175 }
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
173static u32 180static u32
174qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len) 181qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
175{ 182{
176 struct spi_message *msg = &qca->spi_msg1; 183 struct spi_message msg;
177 struct spi_transfer *transfer = &qca->spi_xfer1; 184 struct spi_transfer transfer;
178 int ret; 185 int ret;
179 186
180 transfer->tx_buf = NULL; 187 memset(&transfer, 0, sizeof(transfer));
181 transfer->rx_buf = dst; 188 spi_message_init(&msg);
182 transfer->len = len;
183 189
184 ret = spi_sync(qca->spi_dev, msg); 190 transfer.rx_buf = dst;
191 transfer.len = len;
185 192
186 if (ret || (msg->actual_length != len)) { 193 spi_message_add_tail(&transfer, &msg);
194 ret = spi_sync(qca->spi_dev, &msg);
195
196 if (ret || (msg.actual_length != len)) {
187 qcaspi_spi_error(qca); 197 qcaspi_spi_error(qca);
188 return 0; 198 return 0;
189 } 199 }
@@ -195,19 +205,23 @@ static int
195qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd) 205qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
196{ 206{
197 __be16 tx_data; 207 __be16 tx_data;
198 struct spi_message *msg = &qca->spi_msg1; 208 struct spi_message msg;
199 struct spi_transfer *transfer = &qca->spi_xfer1; 209 struct spi_transfer transfer;
200 int ret; 210 int ret;
201 211
212 memset(&transfer, 0, sizeof(transfer));
213
214 spi_message_init(&msg);
215
202 tx_data = cpu_to_be16(cmd); 216 tx_data = cpu_to_be16(cmd);
203 transfer->len = sizeof(tx_data); 217 transfer.len = sizeof(cmd);
204 transfer->tx_buf = &tx_data; 218 transfer.tx_buf = &tx_data;
205 transfer->rx_buf = NULL; 219 spi_message_add_tail(&transfer, &msg);
206 220
207 ret = spi_sync(qca->spi_dev, msg); 221 ret = spi_sync(qca->spi_dev, &msg);
208 222
209 if (!ret) 223 if (!ret)
210 ret = msg->status; 224 ret = msg.status;
211 225
212 if (ret) 226 if (ret)
213 qcaspi_spi_error(qca); 227 qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
835 qca = netdev_priv(dev); 849 qca = netdev_priv(dev);
836 memset(qca, 0, sizeof(struct qcaspi)); 850 memset(qca, 0, sizeof(struct qcaspi));
837 851
838 memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
839 memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
840
841 spi_message_init(&qca->spi_msg1);
842 spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
843
844 spi_message_init(&qca->spi_msg2);
845 spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
846 spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
847
848 memset(&qca->txr, 0, sizeof(qca->txr)); 852 memset(&qca->txr, 0, sizeof(qca->txr));
849 qca->txr.count = TX_RING_MAX_LEN; 853 qca->txr.count = TX_RING_MAX_LEN;
850} 854}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index fc4beb1b32d1..fc0e98726b36 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -83,11 +83,6 @@ struct qcaspi {
83 struct tx_ring txr; 83 struct tx_ring txr;
84 struct qcaspi_stats stats; 84 struct qcaspi_stats stats;
85 85
86 struct spi_message spi_msg1;
87 struct spi_message spi_msg2;
88 struct spi_transfer spi_xfer1;
89 struct spi_transfer spi_xfer2[2];
90
91 u8 *rx_buffer; 86 u8 *rx_buffer;
92 u32 buffer_size; 87 u32 buffer_size;
93 u8 sync; 88 u8 sync;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
113 struct sk_buff *skbn; 113 struct sk_buff *skbn;
114 114
115 if (skb->dev->type == ARPHRD_ETHER) { 115 if (skb->dev->type == ARPHRD_ETHER) {
116 if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) { 116 if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
117 kfree_skb(skb); 117 kfree_skb(skb);
118 return; 118 return;
119 } 119 }
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
147 } 147 }
148 148
149 if (skb_headroom(skb) < required_headroom) { 149 if (skb_headroom(skb) < required_headroom) {
150 if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) 150 if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
151 return -ENOMEM; 151 return -ENOMEM;
152 } 152 }
153 153
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
189 if (!skb) 189 if (!skb)
190 goto done; 190 goto done;
191 191
192 if (skb->pkt_type == PACKET_LOOPBACK)
193 return RX_HANDLER_PASS;
194
192 dev = skb->dev; 195 dev = skb->dev;
193 port = rmnet_get_port(dev); 196 port = rmnet_get_port(dev);
194 197
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0efa977c422d..9a5e2969df61 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
18#include <linux/phy.h> 19#include <linux/phy.h>
@@ -218,6 +219,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
218 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, 219 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
219 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 220 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
220 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, 221 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
222 { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
221 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 223 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
222 { PCI_VENDOR_ID_DLINK, 0x4300, 224 { PCI_VENDOR_ID_DLINK, 0x4300,
223 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, 225 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
@@ -630,7 +632,7 @@ struct rtl8169_tc_offsets {
630}; 632};
631 633
632enum rtl_flag { 634enum rtl_flag {
633 RTL_FLAG_TASK_ENABLED, 635 RTL_FLAG_TASK_ENABLED = 0,
634 RTL_FLAG_TASK_SLOW_PENDING, 636 RTL_FLAG_TASK_SLOW_PENDING,
635 RTL_FLAG_TASK_RESET_PENDING, 637 RTL_FLAG_TASK_RESET_PENDING,
636 RTL_FLAG_MAX 638 RTL_FLAG_MAX
@@ -664,6 +666,7 @@ struct rtl8169_private {
664 666
665 u16 event_slow; 667 u16 event_slow;
666 const struct rtl_coalesce_info *coalesce_info; 668 const struct rtl_coalesce_info *coalesce_info;
669 struct clk *clk;
667 670
668 struct mdio_ops { 671 struct mdio_ops {
669 void (*write)(struct rtl8169_private *, int, int); 672 void (*write)(struct rtl8169_private *, int, int);
@@ -4068,6 +4071,14 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
4068 phy_speed_up(dev->phydev); 4071 phy_speed_up(dev->phydev);
4069 4072
4070 genphy_soft_reset(dev->phydev); 4073 genphy_soft_reset(dev->phydev);
4074
4075 /* It was reported that several chips end up with 10MBit/Half on a
4076 * 1GBit link after resuming from S3. For whatever reason the PHY on
4077 * these chips doesn't properly start a renegotiation when soft-reset.
4078 * Explicitly requesting a renegotiation fixes this.
4079 */
4080 if (dev->phydev->autoneg == AUTONEG_ENABLE)
4081 phy_restart_aneg(dev->phydev);
4071} 4082}
4072 4083
4073static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) 4084static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4522,11 +4533,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4522 rtl_hw_reset(tp); 4533 rtl_hw_reset(tp);
4523} 4534}
4524 4535
4525static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) 4536static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
4526{ 4537{
4527 /* Set DMA burst size and Interframe Gap Time */ 4538 u32 val = TX_DMA_BURST << TxDMAShift |
4528 RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) | 4539 InterFrameGap << TxInterFrameGapShift;
4529 (InterFrameGap << TxInterFrameGapShift)); 4540
4541 if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
4542 tp->mac_version != RTL_GIGA_MAC_VER_39)
4543 val |= TXCFG_AUTO_FIFO;
4544
4545 RTL_W32(tp, TxConfig, val);
4530} 4546}
4531 4547
4532static void rtl_set_rx_max_size(struct rtl8169_private *tp) 4548static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -4633,12 +4649,14 @@ static void rtl_hw_start(struct rtl8169_private *tp)
4633 4649
4634 rtl_set_rx_max_size(tp); 4650 rtl_set_rx_max_size(tp);
4635 rtl_set_rx_tx_desc_registers(tp); 4651 rtl_set_rx_tx_desc_registers(tp);
4636 rtl_set_rx_tx_config_registers(tp);
4637 RTL_W8(tp, Cfg9346, Cfg9346_Lock); 4652 RTL_W8(tp, Cfg9346, Cfg9346_Lock);
4638 4653
4639 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ 4654 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4640 RTL_R8(tp, IntrMask); 4655 RTL_R8(tp, IntrMask);
4641 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); 4656 RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
4657 rtl_init_rxcfg(tp);
4658 rtl_set_tx_config_registers(tp);
4659
4642 rtl_set_rx_mode(tp->dev); 4660 rtl_set_rx_mode(tp->dev);
4643 /* no early-rx interrupts */ 4661 /* no early-rx interrupts */
4644 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); 4662 RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
@@ -4772,12 +4790,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
4772static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) 4790static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
4773{ 4791{
4774 if (enable) { 4792 if (enable) {
4775 RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
4776 RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); 4793 RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
4794 RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
4777 } else { 4795 } else {
4778 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); 4796 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
4779 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); 4797 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
4780 } 4798 }
4799
4800 udelay(10);
4781} 4801}
4782 4802
4783static void rtl_hw_start_8168bb(struct rtl8169_private *tp) 4803static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5017,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5017 5037
5018 rtl_disable_clock_request(tp); 5038 rtl_disable_clock_request(tp);
5019 5039
5020 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5021 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); 5040 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5022 5041
5023 /* Adjust EEE LED frequency */ 5042 /* Adjust EEE LED frequency */
@@ -5051,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5051 5070
5052 rtl_disable_clock_request(tp); 5071 rtl_disable_clock_request(tp);
5053 5072
5054 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5055 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); 5073 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5056 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); 5074 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
5057 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); 5075 RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5096,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
5096 5114
5097static void rtl_hw_start_8168g(struct rtl8169_private *tp) 5115static void rtl_hw_start_8168g(struct rtl8169_private *tp)
5098{ 5116{
5099 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5100
5101 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); 5117 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5102 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); 5118 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5103 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); 5119 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5195,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
5195 rtl_hw_aspm_clkreq_enable(tp, false); 5211 rtl_hw_aspm_clkreq_enable(tp, false);
5196 rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); 5212 rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
5197 5213
5198 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5199
5200 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); 5214 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
5201 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); 5215 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5202 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); 5216 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5279,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
5279{ 5293{
5280 rtl8168ep_stop_cmac(tp); 5294 rtl8168ep_stop_cmac(tp);
5281 5295
5282 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5283
5284 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); 5296 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
5285 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); 5297 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
5286 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); 5298 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5602,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5602 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5614 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5603 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); 5615 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5604 5616
5605 RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
5606 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB); 5617 RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5607 5618
5608 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); 5619 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5622,6 +5633,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5622 5633
5623static void rtl_hw_start_8106(struct rtl8169_private *tp) 5634static void rtl_hw_start_8106(struct rtl8169_private *tp)
5624{ 5635{
5636 rtl_hw_aspm_clkreq_enable(tp, false);
5637
5625 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5638 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5626 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); 5639 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5627 5640
@@ -5630,6 +5643,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5630 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); 5643 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
5631 5644
5632 rtl_pcie_state_l2l3_enable(tp, false); 5645 rtl_pcie_state_l2l3_enable(tp, false);
5646 rtl_hw_aspm_clkreq_enable(tp, true);
5633} 5647}
5634 5648
5635static void rtl_hw_start_8101(struct rtl8169_private *tp) 5649static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -6652,7 +6666,8 @@ static int rtl8169_close(struct net_device *dev)
6652 rtl8169_update_counters(tp); 6666 rtl8169_update_counters(tp);
6653 6667
6654 rtl_lock_work(tp); 6668 rtl_lock_work(tp);
6655 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 6669 /* Clear all task flags */
6670 bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
6656 6671
6657 rtl8169_down(dev); 6672 rtl8169_down(dev);
6658 rtl_unlock_work(tp); 6673 rtl_unlock_work(tp);
@@ -6835,7 +6850,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
6835 6850
6836 rtl_lock_work(tp); 6851 rtl_lock_work(tp);
6837 napi_disable(&tp->napi); 6852 napi_disable(&tp->napi);
6838 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); 6853 /* Clear all task flags */
6854 bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
6855
6839 rtl_unlock_work(tp); 6856 rtl_unlock_work(tp);
6840 6857
6841 rtl_pll_power_down(tp); 6858 rtl_pll_power_down(tp);
@@ -6847,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
6847{ 6864{
6848 struct pci_dev *pdev = to_pci_dev(device); 6865 struct pci_dev *pdev = to_pci_dev(device);
6849 struct net_device *dev = pci_get_drvdata(pdev); 6866 struct net_device *dev = pci_get_drvdata(pdev);
6867 struct rtl8169_private *tp = netdev_priv(dev);
6850 6868
6851 rtl8169_net_suspend(dev); 6869 rtl8169_net_suspend(dev);
6870 clk_disable_unprepare(tp->clk);
6852 6871
6853 return 0; 6872 return 0;
6854} 6873}
@@ -6876,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
6876{ 6895{
6877 struct pci_dev *pdev = to_pci_dev(device); 6896 struct pci_dev *pdev = to_pci_dev(device);
6878 struct net_device *dev = pci_get_drvdata(pdev); 6897 struct net_device *dev = pci_get_drvdata(pdev);
6898 struct rtl8169_private *tp = netdev_priv(dev);
6899
6900 clk_prepare_enable(tp->clk);
6879 6901
6880 if (netif_running(dev)) 6902 if (netif_running(dev))
6881 __rtl8169_resume(dev); 6903 __rtl8169_resume(dev);
@@ -7251,6 +7273,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
7251 } 7273 }
7252} 7274}
7253 7275
7276static void rtl_disable_clk(void *data)
7277{
7278 clk_disable_unprepare(data);
7279}
7280
7254static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 7281static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7255{ 7282{
7256 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; 7283 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7271,6 +7298,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7271 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 7298 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
7272 tp->supports_gmii = cfg->has_gmii; 7299 tp->supports_gmii = cfg->has_gmii;
7273 7300
7301 /* Get the *optional* external "ether_clk" used on some boards */
7302 tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
7303 if (IS_ERR(tp->clk)) {
7304 rc = PTR_ERR(tp->clk);
7305 if (rc == -ENOENT) {
7306 /* clk-core allows NULL (for suspend / resume) */
7307 tp->clk = NULL;
7308 } else if (rc == -EPROBE_DEFER) {
7309 return rc;
7310 } else {
7311 dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
7312 return rc;
7313 }
7314 } else {
7315 rc = clk_prepare_enable(tp->clk);
7316 if (rc) {
7317 dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
7318 return rc;
7319 }
7320
7321 rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
7322 tp->clk);
7323 if (rc)
7324 return rc;
7325 }
7326
7274 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 7327 /* enable device (incl. PCI PM wakeup and hotplug setup) */
7275 rc = pcim_enable_device(pdev); 7328 rc = pcim_enable_device(pdev);
7276 if (rc < 0) { 7329 if (rc < 0) {
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index f3f7477043ce..bb0ebdfd4459 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Renesas device configuration 3# Renesas device configuration
3# 4#
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index a05102a7df02..f21ab8c02af0 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -1,3 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
1# 2#
2# Makefile for the Renesas device drivers. 3# Makefile for the Renesas device drivers.
3# 4#
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b81f4faf7b10..9b6bf557a2f5 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* Renesas Ethernet AVB device driver 2/* Renesas Ethernet AVB device driver
2 * 3 *
3 * Copyright (C) 2014-2015 Renesas Electronics Corporation 4 * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
5 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
6 * 7 *
7 * Based on the SuperH Ethernet driver 8 * Based on the SuperH Ethernet driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License version 2,
11 * as published by the Free Software Foundation.
12 */ 9 */
13 10
14#ifndef __RAVB_H__ 11#ifndef __RAVB_H__
@@ -431,6 +428,7 @@ enum EIS_BIT {
431 EIS_CULF1 = 0x00000080, 428 EIS_CULF1 = 0x00000080,
432 EIS_TFFF = 0x00000100, 429 EIS_TFFF = 0x00000100,
433 EIS_QFS = 0x00010000, 430 EIS_QFS = 0x00010000,
431 EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
434}; 432};
435 433
436/* RIC0 */ 434/* RIC0 */
@@ -475,6 +473,7 @@ enum RIS0_BIT {
475 RIS0_FRF15 = 0x00008000, 473 RIS0_FRF15 = 0x00008000,
476 RIS0_FRF16 = 0x00010000, 474 RIS0_FRF16 = 0x00010000,
477 RIS0_FRF17 = 0x00020000, 475 RIS0_FRF17 = 0x00020000,
476 RIS0_RESERVED = GENMASK(31, 18),
478}; 477};
479 478
480/* RIC1 */ 479/* RIC1 */
@@ -531,6 +530,7 @@ enum RIS2_BIT {
531 RIS2_QFF16 = 0x00010000, 530 RIS2_QFF16 = 0x00010000,
532 RIS2_QFF17 = 0x00020000, 531 RIS2_QFF17 = 0x00020000,
533 RIS2_RFFF = 0x80000000, 532 RIS2_RFFF = 0x80000000,
533 RIS2_RESERVED = GENMASK(30, 18),
534}; 534};
535 535
536/* TIC */ 536/* TIC */
@@ -547,6 +547,7 @@ enum TIS_BIT {
547 TIS_FTF1 = 0x00000002, /* Undocumented? */ 547 TIS_FTF1 = 0x00000002, /* Undocumented? */
548 TIS_TFUF = 0x00000100, 548 TIS_TFUF = 0x00000100,
549 TIS_TFWF = 0x00000200, 549 TIS_TFWF = 0x00000200,
550 TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
550}; 551};
551 552
552/* ISS */ 553/* ISS */
@@ -620,6 +621,7 @@ enum GIC_BIT {
620enum GIS_BIT { 621enum GIS_BIT {
621 GIS_PTCF = 0x00000001, /* Undocumented? */ 622 GIS_PTCF = 0x00000001, /* Undocumented? */
622 GIS_PTMF = 0x00000004, 623 GIS_PTMF = 0x00000004,
624 GIS_RESERVED = GENMASK(15, 10),
623}; 625};
624 626
625/* GIE (R-Car Gen3 only) */ 627/* GIE (R-Car Gen3 only) */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c06f2df895c2..d6f753925352 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* Renesas Ethernet AVB device driver 2/* Renesas Ethernet AVB device driver
2 * 3 *
3 * Copyright (C) 2014-2015 Renesas Electronics Corporation 4 * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
5 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
6 * 7 *
7 * Based on the SuperH Ethernet driver 8 * Based on the SuperH Ethernet driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License version 2,
11 * as published by the Free Software Foundation.
12 */ 9 */
13 10
14#include <linux/cache.h> 11#include <linux/cache.h>
@@ -742,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
742 u32 eis, ris2; 739 u32 eis, ris2;
743 740
744 eis = ravb_read(ndev, EIS); 741 eis = ravb_read(ndev, EIS);
745 ravb_write(ndev, ~EIS_QFS, EIS); 742 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
746 if (eis & EIS_QFS) { 743 if (eis & EIS_QFS) {
747 ris2 = ravb_read(ndev, RIS2); 744 ris2 = ravb_read(ndev, RIS2);
748 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); 745 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
746 RIS2);
749 747
750 /* Receive Descriptor Empty int */ 748 /* Receive Descriptor Empty int */
751 if (ris2 & RIS2_QFF0) 749 if (ris2 & RIS2_QFF0)
@@ -798,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
798 u32 tis = ravb_read(ndev, TIS); 796 u32 tis = ravb_read(ndev, TIS);
799 797
800 if (tis & TIS_TFUF) { 798 if (tis & TIS_TFUF) {
801 ravb_write(ndev, ~TIS_TFUF, TIS); 799 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
802 ravb_get_tx_tstamp(ndev); 800 ravb_get_tx_tstamp(ndev);
803 return true; 801 return true;
804 } 802 }
@@ -933,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
933 /* Processing RX Descriptor Ring */ 931 /* Processing RX Descriptor Ring */
934 if (ris0 & mask) { 932 if (ris0 & mask) {
935 /* Clear RX interrupt */ 933 /* Clear RX interrupt */
936 ravb_write(ndev, ~mask, RIS0); 934 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
937 if (ravb_rx(ndev, &quota, q)) 935 if (ravb_rx(ndev, &quota, q))
938 goto out; 936 goto out;
939 } 937 }
@@ -941,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
941 if (tis & mask) { 939 if (tis & mask) {
942 spin_lock_irqsave(&priv->lock, flags); 940 spin_lock_irqsave(&priv->lock, flags);
943 /* Clear TX interrupt */ 941 /* Clear TX interrupt */
944 ravb_write(ndev, ~mask, TIS); 942 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
945 ravb_tx_free(ndev, q, true); 943 ravb_tx_free(ndev, q, true);
946 netif_wake_subqueue(ndev, q); 944 netif_wake_subqueue(ndev, q);
947 mmiowb(); 945 mmiowb();
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index eede70ec37f8..dce2a40a31e3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -1,13 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* PTP 1588 clock using the Renesas Ethernet AVB 2/* PTP 1588 clock using the Renesas Ethernet AVB
2 * 3 *
3 * Copyright (C) 2013-2015 Renesas Electronics Corporation 4 * Copyright (C) 2013-2015 Renesas Electronics Corporation
4 * Copyright (C) 2015 Renesas Solutions Corp. 5 * Copyright (C) 2015 Renesas Solutions Corp.
5 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */ 7 */
12 8
13#include "ravb.h" 9#include "ravb.h"
@@ -319,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
319 } 315 }
320 } 316 }
321 317
322 ravb_write(ndev, ~gis, GIS); 318 ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
323} 319}
324 320
325void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) 321void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 5573199c4536..f27a0dc8c563 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* SuperH Ethernet device driver 2/* SuperH Ethernet device driver
2 * 3 *
3 * Copyright (C) 2014 Renesas Electronics Corporation 4 * Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
5 * Copyright (C) 2008-2014 Renesas Solutions Corp. 6 * Copyright (C) 2008-2014 Renesas Solutions Corp.
6 * Copyright (C) 2013-2017 Cogent Embedded, Inc. 7 * Copyright (C) 2013-2017 Cogent Embedded, Inc.
7 * Copyright (C) 2014 Codethink Limited 8 * Copyright (C) 2014 Codethink Limited
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
20 */ 9 */
21 10
22#include <linux/module.h> 11#include <linux/module.h>
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
809 .magic = 1, 798 .magic = 1,
810 .cexcr = 1, 799 .cexcr = 1,
811}; 800};
801
802/* R7S9210 */
803static struct sh_eth_cpu_data r7s9210_data = {
804 .soft_reset = sh_eth_soft_reset,
805
806 .set_duplex = sh_eth_set_duplex,
807 .set_rate = sh_eth_set_rate_rcar,
808
809 .register_type = SH_ETH_REG_FAST_SH4,
810
811 .edtrr_trns = EDTRR_TRNS_ETHER,
812 .ecsr_value = ECSR_ICD,
813 .ecsipr_value = ECSIPR_ICDIP,
814 .eesipr_value = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
815 EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
816 EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
817 EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
818 EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
819 EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
820 EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
821
822 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
823 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
824 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
825
826 .fdr_value = 0x0000070f,
827
828 .apr = 1,
829 .mpr = 1,
830 .tpauser = 1,
831 .hw_swap = 1,
832 .rpadir = 1,
833 .no_ade = 1,
834 .xdfar_rw = 1,
835};
812#endif /* CONFIG_OF */ 836#endif /* CONFIG_OF */
813 837
814static void sh_eth_set_rate_sh7724(struct net_device *ndev) 838static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
3132 { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, 3156 { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3133 { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data }, 3157 { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3134 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, 3158 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3159 { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3135 { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, 3160 { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3136 { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, 3161 { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3137 { } 3162 { }
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index f94be99cf400..0c18650bbfe6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -1,19 +1,8 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* SuperH Ethernet device driver 2/* SuperH Ethernet device driver
2 * 3 *
3 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
4 * Copyright (C) 2008-2012 Renesas Solutions Corp. 5 * Copyright (C) 2008-2012 Renesas Solutions Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * The full GNU General Public License is included in this distribution in
16 * the file called "COPYING".
17 */ 6 */
18 7
19#ifndef __SH_ETH_H__ 8#ifndef __SH_ETH_H__
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c5bc124b41a9..d1bb73bf9914 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
77static int ether3_rx(struct net_device *dev, unsigned int maxcnt); 77static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
78static void ether3_tx(struct net_device *dev); 78static void ether3_tx(struct net_device *dev);
79static int ether3_open (struct net_device *dev); 79static int ether3_open (struct net_device *dev);
80static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); 80static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
81 struct net_device *dev);
81static irqreturn_t ether3_interrupt (int irq, void *dev_id); 82static irqreturn_t ether3_interrupt (int irq, void *dev_id);
82static int ether3_close (struct net_device *dev); 83static int ether3_close (struct net_device *dev);
83static void ether3_setmulticastlist (struct net_device *dev); 84static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
481/* 482/*
482 * Transmit a packet 483 * Transmit a packet
483 */ 484 */
484static int 485static netdev_tx_t
485ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) 486ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
486{ 487{
487 unsigned long flags; 488 unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 573691bc3b71..70cce63a6081 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
578 return 0; 578 return 0;
579} 579}
580 580
581static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) 581static netdev_tx_t
582sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
582{ 583{
583 struct sgiseeq_private *sp = netdev_priv(dev); 584 struct sgiseeq_private *sp = netdev_priv(dev);
584 struct hpc3_ethregs *hregs = sp->hregs; 585 struct hpc3_ethregs *hregs = sp->hregs;
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..3d0dd39c289e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
2208 2208
2209/************************************************************************** 2209/**************************************************************************
2210 * 2210 *
2211 * Kernel netpoll interface
2212 *
2213 *************************************************************************/
2214
2215#ifdef CONFIG_NET_POLL_CONTROLLER
2216
2217/* Although in the common case interrupts will be disabled, this is not
2218 * guaranteed. However, all our work happens inside the NAPI callback,
2219 * so no locking is required.
2220 */
2221static void efx_netpoll(struct net_device *net_dev)
2222{
2223 struct efx_nic *efx = netdev_priv(net_dev);
2224 struct efx_channel *channel;
2225
2226 efx_for_each_channel(channel, efx)
2227 efx_schedule_channel(channel);
2228}
2229
2230#endif
2231
2232/**************************************************************************
2233 *
2234 * Kernel net device interface 2211 * Kernel net device interface
2235 * 2212 *
2236 *************************************************************************/ 2213 *************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
2509#endif 2486#endif
2510 .ndo_get_phys_port_id = efx_get_phys_port_id, 2487 .ndo_get_phys_port_id = efx_get_phys_port_id,
2511 .ndo_get_phys_port_name = efx_get_phys_port_name, 2488 .ndo_get_phys_port_name = efx_get_phys_port_name,
2512#ifdef CONFIG_NET_POLL_CONTROLLER
2513 .ndo_poll_controller = efx_netpoll,
2514#endif
2515 .ndo_setup_tc = efx_setup_tc, 2489 .ndo_setup_tc = efx_setup_tc,
2516#ifdef CONFIG_RFS_ACCEL 2490#ifdef CONFIG_RFS_ACCEL
2517 .ndo_rx_flow_steer = efx_filter_rfs, 2491 .ndo_rx_flow_steer = efx_filter_rfs,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..03e2455c502e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
2054 2054
2055/************************************************************************** 2055/**************************************************************************
2056 * 2056 *
2057 * Kernel netpoll interface
2058 *
2059 *************************************************************************/
2060
2061#ifdef CONFIG_NET_POLL_CONTROLLER
2062
2063/* Although in the common case interrupts will be disabled, this is not
2064 * guaranteed. However, all our work happens inside the NAPI callback,
2065 * so no locking is required.
2066 */
2067static void ef4_netpoll(struct net_device *net_dev)
2068{
2069 struct ef4_nic *efx = netdev_priv(net_dev);
2070 struct ef4_channel *channel;
2071
2072 ef4_for_each_channel(channel, efx)
2073 ef4_schedule_channel(channel);
2074}
2075
2076#endif
2077
2078/**************************************************************************
2079 *
2080 * Kernel net device interface 2057 * Kernel net device interface
2081 * 2058 *
2082 *************************************************************************/ 2059 *************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
2250 .ndo_set_mac_address = ef4_set_mac_address, 2227 .ndo_set_mac_address = ef4_set_mac_address,
2251 .ndo_set_rx_mode = ef4_set_rx_mode, 2228 .ndo_set_rx_mode = ef4_set_rx_mode,
2252 .ndo_set_features = ef4_set_features, 2229 .ndo_set_features = ef4_set_features,
2253#ifdef CONFIG_NET_POLL_CONTROLLER
2254 .ndo_poll_controller = ef4_netpoll,
2255#endif
2256 .ndo_setup_tc = ef4_setup_tc, 2230 .ndo_setup_tc = ef4_setup_tc,
2257#ifdef CONFIG_RFS_ACCEL 2231#ifdef CONFIG_RFS_ACCEL
2258 .ndo_rx_flow_steer = ef4_filter_rfs, 2232 .ndo_rx_flow_steer = ef4_filter_rfs,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 18d533fdf14c..3140999642ba 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -99,7 +99,7 @@ struct ioc3_private {
99 99
100static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 100static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
101static void ioc3_set_multicast_list(struct net_device *dev); 101static void ioc3_set_multicast_list(struct net_device *dev);
102static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); 102static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
103static void ioc3_timeout(struct net_device *dev); 103static void ioc3_timeout(struct net_device *dev);
104static inline unsigned int ioc3_hash(const unsigned char *addr); 104static inline unsigned int ioc3_hash(const unsigned char *addr);
105static inline void ioc3_stop(struct ioc3_private *ip); 105static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
1390 .remove = ioc3_remove_one, 1390 .remove = ioc3_remove_one,
1391}; 1391};
1392 1392
1393static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) 1393static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1394{ 1394{
1395 unsigned long data; 1395 unsigned long data;
1396 struct ioc3_private *ip = netdev_priv(dev); 1396 struct ioc3_private *ip = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index ea55abd62ec7..703fbbefea44 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
697/* 697/*
698 * Transmit a packet (called by the kernel) 698 * Transmit a packet (called by the kernel)
699 */ 699 */
700static int meth_tx(struct sk_buff *skb, struct net_device *dev) 700static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
701{ 701{
702 struct meth_private *priv = netdev_priv(dev); 702 struct meth_private *priv = netdev_priv(dev);
703 unsigned long flags; 703 unsigned long flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index edf20361ea5f..324049eebb9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
33 select PHYLIB 33 select PHYLIB
34 select CRC32 34 select CRC32
35 select MII 35 select MII
36 depends on OF && COMMON_CLK && HAS_DMA 36 depends on OF && HAS_DMA
37 help 37 help
38 Support for chips using the snps,dwc-qos-ethernet.txt DT binding. 38 Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
39 39
@@ -57,7 +57,7 @@ config DWMAC_ANARION
57config DWMAC_IPQ806X 57config DWMAC_IPQ806X
58 tristate "QCA IPQ806x DWMAC support" 58 tristate "QCA IPQ806x DWMAC support"
59 default ARCH_QCOM 59 default ARCH_QCOM
60 depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST) 60 depends on OF && (ARCH_QCOM || COMPILE_TEST)
61 select MFD_SYSCON 61 select MFD_SYSCON
62 help 62 help
63 Support for QCA IPQ806X DWMAC Ethernet. 63 Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
100config DWMAC_ROCKCHIP 100config DWMAC_ROCKCHIP
101 tristate "Rockchip dwmac support" 101 tristate "Rockchip dwmac support"
102 default ARCH_ROCKCHIP 102 default ARCH_ROCKCHIP
103 depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST) 103 depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
104 select MFD_SYSCON 104 select MFD_SYSCON
105 help 105 help
106 Support for Ethernet controller on Rockchip RK3288 SoC. 106 Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
110 110
111config DWMAC_SOCFPGA 111config DWMAC_SOCFPGA
112 tristate "SOCFPGA dwmac support" 112 tristate "SOCFPGA dwmac support"
113 default ARCH_SOCFPGA 113 default (ARCH_SOCFPGA || ARCH_STRATIX10)
114 depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST) 114 depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
115 select MFD_SYSCON 115 select MFD_SYSCON
116 help 116 help
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
123config DWMAC_STI 123config DWMAC_STI
124 tristate "STi GMAC support" 124 tristate "STi GMAC support"
125 default ARCH_STI 125 default ARCH_STI
126 depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST) 126 depends on OF && (ARCH_STI || COMPILE_TEST)
127 select MFD_SYSCON 127 select MFD_SYSCON
128 ---help--- 128 ---help---
129 Support for ethernet controller on STi SOCs. 129 Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
147config DWMAC_SUNXI 147config DWMAC_SUNXI
148 tristate "Allwinner GMAC support" 148 tristate "Allwinner GMAC support"
149 default ARCH_SUNXI 149 default ARCH_SUNXI
150 depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST) 150 depends on OF && (ARCH_SUNXI || COMPILE_TEST)
151 ---help--- 151 ---help---
152 Support for Allwinner A20/A31 GMAC ethernet controllers. 152 Support for Allwinner A20/A31 GMAC ethernet controllers.
153 153
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1854f270ad66..b1b305f8f414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
258#define MAX_DMA_RIWT 0xff 258#define MAX_DMA_RIWT 0xff
259#define MIN_DMA_RIWT 0x20 259#define MIN_DMA_RIWT 0x20
260/* Tx coalesce parameters */ 260/* Tx coalesce parameters */
261#define STMMAC_COAL_TX_TIMER 40000 261#define STMMAC_COAL_TX_TIMER 1000
262#define STMMAC_MAX_COAL_TX_TICK 100000 262#define STMMAC_MAX_COAL_TX_TICK 100000
263#define STMMAC_TX_MAX_FRAMES 256 263#define STMMAC_TX_MAX_FRAMES 256
264#define STMMAC_TX_FRAMES 64 264#define STMMAC_TX_FRAMES 25
265 265
266/* Packets types */ 266/* Packets types */
267enum packets_types { 267enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 76649adf8fb0..63e1064b27a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
48 48
49/* Frequently used values are kept adjacent for cache effect */ 49/* Frequently used values are kept adjacent for cache effect */
50struct stmmac_tx_queue { 50struct stmmac_tx_queue {
51 u32 tx_count_frames;
52 struct timer_list txtimer;
51 u32 queue_index; 53 u32 queue_index;
52 struct stmmac_priv *priv_data; 54 struct stmmac_priv *priv_data;
53 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; 55 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
73 u32 rx_zeroc_thresh; 75 u32 rx_zeroc_thresh;
74 dma_addr_t dma_rx_phy; 76 dma_addr_t dma_rx_phy;
75 u32 rx_tail_addr; 77 u32 rx_tail_addr;
78};
79
80struct stmmac_channel {
76 struct napi_struct napi ____cacheline_aligned_in_smp; 81 struct napi_struct napi ____cacheline_aligned_in_smp;
82 struct stmmac_priv *priv_data;
83 u32 index;
84 int has_rx;
85 int has_tx;
77}; 86};
78 87
79struct stmmac_tc_entry { 88struct stmmac_tc_entry {
@@ -109,15 +118,12 @@ struct stmmac_pps_cfg {
109 118
110struct stmmac_priv { 119struct stmmac_priv {
111 /* Frequently used values are kept adjacent for cache effect */ 120 /* Frequently used values are kept adjacent for cache effect */
112 u32 tx_count_frames;
113 u32 tx_coal_frames; 121 u32 tx_coal_frames;
114 u32 tx_coal_timer; 122 u32 tx_coal_timer;
115 bool tx_timer_armed;
116 123
117 int tx_coalesce; 124 int tx_coalesce;
118 int hwts_tx_en; 125 int hwts_tx_en;
119 bool tx_path_in_lpi_mode; 126 bool tx_path_in_lpi_mode;
120 struct timer_list txtimer;
121 bool tso; 127 bool tso;
122 128
123 unsigned int dma_buf_sz; 129 unsigned int dma_buf_sz;
@@ -138,6 +144,9 @@ struct stmmac_priv {
138 /* TX Queue */ 144 /* TX Queue */
139 struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; 145 struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
140 146
147 /* Generic channel for NAPI */
148 struct stmmac_channel channel[STMMAC_CH_MAX];
149
141 bool oldlink; 150 bool oldlink;
142 int speed; 151 int speed;
143 int oldduplex; 152 int oldduplex;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ff1ffb46198a..75896d6ba6e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
148static void stmmac_disable_all_queues(struct stmmac_priv *priv) 148static void stmmac_disable_all_queues(struct stmmac_priv *priv)
149{ 149{
150 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 150 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
151 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
152 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
151 u32 queue; 153 u32 queue;
152 154
153 for (queue = 0; queue < rx_queues_cnt; queue++) { 155 for (queue = 0; queue < maxq; queue++) {
154 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 156 struct stmmac_channel *ch = &priv->channel[queue];
155 157
156 napi_disable(&rx_q->napi); 158 napi_disable(&ch->napi);
157 } 159 }
158} 160}
159 161
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
164static void stmmac_enable_all_queues(struct stmmac_priv *priv) 166static void stmmac_enable_all_queues(struct stmmac_priv *priv)
165{ 167{
166 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; 168 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
169 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
170 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
167 u32 queue; 171 u32 queue;
168 172
169 for (queue = 0; queue < rx_queues_cnt; queue++) { 173 for (queue = 0; queue < maxq; queue++) {
170 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 174 struct stmmac_channel *ch = &priv->channel[queue];
171 175
172 napi_enable(&rx_q->napi); 176 napi_enable(&ch->napi);
173 } 177 }
174} 178}
175 179
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1843 * @queue: TX queue index 1847 * @queue: TX queue index
1844 * Description: it reclaims the transmit resources after transmission completes. 1848 * Description: it reclaims the transmit resources after transmission completes.
1845 */ 1849 */
1846static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) 1850static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1847{ 1851{
1848 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1852 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1849 unsigned int bytes_compl = 0, pkts_compl = 0; 1853 unsigned int bytes_compl = 0, pkts_compl = 0;
1850 unsigned int entry; 1854 unsigned int entry, count = 0;
1851 1855
1852 netif_tx_lock(priv->dev); 1856 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1853 1857
1854 priv->xstats.tx_clean++; 1858 priv->xstats.tx_clean++;
1855 1859
1856 entry = tx_q->dirty_tx; 1860 entry = tx_q->dirty_tx;
1857 while (entry != tx_q->cur_tx) { 1861 while ((entry != tx_q->cur_tx) && (count < budget)) {
1858 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 1862 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1859 struct dma_desc *p; 1863 struct dma_desc *p;
1860 int status; 1864 int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1870 if (unlikely(status & tx_dma_own)) 1874 if (unlikely(status & tx_dma_own))
1871 break; 1875 break;
1872 1876
1877 count++;
1878
1873 /* Make sure descriptor fields are read after reading 1879 /* Make sure descriptor fields are read after reading
1874 * the own bit. 1880 * the own bit.
1875 */ 1881 */
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1937 stmmac_enable_eee_mode(priv); 1943 stmmac_enable_eee_mode(priv);
1938 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); 1944 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1939 } 1945 }
1940 netif_tx_unlock(priv->dev); 1946
1947 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1948
1949 return count;
1941} 1950}
1942 1951
1943/** 1952/**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2020 return false; 2029 return false;
2021} 2030}
2022 2031
2032static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2033{
2034 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2035 &priv->xstats, chan);
2036 struct stmmac_channel *ch = &priv->channel[chan];
2037 bool needs_work = false;
2038
2039 if ((status & handle_rx) && ch->has_rx) {
2040 needs_work = true;
2041 } else {
2042 status &= ~handle_rx;
2043 }
2044
2045 if ((status & handle_tx) && ch->has_tx) {
2046 needs_work = true;
2047 } else {
2048 status &= ~handle_tx;
2049 }
2050
2051 if (needs_work && napi_schedule_prep(&ch->napi)) {
2052 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2053 __napi_schedule(&ch->napi);
2054 }
2055
2056 return status;
2057}
2058
2023/** 2059/**
2024 * stmmac_dma_interrupt - DMA ISR 2060 * stmmac_dma_interrupt - DMA ISR
2025 * @priv: driver private structure 2061 * @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2034 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2070 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2035 tx_channel_count : rx_channel_count; 2071 tx_channel_count : rx_channel_count;
2036 u32 chan; 2072 u32 chan;
2037 bool poll_scheduled = false;
2038 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2073 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2039 2074
2040 /* Make sure we never check beyond our status buffer. */ 2075 /* Make sure we never check beyond our status buffer. */
2041 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2076 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2042 channels_to_check = ARRAY_SIZE(status); 2077 channels_to_check = ARRAY_SIZE(status);
2043 2078
2044 /* Each DMA channel can be used for rx and tx simultaneously, yet
2045 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2046 * stmmac_channel struct.
2047 * Because of this, stmmac_poll currently checks (and possibly wakes)
2048 * all tx queues rather than just a single tx queue.
2049 */
2050 for (chan = 0; chan < channels_to_check; chan++) 2079 for (chan = 0; chan < channels_to_check; chan++)
2051 status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2080 status[chan] = stmmac_napi_check(priv, chan);
2052 &priv->xstats, chan);
2053
2054 for (chan = 0; chan < rx_channel_count; chan++) {
2055 if (likely(status[chan] & handle_rx)) {
2056 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2057
2058 if (likely(napi_schedule_prep(&rx_q->napi))) {
2059 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2060 __napi_schedule(&rx_q->napi);
2061 poll_scheduled = true;
2062 }
2063 }
2064 }
2065
2066 /* If we scheduled poll, we already know that tx queues will be checked.
2067 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2068 * completed transmission, if so, call stmmac_poll (once).
2069 */
2070 if (!poll_scheduled) {
2071 for (chan = 0; chan < tx_channel_count; chan++) {
2072 if (status[chan] & handle_tx) {
2073 /* It doesn't matter what rx queue we choose
2074 * here. We use 0 since it always exists.
2075 */
2076 struct stmmac_rx_queue *rx_q =
2077 &priv->rx_queue[0];
2078
2079 if (likely(napi_schedule_prep(&rx_q->napi))) {
2080 stmmac_disable_dma_irq(priv,
2081 priv->ioaddr, chan);
2082 __napi_schedule(&rx_q->napi);
2083 }
2084 break;
2085 }
2086 }
2087 }
2088 2081
2089 for (chan = 0; chan < tx_channel_count; chan++) { 2082 for (chan = 0; chan < tx_channel_count; chan++) {
2090 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2083 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2220 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2213 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2221 tx_q->dma_tx_phy, chan); 2214 tx_q->dma_tx_phy, chan);
2222 2215
2223 tx_q->tx_tail_addr = tx_q->dma_tx_phy + 2216 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2224 (DMA_TX_SIZE * sizeof(struct dma_desc));
2225 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2217 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2226 tx_q->tx_tail_addr, chan); 2218 tx_q->tx_tail_addr, chan);
2227 } 2219 }
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2233 return ret; 2225 return ret;
2234} 2226}
2235 2227
2228static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2229{
2230 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2231
2232 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2233}
2234
2236/** 2235/**
2237 * stmmac_tx_timer - mitigation sw timer for tx. 2236 * stmmac_tx_timer - mitigation sw timer for tx.
2238 * @data: data pointer 2237 * @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2241 */ 2240 */
2242static void stmmac_tx_timer(struct timer_list *t) 2241static void stmmac_tx_timer(struct timer_list *t)
2243{ 2242{
2244 struct stmmac_priv *priv = from_timer(priv, t, txtimer); 2243 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2245 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2244 struct stmmac_priv *priv = tx_q->priv_data;
2246 u32 queue; 2245 struct stmmac_channel *ch;
2246
2247 ch = &priv->channel[tx_q->queue_index];
2247 2248
2248 /* let's scan all the tx queues */ 2249 if (likely(napi_schedule_prep(&ch->napi)))
2249 for (queue = 0; queue < tx_queues_count; queue++) 2250 __napi_schedule(&ch->napi);
2250 stmmac_tx_clean(priv, queue);
2251} 2251}
2252 2252
2253/** 2253/**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
2260 */ 2260 */
2261static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) 2261static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2262{ 2262{
2263 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2264 u32 chan;
2265
2263 priv->tx_coal_frames = STMMAC_TX_FRAMES; 2266 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2264 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; 2267 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2265 timer_setup(&priv->txtimer, stmmac_tx_timer, 0); 2268
2266 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); 2269 for (chan = 0; chan < tx_channel_count; chan++) {
2267 add_timer(&priv->txtimer); 2270 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2271
2272 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2273 }
2268} 2274}
2269 2275
2270static void stmmac_set_rings_length(struct stmmac_priv *priv) 2276static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
2592static int stmmac_open(struct net_device *dev) 2598static int stmmac_open(struct net_device *dev)
2593{ 2599{
2594 struct stmmac_priv *priv = netdev_priv(dev); 2600 struct stmmac_priv *priv = netdev_priv(dev);
2601 u32 chan;
2595 int ret; 2602 int ret;
2596 2603
2597 stmmac_check_ether_addr(priv); 2604 stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ irq_error:
2688 if (dev->phydev) 2695 if (dev->phydev)
2689 phy_stop(dev->phydev); 2696 phy_stop(dev->phydev);
2690 2697
2691 del_timer_sync(&priv->txtimer); 2698 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2699 del_timer_sync(&priv->tx_queue[chan].txtimer);
2700
2692 stmmac_hw_teardown(dev); 2701 stmmac_hw_teardown(dev);
2693init_error: 2702init_error:
2694 free_dma_desc_resources(priv); 2703 free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ dma_desc_error:
2708static int stmmac_release(struct net_device *dev) 2717static int stmmac_release(struct net_device *dev)
2709{ 2718{
2710 struct stmmac_priv *priv = netdev_priv(dev); 2719 struct stmmac_priv *priv = netdev_priv(dev);
2720 u32 chan;
2711 2721
2712 if (priv->eee_enabled) 2722 if (priv->eee_enabled)
2713 del_timer_sync(&priv->eee_ctrl_timer); 2723 del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
2722 2732
2723 stmmac_disable_all_queues(priv); 2733 stmmac_disable_all_queues(priv);
2724 2734
2725 del_timer_sync(&priv->txtimer); 2735 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2736 del_timer_sync(&priv->tx_queue[chan].txtimer);
2726 2737
2727 /* Free the IRQ lines */ 2738 /* Free the IRQ lines */
2728 free_irq(dev->irq, dev); 2739 free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2936 priv->xstats.tx_tso_nfrags += nfrags; 2947 priv->xstats.tx_tso_nfrags += nfrags;
2937 2948
2938 /* Manage tx mitigation */ 2949 /* Manage tx mitigation */
2939 priv->tx_count_frames += nfrags + 1; 2950 tx_q->tx_count_frames += nfrags + 1;
2940 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { 2951 if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2941 mod_timer(&priv->txtimer,
2942 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2943 } else {
2944 priv->tx_count_frames = 0;
2945 stmmac_set_tx_ic(priv, desc); 2952 stmmac_set_tx_ic(priv, desc);
2946 priv->xstats.tx_set_ic_bit++; 2953 priv->xstats.tx_set_ic_bit++;
2954 tx_q->tx_count_frames = 0;
2955 } else {
2956 stmmac_tx_timer_arm(priv, queue);
2947 } 2957 }
2948 2958
2949 skb_tx_timestamp(skb); 2959 skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2992 3002
2993 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3003 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2994 3004
3005 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
2995 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3006 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2996 3007
2997 return NETDEV_TX_OK; 3008 return NETDEV_TX_OK;
@@ -3146,17 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3146 * This approach takes care about the fragments: desc is the first 3157 * This approach takes care about the fragments: desc is the first
3147 * element in case of no SG. 3158 * element in case of no SG.
3148 */ 3159 */
3149 priv->tx_count_frames += nfrags + 1; 3160 tx_q->tx_count_frames += nfrags + 1;
3150 if (likely(priv->tx_coal_frames > priv->tx_count_frames) && 3161 if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3151 !priv->tx_timer_armed) {
3152 mod_timer(&priv->txtimer,
3153 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3154 priv->tx_timer_armed = true;
3155 } else {
3156 priv->tx_count_frames = 0;
3157 stmmac_set_tx_ic(priv, desc); 3162 stmmac_set_tx_ic(priv, desc);
3158 priv->xstats.tx_set_ic_bit++; 3163 priv->xstats.tx_set_ic_bit++;
3159 priv->tx_timer_armed = false; 3164 tx_q->tx_count_frames = 0;
3165 } else {
3166 stmmac_tx_timer_arm(priv, queue);
3160 } 3167 }
3161 3168
3162 skb_tx_timestamp(skb); 3169 skb_tx_timestamp(skb);
@@ -3202,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3202 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3209 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3203 3210
3204 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3211 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3212
3213 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3205 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3214 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3206 3215
3207 return NETDEV_TX_OK; 3216 return NETDEV_TX_OK;
@@ -3322,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3322static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 3331static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3323{ 3332{
3324 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3333 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3334 struct stmmac_channel *ch = &priv->channel[queue];
3325 unsigned int entry = rx_q->cur_rx; 3335 unsigned int entry = rx_q->cur_rx;
3326 int coe = priv->hw->rx_csum; 3336 int coe = priv->hw->rx_csum;
3327 unsigned int next_entry; 3337 unsigned int next_entry;
@@ -3494,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3494 else 3504 else
3495 skb->ip_summed = CHECKSUM_UNNECESSARY; 3505 skb->ip_summed = CHECKSUM_UNNECESSARY;
3496 3506
3497 napi_gro_receive(&rx_q->napi, skb); 3507 napi_gro_receive(&ch->napi, skb);
3498 3508
3499 priv->dev->stats.rx_packets++; 3509 priv->dev->stats.rx_packets++;
3500 priv->dev->stats.rx_bytes += frame_len; 3510 priv->dev->stats.rx_bytes += frame_len;
@@ -3517,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3517 * Description : 3527 * Description :
3518 * To look at the incoming frames and clear the tx resources. 3528 * To look at the incoming frames and clear the tx resources.
3519 */ 3529 */
3520static int stmmac_poll(struct napi_struct *napi, int budget) 3530static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3521{ 3531{
3522 struct stmmac_rx_queue *rx_q = 3532 struct stmmac_channel *ch =
3523 container_of(napi, struct stmmac_rx_queue, napi); 3533 container_of(napi, struct stmmac_channel, napi);
3524 struct stmmac_priv *priv = rx_q->priv_data; 3534 struct stmmac_priv *priv = ch->priv_data;
3525 u32 tx_count = priv->plat->tx_queues_to_use; 3535 int work_done = 0, work_rem = budget;
3526 u32 chan = rx_q->queue_index; 3536 u32 chan = ch->index;
3527 int work_done = 0;
3528 u32 queue;
3529 3537
3530 priv->xstats.napi_poll++; 3538 priv->xstats.napi_poll++;
3531 3539
3532 /* check all the queues */ 3540 if (ch->has_tx) {
3533 for (queue = 0; queue < tx_count; queue++) 3541 int done = stmmac_tx_clean(priv, work_rem, chan);
3534 stmmac_tx_clean(priv, queue);
3535 3542
3536 work_done = stmmac_rx(priv, budget, rx_q->queue_index); 3543 work_done += done;
3537 if (work_done < budget) { 3544 work_rem -= done;
3538 napi_complete_done(napi, work_done); 3545 }
3539 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3546
3547 if (ch->has_rx) {
3548 int done = stmmac_rx(priv, work_rem, chan);
3549
3550 work_done += done;
3551 work_rem -= done;
3540 } 3552 }
3553
3554 if (work_done < budget && napi_complete_done(napi, work_done))
3555 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3556
3541 return work_done; 3557 return work_done;
3542} 3558}
3543 3559
@@ -4201,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
4201{ 4217{
4202 struct net_device *ndev = NULL; 4218 struct net_device *ndev = NULL;
4203 struct stmmac_priv *priv; 4219 struct stmmac_priv *priv;
4220 u32 queue, maxq;
4204 int ret = 0; 4221 int ret = 0;
4205 u32 queue;
4206 4222
4207 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), 4223 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4208 MTL_MAX_TX_QUEUES, 4224 MTL_MAX_TX_QUEUES,
@@ -4325,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
4325 "Enable RX Mitigation via HW Watchdog Timer\n"); 4341 "Enable RX Mitigation via HW Watchdog Timer\n");
4326 } 4342 }
4327 4343
4328 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { 4344 /* Setup channels NAPI */
4329 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4345 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4330 4346
4331 netif_napi_add(ndev, &rx_q->napi, stmmac_poll, 4347 for (queue = 0; queue < maxq; queue++) {
4332 (8 * priv->plat->rx_queues_to_use)); 4348 struct stmmac_channel *ch = &priv->channel[queue];
4349
4350 ch->priv_data = priv;
4351 ch->index = queue;
4352
4353 if (queue < priv->plat->rx_queues_to_use)
4354 ch->has_rx = true;
4355 if (queue < priv->plat->tx_queues_to_use)
4356 ch->has_tx = true;
4357
4358 netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
4359 NAPI_POLL_WEIGHT);
4333 } 4360 }
4334 4361
4335 mutex_init(&priv->lock); 4362 mutex_init(&priv->lock);
@@ -4375,10 +4402,10 @@ error_netdev_register:
4375 priv->hw->pcs != STMMAC_PCS_RTBI) 4402 priv->hw->pcs != STMMAC_PCS_RTBI)
4376 stmmac_mdio_unregister(ndev); 4403 stmmac_mdio_unregister(ndev);
4377error_mdio_register: 4404error_mdio_register:
4378 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { 4405 for (queue = 0; queue < maxq; queue++) {
4379 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4406 struct stmmac_channel *ch = &priv->channel[queue];
4380 4407
4381 netif_napi_del(&rx_q->napi); 4408 netif_napi_del(&ch->napi);
4382 } 4409 }
4383error_hw_init: 4410error_hw_init:
4384 destroy_workqueue(priv->wq); 4411 destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3609c7b696c7..2b800ce1d5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
67 * Description: 67 * Description:
68 * This function validates the number of Unicast address entries supported 68 * This function validates the number of Unicast address entries supported
69 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller 69 * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
70 * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter 70 * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
71 * logic. This function validates a valid, supported configuration is 71 * logic. This function validates a valid, supported configuration is
72 * selected, and defaults to 1 Unicast address if an unsupported 72 * selected, and defaults to 1 Unicast address if an unsupported
73 * configuration is selected. 73 * configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
77 int x = ucast_entries; 77 int x = ucast_entries;
78 78
79 switch (x) { 79 switch (x) {
80 case 1: 80 case 1 ... 32:
81 case 32:
82 case 64: 81 case 64:
83 case 128: 82 case 128:
84 break; 83 break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1a96dd9c1091..531294f4978b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
61 struct stmmac_tc_entry *action_entry = entry; 61 struct stmmac_tc_entry *action_entry = entry;
62 const struct tc_action *act; 62 const struct tc_action *act;
63 struct tcf_exts *exts; 63 struct tcf_exts *exts;
64 LIST_HEAD(actions); 64 int i;
65 65
66 exts = cls->knode.exts; 66 exts = cls->knode.exts;
67 if (!tcf_exts_has_actions(exts)) 67 if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
69 if (frag) 69 if (frag)
70 action_entry = frag; 70 action_entry = frag;
71 71
72 tcf_exts_to_list(exts, &actions); 72 tcf_exts_for_each_action(i, act, exts) {
73 list_for_each_entry(act, &actions, list) {
74 /* Accept */ 73 /* Accept */
75 if (is_tcf_gact_ok(act)) { 74 if (is_tcf_gact_ok(act)) {
76 action_entry->val.af = 1; 75 action_entry->val.af = 1;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9263d638bd6d..f932923f7d56 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
41config TI_DAVINCI_CPDMA 41config TI_DAVINCI_CPDMA
42 tristate "TI DaVinci CPDMA Support" 42 tristate "TI DaVinci CPDMA Support"
43 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST 43 depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
44 select GENERIC_ALLOCATOR
44 ---help--- 45 ---help---
45 This driver supports TI's DaVinci CPDMA dma engine. 46 This driver supports TI's DaVinci CPDMA dma engine.
46 47
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0c1adad7415d..396e1cd10667 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
170 struct device_node *node; 170 struct device_node *node;
171 struct cpsw_phy_sel_priv *priv; 171 struct cpsw_phy_sel_priv *priv;
172 172
173 node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel"); 173 node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
174 if (!node) { 174 if (!node) {
175 dev_err(dev, "Phy mode driver DT not found\n"); 175 node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
176 return; 176 if (!node) {
177 dev_err(dev, "Phy mode driver DT not found\n");
178 return;
179 }
177 } 180 }
178 181
179 dev = bus_find_device(&platform_bus_type, NULL, node, match); 182 dev = bus_find_device(&platform_bus_type, NULL, node, match);
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2bdfb39215e9..d8ba512f166a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
835 w5100_tx_skb(priv->ndev, skb); 835 w5100_tx_skb(priv->ndev, skb);
836} 836}
837 837
838static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) 838static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
839{ 839{
840 struct w5100_priv *priv = netdev_priv(ndev); 840 struct w5100_priv *priv = netdev_priv(ndev);
841 841
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 56ae573001e8..80fdbff67d82 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
365 netif_wake_queue(ndev); 365 netif_wake_queue(ndev);
366} 366}
367 367
368static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) 368static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
369{ 369{
370 struct w5300_priv *priv = netdev_priv(ndev); 370 struct w5300_priv *priv = netdev_priv(ndev);
371 371
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 31c3d77b4733..fe01e141c8f8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
1203 1203
1204 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; 1204 net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1205 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; 1205 net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1206 netdev_info(ndev, "VF slot %u %s\n",
1207 net_device_ctx->vf_serial,
1208 net_device_ctx->vf_alloc ? "added" : "removed");
1206} 1209}
1207 1210
1208static void netvsc_receive_inband(struct net_device *ndev, 1211static void netvsc_receive_inband(struct net_device *ndev,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 507f68190cb1..3af6d8d15233 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -29,6 +29,7 @@
29#include <linux/netdevice.h> 29#include <linux/netdevice.h>
30#include <linux/inetdevice.h> 30#include <linux/inetdevice.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/pci.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/if_vlan.h> 34#include <linux/if_vlan.h>
34#include <linux/in.h> 35#include <linux/in.h>
@@ -1893,20 +1894,6 @@ out_unlock:
1893 rtnl_unlock(); 1894 rtnl_unlock();
1894} 1895}
1895 1896
1896static struct net_device *get_netvsc_bymac(const u8 *mac)
1897{
1898 struct net_device_context *ndev_ctx;
1899
1900 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
1901 struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
1902
1903 if (ether_addr_equal(mac, dev->perm_addr))
1904 return dev;
1905 }
1906
1907 return NULL;
1908}
1909
1910static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) 1897static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
1911{ 1898{
1912 struct net_device_context *net_device_ctx; 1899 struct net_device_context *net_device_ctx;
@@ -2035,22 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
2035 rtnl_unlock(); 2022 rtnl_unlock();
2036} 2023}
2037 2024
2025/* Find netvsc by VMBus serial number.
2026 * The PCI hyperv controller records the serial number as the slot.
2027 */
2028static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
2029{
2030 struct device *parent = vf_netdev->dev.parent;
2031 struct net_device_context *ndev_ctx;
2032 struct pci_dev *pdev;
2033
2034 if (!parent || !dev_is_pci(parent))
2035 return NULL; /* not a PCI device */
2036
2037 pdev = to_pci_dev(parent);
2038 if (!pdev->slot) {
2039 netdev_notice(vf_netdev, "no PCI slot information\n");
2040 return NULL;
2041 }
2042
2043 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2044 if (!ndev_ctx->vf_alloc)
2045 continue;
2046
2047 if (ndev_ctx->vf_serial == pdev->slot->number)
2048 return hv_get_drvdata(ndev_ctx->device_ctx);
2049 }
2050
2051 netdev_notice(vf_netdev,
2052 "no netdev found for slot %u\n", pdev->slot->number);
2053 return NULL;
2054}
2055
2038static int netvsc_register_vf(struct net_device *vf_netdev) 2056static int netvsc_register_vf(struct net_device *vf_netdev)
2039{ 2057{
2040 struct net_device *ndev;
2041 struct net_device_context *net_device_ctx; 2058 struct net_device_context *net_device_ctx;
2042 struct netvsc_device *netvsc_dev; 2059 struct netvsc_device *netvsc_dev;
2060 struct net_device *ndev;
2043 int ret; 2061 int ret;
2044 2062
2045 if (vf_netdev->addr_len != ETH_ALEN) 2063 if (vf_netdev->addr_len != ETH_ALEN)
2046 return NOTIFY_DONE; 2064 return NOTIFY_DONE;
2047 2065
2048 /* 2066 ndev = get_netvsc_byslot(vf_netdev);
2049 * We will use the MAC address to locate the synthetic interface to
2050 * associate with the VF interface. If we don't find a matching
2051 * synthetic interface, move on.
2052 */
2053 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
2054 if (!ndev) 2067 if (!ndev)
2055 return NOTIFY_DONE; 2068 return NOTIFY_DONE;
2056 2069
@@ -2201,6 +2214,16 @@ static int netvsc_probe(struct hv_device *dev,
2201 2214
2202 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2215 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2203 2216
2217 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2218 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
2219 * all subchannels to show up, but that may not happen because
2220 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2221 * -> ... -> device_add() -> ... -> __device_attach() can't get
2222 * the device lock, so all the subchannels can't be processed --
2223 * finally netvsc_subchan_work() hangs for ever.
2224 */
2225 rtnl_lock();
2226
2204 if (nvdev->num_chn > 1) 2227 if (nvdev->num_chn > 1)
2205 schedule_work(&nvdev->subchan_work); 2228 schedule_work(&nvdev->subchan_work);
2206 2229
@@ -2219,7 +2242,6 @@ static int netvsc_probe(struct hv_device *dev,
2219 else 2242 else
2220 net->max_mtu = ETH_DATA_LEN; 2243 net->max_mtu = ETH_DATA_LEN;
2221 2244
2222 rtnl_lock();
2223 ret = register_netdevice(net); 2245 ret = register_netdevice(net);
2224 if (ret != 0) { 2246 if (ret != 0) {
2225 pr_err("Unable to register netdev.\n"); 2247 pr_err("Unable to register netdev.\n");
@@ -2258,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
2258 2280
2259 cancel_delayed_work_sync(&ndev_ctx->dwork); 2281 cancel_delayed_work_sync(&ndev_ctx->dwork);
2260 2282
2261 rcu_read_lock(); 2283 rtnl_lock();
2262 nvdev = rcu_dereference(ndev_ctx->nvdev); 2284 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2263 2285 if (nvdev)
2264 if (nvdev)
2265 cancel_work_sync(&nvdev->subchan_work); 2286 cancel_work_sync(&nvdev->subchan_work);
2266 2287
2267 /* 2288 /*
2268 * Call to the vsc driver to let it know that the device is being 2289 * Call to the vsc driver to let it know that the device is being
2269 * removed. Also blocks mtu and channel changes. 2290 * removed. Also blocks mtu and channel changes.
2270 */ 2291 */
2271 rtnl_lock();
2272 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 2292 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2273 if (vf_netdev) 2293 if (vf_netdev)
2274 netvsc_unregister_vf(vf_netdev); 2294 netvsc_unregister_vf(vf_netdev);
@@ -2280,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
2280 list_del(&ndev_ctx->list); 2300 list_del(&ndev_ctx->list);
2281 2301
2282 rtnl_unlock(); 2302 rtnl_unlock();
2283 rcu_read_unlock();
2284 2303
2285 hv_set_drvdata(dev, NULL); 2304 hv_set_drvdata(dev, NULL);
2286 2305
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 23a52b9293f3..cd1d8faccca5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
1308{ 1308{
1309 struct adf7242_local *lp = spi_get_drvdata(spi); 1309 struct adf7242_local *lp = spi_get_drvdata(spi);
1310 1310
1311 if (!IS_ERR_OR_NULL(lp->debugfs_root)) 1311 debugfs_remove_recursive(lp->debugfs_root);
1312 debugfs_remove_recursive(lp->debugfs_root);
1313 1312
1314 cancel_delayed_work_sync(&lp->work); 1313 cancel_delayed_work_sync(&lp->work);
1315 destroy_workqueue(lp->wqueue); 1314 destroy_workqueue(lp->wqueue);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 58299fb666ed..0ff5a403a8dc 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
634 for (i = 0; i < len; i++) 634 for (i = 0; i < len; i++)
635 dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]); 635 dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
636 636
637 fifo_buffer = kmalloc(len, GFP_KERNEL); 637 fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
638 if (!fifo_buffer) 638 if (!fifo_buffer)
639 return -ENOMEM; 639 return -ENOMEM;
640 memcpy(fifo_buffer, buf, len);
641 kfifo_in(&test->up_fifo, &fifo_buffer, 4); 640 kfifo_in(&test->up_fifo, &fifo_buffer, 4);
642 wake_up_interruptible(&priv->test.readq); 641 wake_up_interruptible(&priv->test.readq);
643 642
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
3044{ 3043{
3045 struct ca8210_test *test = &priv->test; 3044 struct ca8210_test *test = &priv->test;
3046 3045
3047 if (!IS_ERR(test->ca8210_dfs_spi_int)) 3046 debugfs_remove(test->ca8210_dfs_spi_int);
3048 debugfs_remove(test->ca8210_dfs_spi_int);
3049 kfifo_free(&test->up_fifo); 3047 kfifo_free(&test->up_fifo);
3050 dev_info(&priv->spi->dev, "Test interface removed\n"); 3048 dev_info(&priv->spi->dev, "Test interface removed\n");
3051} 3049}
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index e428277781ac..04891429a554 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
903 903
904 switch (seq_state) { 904 switch (seq_state) {
905 /* TX IRQ, RX IRQ and SEQ IRQ */ 905 /* TX IRQ, RX IRQ and SEQ IRQ */
906 case (0x03): 906 case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
907 if (lp->is_tx) { 907 if (lp->is_tx) {
908 lp->is_tx = 0; 908 lp->is_tx = 0;
909 dev_dbg(printdev(lp), "TX is done. No ACK\n"); 909 dev_dbg(printdev(lp), "TX is done. No ACK\n");
910 mcr20a_handle_tx_complete(lp); 910 mcr20a_handle_tx_complete(lp);
911 } 911 }
912 break; 912 break;
913 case (0x05): 913 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
914 /* rx is starting */ 914 /* rx is starting */
915 dev_dbg(printdev(lp), "RX is starting\n"); 915 dev_dbg(printdev(lp), "RX is starting\n");
916 mcr20a_handle_rx(lp); 916 mcr20a_handle_rx(lp);
917 break; 917 break;
918 case (0x07): 918 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
919 if (lp->is_tx) { 919 if (lp->is_tx) {
920 /* tx is done */ 920 /* tx is done */
921 lp->is_tx = 0; 921 lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
927 mcr20a_handle_rx(lp); 927 mcr20a_handle_rx(lp);
928 } 928 }
929 break; 929 break;
930 case (0x01): 930 case (DAR_IRQSTS1_SEQIRQ):
931 if (lp->is_tx) { 931 if (lp->is_tx) {
932 dev_dbg(printdev(lp), "TX is starting\n"); 932 dev_dbg(printdev(lp), "TX is starting\n");
933 mcr20a_handle_tx(lp); 933 mcr20a_handle_tx(lp);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1172db1e7c..19ab8a7d1e48 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
93 if (!netdev) 93 if (!netdev)
94 return !phydev->suspended; 94 return !phydev->suspended;
95 95
96 /* Don't suspend PHY if the attached netdev parent may wakeup. 96 if (netdev->wol_enabled)
97 return false;
98
99 /* As long as not all affected network drivers support the
100 * wol_enabled flag, let's check for hints that WoL is enabled.
101 * Don't suspend PHY if the attached netdev parent may wake up.
97 * The parent may point to a PCI device, as in tg3 driver. 102 * The parent may point to a PCI device, as in tg3 driver.
98 */ 103 */
99 if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) 104 if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
1132 sysfs_remove_link(&dev->dev.kobj, "phydev"); 1137 sysfs_remove_link(&dev->dev.kobj, "phydev");
1133 sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev"); 1138 sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
1134 } 1139 }
1140 phy_suspend(phydev);
1135 phydev->attached_dev->phydev = NULL; 1141 phydev->attached_dev->phydev = NULL;
1136 phydev->attached_dev = NULL; 1142 phydev->attached_dev = NULL;
1137 phy_suspend(phydev);
1138 phydev->phylink = NULL; 1143 phydev->phylink = NULL;
1139 1144
1140 phy_led_triggers_unregister(phydev); 1145 phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
1168int phy_suspend(struct phy_device *phydev) 1173int phy_suspend(struct phy_device *phydev)
1169{ 1174{
1170 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); 1175 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1176 struct net_device *netdev = phydev->attached_dev;
1171 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1177 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1172 int ret = 0; 1178 int ret = 0;
1173 1179
1174 /* If the device has WOL enabled, we cannot suspend the PHY */ 1180 /* If the device has WOL enabled, we cannot suspend the PHY */
1175 phy_ethtool_get_wol(phydev, &wol); 1181 phy_ethtool_get_wol(phydev, &wol);
1176 if (wol.wolopts) 1182 if (wol.wolopts || (netdev && netdev->wol_enabled))
1177 return -EBUSY; 1183 return -EBUSY;
1178 1184
1179 if (phydev->drv && phydrv->suspend) 1185 if (phydev->drv && phydrv->suspend)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 740655261e5b..83060fb349f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
349 } 349 }
350 if (bus->started) 350 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 351 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus;
352 bus->registered = true; 353 bus->registered = true;
353 return 0; 354 return 0;
354} 355}
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
357{ 358{
358 const struct sfp_upstream_ops *ops = bus->upstream_ops; 359 const struct sfp_upstream_ops *ops = bus->upstream_ops;
359 360
361 bus->netdev->sfp_bus = NULL;
360 if (bus->registered) { 362 if (bus->registered) {
361 if (bus->started) 363 if (bus->started)
362 bus->socket_ops->stop(bus->sfp); 364 bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
438{ 440{
439 bus->upstream_ops = NULL; 441 bus->upstream_ops = NULL;
440 bus->upstream = NULL; 442 bus->upstream = NULL;
441 bus->netdev->sfp_bus = NULL;
442 bus->netdev = NULL; 443 bus->netdev = NULL;
443} 444}
444 445
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
467 bus->upstream_ops = ops; 468 bus->upstream_ops = ops;
468 bus->upstream = upstream; 469 bus->upstream = upstream;
469 bus->netdev = ndev; 470 bus->netdev = ndev;
470 ndev->sfp_bus = bus;
471 471
472 if (bus->sfp) { 472 if (bus->sfp) {
473 ret = sfp_register_bus(bus); 473 ret = sfp_register_bus(bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 4637d980310e..6e13b8832bc7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
398 switch (type) { 398 switch (type) {
399 case hwmon_temp: 399 case hwmon_temp:
400 switch (attr) { 400 switch (attr) {
401 case hwmon_temp_input:
402 case hwmon_temp_min_alarm: 401 case hwmon_temp_min_alarm:
403 case hwmon_temp_max_alarm: 402 case hwmon_temp_max_alarm:
404 case hwmon_temp_lcrit_alarm: 403 case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
407 case hwmon_temp_max: 406 case hwmon_temp_max:
408 case hwmon_temp_lcrit: 407 case hwmon_temp_lcrit:
409 case hwmon_temp_crit: 408 case hwmon_temp_crit:
409 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
410 return 0;
411 /* fall through */
412 case hwmon_temp_input:
410 return 0444; 413 return 0444;
411 default: 414 default:
412 return 0; 415 return 0;
413 } 416 }
414 case hwmon_in: 417 case hwmon_in:
415 switch (attr) { 418 switch (attr) {
416 case hwmon_in_input:
417 case hwmon_in_min_alarm: 419 case hwmon_in_min_alarm:
418 case hwmon_in_max_alarm: 420 case hwmon_in_max_alarm:
419 case hwmon_in_lcrit_alarm: 421 case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
422 case hwmon_in_max: 424 case hwmon_in_max:
423 case hwmon_in_lcrit: 425 case hwmon_in_lcrit:
424 case hwmon_in_crit: 426 case hwmon_in_crit:
427 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
428 return 0;
429 /* fall through */
430 case hwmon_in_input:
425 return 0444; 431 return 0444;
426 default: 432 default:
427 return 0; 433 return 0;
428 } 434 }
429 case hwmon_curr: 435 case hwmon_curr:
430 switch (attr) { 436 switch (attr) {
431 case hwmon_curr_input:
432 case hwmon_curr_min_alarm: 437 case hwmon_curr_min_alarm:
433 case hwmon_curr_max_alarm: 438 case hwmon_curr_max_alarm:
434 case hwmon_curr_lcrit_alarm: 439 case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
437 case hwmon_curr_max: 442 case hwmon_curr_max:
438 case hwmon_curr_lcrit: 443 case hwmon_curr_lcrit:
439 case hwmon_curr_crit: 444 case hwmon_curr_crit:
445 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
446 return 0;
447 /* fall through */
448 case hwmon_curr_input:
440 return 0444; 449 return 0444;
441 default: 450 default:
442 return 0; 451 return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
452 channel == 1) 461 channel == 1)
453 return 0; 462 return 0;
454 switch (attr) { 463 switch (attr) {
455 case hwmon_power_input:
456 case hwmon_power_min_alarm: 464 case hwmon_power_min_alarm:
457 case hwmon_power_max_alarm: 465 case hwmon_power_max_alarm:
458 case hwmon_power_lcrit_alarm: 466 case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
461 case hwmon_power_max: 469 case hwmon_power_max:
462 case hwmon_power_lcrit: 470 case hwmon_power_lcrit:
463 case hwmon_power_crit: 471 case hwmon_power_crit:
472 if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
473 return 0;
474 /* fall through */
475 case hwmon_power_input:
464 return 0444; 476 return 0444;
465 default: 477 default:
466 return 0; 478 return 0;
@@ -1086,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
1086 1098
1087static void sfp_hwmon_remove(struct sfp *sfp) 1099static void sfp_hwmon_remove(struct sfp *sfp)
1088{ 1100{
1089 hwmon_device_unregister(sfp->hwmon_dev); 1101 if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
1090 kfree(sfp->hwmon_name); 1102 hwmon_device_unregister(sfp->hwmon_dev);
1103 sfp->hwmon_dev = NULL;
1104 kfree(sfp->hwmon_name);
1105 }
1091} 1106}
1092#else 1107#else
1093static int sfp_hwmon_insert(struct sfp *sfp) 1108static int sfp_hwmon_insert(struct sfp *sfp)
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ce61231e96ea..62dc564b251d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
429 if (!skb) 429 if (!skb)
430 goto out; 430 goto out;
431 431
432 if (skb_mac_header_len(skb) < ETH_HLEN)
433 goto drop;
434
432 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 435 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
433 goto drop; 436 goto drop;
434 437
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ebd07ad82431..50e9cc19023a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
181 }; 181 };
182 struct napi_struct napi; 182 struct napi_struct napi;
183 bool napi_enabled; 183 bool napi_enabled;
184 bool napi_frags_enabled;
184 struct mutex napi_mutex; /* Protects access to the above napi */ 185 struct mutex napi_mutex; /* Protects access to the above napi */
185 struct list_head next; 186 struct list_head next;
186 struct tun_struct *detached; 187 struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
313} 314}
314 315
315static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, 316static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
316 bool napi_en) 317 bool napi_en, bool napi_frags)
317{ 318{
318 tfile->napi_enabled = napi_en; 319 tfile->napi_enabled = napi_en;
320 tfile->napi_frags_enabled = napi_en && napi_frags;
319 if (napi_en) { 321 if (napi_en) {
320 netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, 322 netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
321 NAPI_POLL_WEIGHT); 323 NAPI_POLL_WEIGHT);
322 napi_enable(&tfile->napi); 324 napi_enable(&tfile->napi);
323 mutex_init(&tfile->napi_mutex);
324 } 325 }
325} 326}
326 327
327static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile) 328static void tun_napi_disable(struct tun_file *tfile)
328{ 329{
329 if (tfile->napi_enabled) 330 if (tfile->napi_enabled)
330 napi_disable(&tfile->napi); 331 napi_disable(&tfile->napi);
331} 332}
332 333
333static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile) 334static void tun_napi_del(struct tun_file *tfile)
334{ 335{
335 if (tfile->napi_enabled) 336 if (tfile->napi_enabled)
336 netif_napi_del(&tfile->napi); 337 netif_napi_del(&tfile->napi);
337} 338}
338 339
339static bool tun_napi_frags_enabled(const struct tun_struct *tun) 340static bool tun_napi_frags_enabled(const struct tun_file *tfile)
340{ 341{
341 return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS; 342 return tfile->napi_frags_enabled;
342} 343}
343 344
344#ifdef CONFIG_TUN_VNET_CROSS_LE 345#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
690 tun = rtnl_dereference(tfile->tun); 691 tun = rtnl_dereference(tfile->tun);
691 692
692 if (tun && clean) { 693 if (tun && clean) {
693 tun_napi_disable(tun, tfile); 694 tun_napi_disable(tfile);
694 tun_napi_del(tun, tfile); 695 tun_napi_del(tfile);
695 } 696 }
696 697
697 if (tun && !tfile->detached) { 698 if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
758 for (i = 0; i < n; i++) { 759 for (i = 0; i < n; i++) {
759 tfile = rtnl_dereference(tun->tfiles[i]); 760 tfile = rtnl_dereference(tun->tfiles[i]);
760 BUG_ON(!tfile); 761 BUG_ON(!tfile);
761 tun_napi_disable(tun, tfile); 762 tun_napi_disable(tfile);
762 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 763 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
763 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 764 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
764 RCU_INIT_POINTER(tfile->tun, NULL); 765 RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
774 synchronize_net(); 775 synchronize_net();
775 for (i = 0; i < n; i++) { 776 for (i = 0; i < n; i++) {
776 tfile = rtnl_dereference(tun->tfiles[i]); 777 tfile = rtnl_dereference(tun->tfiles[i]);
777 tun_napi_del(tun, tfile); 778 tun_napi_del(tfile);
778 /* Drop read queue */ 779 /* Drop read queue */
779 tun_queue_purge(tfile); 780 tun_queue_purge(tfile);
780 xdp_rxq_info_unreg(&tfile->xdp_rxq); 781 xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
793} 794}
794 795
795static int tun_attach(struct tun_struct *tun, struct file *file, 796static int tun_attach(struct tun_struct *tun, struct file *file,
796 bool skip_filter, bool napi) 797 bool skip_filter, bool napi, bool napi_frags)
797{ 798{
798 struct tun_file *tfile = file->private_data; 799 struct tun_file *tfile = file->private_data;
799 struct net_device *dev = tun->dev; 800 struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
866 tun_enable_queue(tfile); 867 tun_enable_queue(tfile);
867 } else { 868 } else {
868 sock_hold(&tfile->sk); 869 sock_hold(&tfile->sk);
869 tun_napi_init(tun, tfile, napi); 870 tun_napi_init(tun, tfile, napi, napi_frags);
870 } 871 }
871 872
872 tun_set_real_num_queues(tun); 873 tun_set_real_num_queues(tun);
@@ -1153,43 +1154,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
1153 1154
1154 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 1155 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1155} 1156}
1156#ifdef CONFIG_NET_POLL_CONTROLLER
1157static void tun_poll_controller(struct net_device *dev)
1158{
1159 /*
1160 * Tun only receives frames when:
1161 * 1) the char device endpoint gets data from user space
1162 * 2) the tun socket gets a sendmsg call from user space
1163 * If NAPI is not enabled, since both of those are synchronous
1164 * operations, we are guaranteed never to have pending data when we poll
1165 * for it so there is nothing to do here but return.
1166 * We need this though so netpoll recognizes us as an interface that
1167 * supports polling, which enables bridge devices in virt setups to
1168 * still use netconsole
1169 * If NAPI is enabled, however, we need to schedule polling for all
1170 * queues unless we are using napi_gro_frags(), which we call in
1171 * process context and not in NAPI context.
1172 */
1173 struct tun_struct *tun = netdev_priv(dev);
1174
1175 if (tun->flags & IFF_NAPI) {
1176 struct tun_file *tfile;
1177 int i;
1178
1179 if (tun_napi_frags_enabled(tun))
1180 return;
1181
1182 rcu_read_lock();
1183 for (i = 0; i < tun->numqueues; i++) {
1184 tfile = rcu_dereference(tun->tfiles[i]);
1185 if (tfile->napi_enabled)
1186 napi_schedule(&tfile->napi);
1187 }
1188 rcu_read_unlock();
1189 }
1190 return;
1191}
1192#endif
1193 1157
1194static void tun_set_headroom(struct net_device *dev, int new_hr) 1158static void tun_set_headroom(struct net_device *dev, int new_hr)
1195{ 1159{
@@ -1283,9 +1247,6 @@ static const struct net_device_ops tun_netdev_ops = {
1283 .ndo_start_xmit = tun_net_xmit, 1247 .ndo_start_xmit = tun_net_xmit,
1284 .ndo_fix_features = tun_net_fix_features, 1248 .ndo_fix_features = tun_net_fix_features,
1285 .ndo_select_queue = tun_select_queue, 1249 .ndo_select_queue = tun_select_queue,
1286#ifdef CONFIG_NET_POLL_CONTROLLER
1287 .ndo_poll_controller = tun_poll_controller,
1288#endif
1289 .ndo_set_rx_headroom = tun_set_headroom, 1250 .ndo_set_rx_headroom = tun_set_headroom,
1290 .ndo_get_stats64 = tun_net_get_stats64, 1251 .ndo_get_stats64 = tun_net_get_stats64,
1291}; 1252};
@@ -1365,9 +1326,6 @@ static const struct net_device_ops tap_netdev_ops = {
1365 .ndo_set_mac_address = eth_mac_addr, 1326 .ndo_set_mac_address = eth_mac_addr,
1366 .ndo_validate_addr = eth_validate_addr, 1327 .ndo_validate_addr = eth_validate_addr,
1367 .ndo_select_queue = tun_select_queue, 1328 .ndo_select_queue = tun_select_queue,
1368#ifdef CONFIG_NET_POLL_CONTROLLER
1369 .ndo_poll_controller = tun_poll_controller,
1370#endif
1371 .ndo_features_check = passthru_features_check, 1329 .ndo_features_check = passthru_features_check,
1372 .ndo_set_rx_headroom = tun_set_headroom, 1330 .ndo_set_rx_headroom = tun_set_headroom,
1373 .ndo_get_stats64 = tun_net_get_stats64, 1331 .ndo_get_stats64 = tun_net_get_stats64,
@@ -1752,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1752 int err; 1710 int err;
1753 u32 rxhash = 0; 1711 u32 rxhash = 0;
1754 int skb_xdp = 1; 1712 int skb_xdp = 1;
1755 bool frags = tun_napi_frags_enabled(tun); 1713 bool frags = tun_napi_frags_enabled(tfile);
1756 1714
1757 if (!(tun->dev->flags & IFF_UP)) 1715 if (!(tun->dev->flags & IFF_UP))
1758 return -EIO; 1716 return -EIO;
@@ -2577,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2577 return err; 2535 return err;
2578 2536
2579 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2537 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2580 ifr->ifr_flags & IFF_NAPI); 2538 ifr->ifr_flags & IFF_NAPI,
2539 ifr->ifr_flags & IFF_NAPI_FRAGS);
2581 if (err < 0) 2540 if (err < 0)
2582 return err; 2541 return err;
2583 2542
@@ -2675,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2675 (ifr->ifr_flags & TUN_FEATURES); 2634 (ifr->ifr_flags & TUN_FEATURES);
2676 2635
2677 INIT_LIST_HEAD(&tun->disabled); 2636 INIT_LIST_HEAD(&tun->disabled);
2678 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2637 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2638 ifr->ifr_flags & IFF_NAPI_FRAGS);
2679 if (err < 0) 2639 if (err < 0)
2680 goto err_free_flow; 2640 goto err_free_flow;
2681 2641
@@ -2824,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
2824 ret = security_tun_dev_attach_queue(tun->security); 2784 ret = security_tun_dev_attach_queue(tun->security);
2825 if (ret < 0) 2785 if (ret < 0)
2826 goto unlock; 2786 goto unlock;
2827 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 2787 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2788 tun->flags & IFF_NAPI_FRAGS);
2828 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2789 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2829 tun = rtnl_dereference(tfile->tun); 2790 tun = rtnl_dereference(tfile->tun);
2830 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 2791 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3242,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
3242 return -ENOMEM; 3203 return -ENOMEM;
3243 } 3204 }
3244 3205
3206 mutex_init(&tfile->napi_mutex);
3245 RCU_INIT_POINTER(tfile->tun, NULL); 3207 RCU_INIT_POINTER(tfile->tun, NULL);
3246 tfile->flags = 0; 3208 tfile->flags = 0;
3247 tfile->ifindex = 0; 3209 tfile->ifindex = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e95dd12edec4..023b8d0bf175 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
607 struct usbnet *dev = netdev_priv(net); 607 struct usbnet *dev = netdev_priv(net);
608 u8 opt = 0; 608 u8 opt = 0;
609 609
610 if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
611 return -EINVAL;
612
610 if (wolinfo->wolopts & WAKE_PHY) 613 if (wolinfo->wolopts & WAKE_PHY)
611 opt |= AX_MONITOR_LINK; 614 opt |= AX_MONITOR_LINK;
612 if (wolinfo->wolopts & WAKE_MAGIC) 615 if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 9e8ad372f419..2207f7a7d1ff 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
566 struct usbnet *dev = netdev_priv(net); 566 struct usbnet *dev = netdev_priv(net);
567 u8 opt = 0; 567 u8 opt = 0;
568 568
569 if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
570 return -EINVAL;
571
569 if (wolinfo->wolopts & WAKE_PHY) 572 if (wolinfo->wolopts & WAKE_PHY)
570 opt |= AX_MONITOR_MODE_RWLC; 573 opt |= AX_MONITOR_MODE_RWLC;
571 if (wolinfo->wolopts & WAKE_MAGIC) 574 if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a9991c5f4736..c3c9ba44e2a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
1401 if (ret < 0) 1401 if (ret < 0)
1402 return ret; 1402 return ret;
1403 1403
1404 pdata->wol = 0; 1404 if (wol->wolopts & ~WAKE_ALL)
1405 if (wol->wolopts & WAKE_UCAST) 1405 return -EINVAL;
1406 pdata->wol |= WAKE_UCAST; 1406
1407 if (wol->wolopts & WAKE_MCAST) 1407 pdata->wol = wol->wolopts;
1408 pdata->wol |= WAKE_MCAST;
1409 if (wol->wolopts & WAKE_BCAST)
1410 pdata->wol |= WAKE_BCAST;
1411 if (wol->wolopts & WAKE_MAGIC)
1412 pdata->wol |= WAKE_MAGIC;
1413 if (wol->wolopts & WAKE_PHY)
1414 pdata->wol |= WAKE_PHY;
1415 if (wol->wolopts & WAKE_ARP)
1416 pdata->wol |= WAKE_ARP;
1417 1408
1418 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); 1409 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1419 1410
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cb0cc30c3d6a..533b6fb8d923 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), 967 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
968 .driver_info = (unsigned long)&qmi_wwan_info, 968 .driver_info = (unsigned long)&qmi_wwan_info,
969 }, 969 },
970 { /* Quectel EP06/EG06/EM06 */
971 USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
972 USB_CLASS_VENDOR_SPEC,
973 USB_SUBCLASS_VENDOR_SPEC,
974 0xff),
975 .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
976 },
970 977
971 /* 3. Combined interface devices matching on interface number */ 978 /* 3. Combined interface devices matching on interface number */
972 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 979 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = {
1206 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 1213 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
1207 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ 1214 {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
1208 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ 1215 {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
1209 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ 1216 {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
1210 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1217 {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
1211 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ 1218 {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
1212 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ 1219 {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
1213 {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ 1220 {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1214 {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ 1221 {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
1215 {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ 1222 {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1216 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 1223 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1217 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1224 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1218 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1225 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1262 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
1256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ 1263 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
1257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1264 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
1258 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
1259 1265
1260 /* 4. Gobi 1000 devices */ 1266 /* 4. Gobi 1000 devices */
1261 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 1267 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
1331 return false; 1337 return false;
1332} 1338}
1333 1339
1340static bool quectel_ep06_diag_detected(struct usb_interface *intf)
1341{
1342 struct usb_device *dev = interface_to_usbdev(intf);
1343 struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
1344
1345 if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
1346 le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
1347 intf_desc.bNumEndpoints == 2)
1348 return true;
1349
1350 return false;
1351}
1352
1334static int qmi_wwan_probe(struct usb_interface *intf, 1353static int qmi_wwan_probe(struct usb_interface *intf,
1335 const struct usb_device_id *prod) 1354 const struct usb_device_id *prod)
1336{ 1355{
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
1365 return -ENODEV; 1384 return -ENODEV;
1366 } 1385 }
1367 1386
1387 /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
1388 * we need to match on class/subclass/protocol. These values are
1389 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
1390 * different. Ignore the current interface if the number of endpoints
1391 * the number for the diag interface (two).
1392 */
1393 if (quectel_ep06_diag_detected(intf))
1394 return -ENODEV;
1395
1368 return usbnet_probe(intf, id); 1396 return usbnet_probe(intf, id);
1369} 1397}
1370 1398
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 97742708460b..f1b5201cc320 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4506 if (!rtl_can_wakeup(tp)) 4506 if (!rtl_can_wakeup(tp))
4507 return -EOPNOTSUPP; 4507 return -EOPNOTSUPP;
4508 4508
4509 if (wol->wolopts & ~WAKE_ANY)
4510 return -EINVAL;
4511
4509 ret = usb_autopm_get_interface(tp->intf); 4512 ret = usb_autopm_get_interface(tp->intf);
4510 if (ret < 0) 4513 if (ret < 0)
4511 goto out_set_wol; 4514 goto out_set_wol;
@@ -5217,8 +5220,8 @@ static int rtl8152_probe(struct usb_interface *intf,
5217 netdev->hw_features &= ~NETIF_F_RXCSUM; 5220 netdev->hw_features &= ~NETIF_F_RXCSUM;
5218 } 5221 }
5219 5222
5220 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && 5223 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
5221 udev->serial && !strcmp(udev->serial, "000001000000")) { 5224 (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
5222 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation"); 5225 dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
5223 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags); 5226 set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
5224 } 5227 }
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 05553d252446..e5a4cbb366dc 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
731 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 731 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
732 int ret; 732 int ret;
733 733
734 if (wolinfo->wolopts & ~SUPPORTED_WAKE)
735 return -EINVAL;
736
734 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; 737 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
735 738
736 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); 739 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 06b4d290784d..262e7a3c23cb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
774 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 774 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
775 int ret; 775 int ret;
776 776
777 if (wolinfo->wolopts & ~SUPPORTED_WAKE)
778 return -EINVAL;
779
777 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; 780 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
778 781
779 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts); 782 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 9277a0f228df..35f39f23d881 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
421 struct usbnet *dev = netdev_priv(net); 421 struct usbnet *dev = netdev_priv(net);
422 u8 opt = 0; 422 u8 opt = 0;
423 423
424 if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
425 return -EINVAL;
426
424 if (wolinfo->wolopts & WAKE_PHY) 427 if (wolinfo->wolopts & WAKE_PHY)
425 opt |= SR_MONITOR_LINK; 428 opt |= SR_MONITOR_LINK;
426 if (wolinfo->wolopts & WAKE_MAGIC) 429 if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8d679c8b7f25..41a00cd76955 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
463 int mac_len, delta, off; 463 int mac_len, delta, off;
464 struct xdp_buff xdp; 464 struct xdp_buff xdp;
465 465
466 skb_orphan(skb);
467
466 rcu_read_lock(); 468 rcu_read_lock();
467 xdp_prog = rcu_dereference(rq->xdp_prog); 469 xdp_prog = rcu_dereference(rq->xdp_prog);
468 if (unlikely(!xdp_prog)) { 470 if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
508 skb_copy_header(nskb, skb); 510 skb_copy_header(nskb, skb);
509 head_off = skb_headroom(nskb) - skb_headroom(skb); 511 head_off = skb_headroom(nskb) - skb_headroom(skb);
510 skb_headers_offset_update(nskb, head_off); 512 skb_headers_offset_update(nskb, head_off);
511 if (skb->sk)
512 skb_set_owner_w(nskb, skb->sk);
513 consume_skb(skb); 513 consume_skb(skb);
514 skb = nskb; 514 skb = nskb;
515 } 515 }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 765920905226..dab504ec5e50 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
1699 tot->rx_frame_errors = dev->stats.rx_frame_errors; 1699 tot->rx_frame_errors = dev->stats.rx_frame_errors;
1700} 1700}
1701 1701
1702#ifdef CONFIG_NET_POLL_CONTROLLER
1703static void virtnet_netpoll(struct net_device *dev)
1704{
1705 struct virtnet_info *vi = netdev_priv(dev);
1706 int i;
1707
1708 for (i = 0; i < vi->curr_queue_pairs; i++)
1709 napi_schedule(&vi->rq[i].napi);
1710}
1711#endif
1712
1713static void virtnet_ack_link_announce(struct virtnet_info *vi) 1702static void virtnet_ack_link_announce(struct virtnet_info *vi)
1714{ 1703{
1715 rtnl_lock(); 1704 rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
2447 .ndo_get_stats64 = virtnet_stats, 2436 .ndo_get_stats64 = virtnet_stats,
2448 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 2437 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2449 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 2438 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2450#ifdef CONFIG_NET_POLL_CONTROLLER
2451 .ndo_poll_controller = virtnet_netpoll,
2452#endif
2453 .ndo_bpf = virtnet_xdp, 2439 .ndo_bpf = virtnet_xdp,
2454 .ndo_xdp_xmit = virtnet_xdp_xmit, 2440 .ndo_xdp_xmit = virtnet_xdp_xmit,
2455 .ndo_features_check = passthru_features_check, 2441 .ndo_features_check = passthru_features_check,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ababba37d735..2b8da2b7e721 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
3539 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */ 3539 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3540 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ 3540 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3541 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 3541 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3542 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
3542 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 3543 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3543 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ 3544 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3544 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 3545 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3603 } 3604 }
3604 3605
3605 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || 3606 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3607 nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
3608 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
3606 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || 3609 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3607 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || 3610 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3608 nla_put_u8(skb, IFLA_VXLAN_LEARNING, 3611 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 094cea775d0c..ef298d8525c5 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -257,7 +257,7 @@ static const struct
257 [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, 257 [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
258 [I2400M_MS_BUSY] = { "busy", -EBUSY }, 258 [I2400M_MS_BUSY] = { "busy", -EBUSY },
259 [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, 259 [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
260 [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ }, 260 [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
261 [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, 261 [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
262 [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, 262 [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
263 [I2400M_MS_NO_RF] = { "no RF", -EIO }, 263 [I2400M_MS_NO_RF] = { "no RF", -EIO },
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 6b0e1ec346cb..d46d57b989ae 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1518 } 1518 }
1519 } else { 1519 } else {
1520 /* More than a single header/data pair were missed. 1520 /* More than a single header/data pair were missed.
1521 * Report this error, and reset the controller to 1521 * Report this error. If running with open-source
1522 * firmware, then reset the controller to
1522 * revive operation. 1523 * revive operation.
1523 */ 1524 */
1524 b43dbg(dev->wl, 1525 b43dbg(dev->wl,
1525 "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", 1526 "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
1526 ring->index, firstused, slot); 1527 ring->index, firstused, slot);
1527 b43_controller_restart(dev, "Out of order TX"); 1528 if (dev->fw.opensource)
1529 b43_controller_restart(dev, "Out of order TX");
1528 return; 1530 return;
1529 } 1531 }
1530 } 1532 }
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 591687984962..497fd766d87c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -51,6 +51,7 @@
51 51
52static const struct iwl_base_params iwl1000_base_params = { 52static const struct iwl_base_params iwl1000_base_params = {
53 .num_of_queues = IWLAGN_NUM_QUEUES, 53 .num_of_queues = IWLAGN_NUM_QUEUES,
54 .max_tfd_queue_size = 256,
54 .eeprom_size = OTP_LOW_IMAGE_SIZE, 55 .eeprom_size = OTP_LOW_IMAGE_SIZE,
55 .pll_cfg = true, 56 .pll_cfg = true,
56 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 57 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index b4c3a957c102..73969dbeb5c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
985 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 985 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
986 iwl_ext_nvm_channels : iwl_nvm_channels; 986 iwl_ext_nvm_channels : iwl_nvm_channels;
987 struct ieee80211_regdomain *regd, *copy_rd; 987 struct ieee80211_regdomain *regd, *copy_rd;
988 int size_of_regd, regd_to_copy, wmms_to_copy; 988 int size_of_regd, regd_to_copy;
989 int size_of_wmms = 0;
990 struct ieee80211_reg_rule *rule; 989 struct ieee80211_reg_rule *rule;
991 struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
992 struct regdb_ptrs *regdb_ptrs; 990 struct regdb_ptrs *regdb_ptrs;
993 enum nl80211_band band; 991 enum nl80211_band band;
994 int center_freq, prev_center_freq = 0; 992 int center_freq, prev_center_freq = 0;
995 int valid_rules = 0, n_wmms = 0; 993 int valid_rules = 0;
996 int i;
997 bool new_rule; 994 bool new_rule;
998 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 995 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
999 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; 996 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1012 sizeof(struct ieee80211_regdomain) + 1009 sizeof(struct ieee80211_regdomain) +
1013 num_of_ch * sizeof(struct ieee80211_reg_rule); 1010 num_of_ch * sizeof(struct ieee80211_reg_rule);
1014 1011
1015 if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) 1012 regd = kzalloc(size_of_regd, GFP_KERNEL);
1016 size_of_wmms =
1017 num_of_ch * sizeof(struct ieee80211_wmm_rule);
1018
1019 regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
1020 if (!regd) 1013 if (!regd)
1021 return ERR_PTR(-ENOMEM); 1014 return ERR_PTR(-ENOMEM);
1022 1015
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1030 regd->alpha2[0] = fw_mcc >> 8; 1023 regd->alpha2[0] = fw_mcc >> 8;
1031 regd->alpha2[1] = fw_mcc & 0xff; 1024 regd->alpha2[1] = fw_mcc & 0xff;
1032 1025
1033 wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
1034
1035 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 1026 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
1036 ch_flags = (u16)__le32_to_cpup(channels + ch_idx); 1027 ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
1037 band = (ch_idx < NUM_2GHZ_CHANNELS) ? 1028 band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1085 band == NL80211_BAND_2GHZ) 1076 band == NL80211_BAND_2GHZ)
1086 continue; 1077 continue;
1087 1078
1088 if (!reg_query_regdb_wmm(regd->alpha2, center_freq, 1079 reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
1089 &regdb_ptrs[n_wmms].token, wmm_rule)) {
1090 /* Add only new rules */
1091 for (i = 0; i < n_wmms; i++) {
1092 if (regdb_ptrs[i].token ==
1093 regdb_ptrs[n_wmms].token) {
1094 rule->wmm_rule = regdb_ptrs[i].rule;
1095 break;
1096 }
1097 }
1098 if (i == n_wmms) {
1099 rule->wmm_rule = wmm_rule;
1100 regdb_ptrs[n_wmms++].rule = wmm_rule;
1101 wmm_rule++;
1102 }
1103 }
1104 } 1080 }
1105 1081
1106 regd->n_reg_rules = valid_rules; 1082 regd->n_reg_rules = valid_rules;
1107 regd->n_wmm_rules = n_wmms;
1108 1083
1109 /* 1084 /*
1110 * Narrow down regdom for unused regulatory rules to prevent hole 1085 * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
1113 regd_to_copy = sizeof(struct ieee80211_regdomain) + 1088 regd_to_copy = sizeof(struct ieee80211_regdomain) +
1114 valid_rules * sizeof(struct ieee80211_reg_rule); 1089 valid_rules * sizeof(struct ieee80211_reg_rule);
1115 1090
1116 wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; 1091 copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
1117
1118 copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
1119 if (!copy_rd) { 1092 if (!copy_rd) {
1120 copy_rd = ERR_PTR(-ENOMEM); 1093 copy_rd = ERR_PTR(-ENOMEM);
1121 goto out; 1094 goto out;
1122 } 1095 }
1123 1096
1124 memcpy(copy_rd, regd, regd_to_copy); 1097 memcpy(copy_rd, regd, regd_to_copy);
1125 memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
1126 wmms_to_copy);
1127
1128 d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
1129 s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
1130
1131 for (i = 0; i < regd->n_reg_rules; i++) {
1132 if (!regd->reg_rules[i].wmm_rule)
1133 continue;
1134
1135 copy_rd->reg_rules[i].wmm_rule = d_wmm +
1136 (regd->reg_rules[i].wmm_rule - s_wmm);
1137 }
1138 1098
1139out: 1099out:
1140 kfree(regdb_ptrs); 1100 kfree(regdb_ptrs);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 998dfac0fcff..07442ada6dd0 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -34,6 +34,7 @@
34#include <net/net_namespace.h> 34#include <net/net_namespace.h>
35#include <net/netns/generic.h> 35#include <net/netns/generic.h>
36#include <linux/rhashtable.h> 36#include <linux/rhashtable.h>
37#include <linux/nospec.h>
37#include "mac80211_hwsim.h" 38#include "mac80211_hwsim.h"
38 39
39#define WARN_QUEUE 100 40#define WARN_QUEUE 100
@@ -519,7 +520,6 @@ struct mac80211_hwsim_data {
519 int channels, idx; 520 int channels, idx;
520 bool use_chanctx; 521 bool use_chanctx;
521 bool destroy_on_close; 522 bool destroy_on_close;
522 struct work_struct destroy_work;
523 u32 portid; 523 u32 portid;
524 char alpha2[2]; 524 char alpha2[2];
525 const struct ieee80211_regdomain *regd; 525 const struct ieee80211_regdomain *regd;
@@ -2820,9 +2820,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2820 IEEE80211_VHT_CAP_SHORT_GI_80 | 2820 IEEE80211_VHT_CAP_SHORT_GI_80 |
2821 IEEE80211_VHT_CAP_SHORT_GI_160 | 2821 IEEE80211_VHT_CAP_SHORT_GI_160 |
2822 IEEE80211_VHT_CAP_TXSTBC | 2822 IEEE80211_VHT_CAP_TXSTBC |
2823 IEEE80211_VHT_CAP_RXSTBC_1 |
2824 IEEE80211_VHT_CAP_RXSTBC_2 |
2825 IEEE80211_VHT_CAP_RXSTBC_3 |
2826 IEEE80211_VHT_CAP_RXSTBC_4 | 2823 IEEE80211_VHT_CAP_RXSTBC_4 |
2827 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; 2824 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
2828 sband->vht_cap.vht_mcs.rx_mcs_map = 2825 sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -2937,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2937 hwsim_radios_generation++; 2934 hwsim_radios_generation++;
2938 spin_unlock_bh(&hwsim_radio_lock); 2935 spin_unlock_bh(&hwsim_radio_lock);
2939 2936
2940 if (idx > 0) 2937 hwsim_mcast_new_radio(idx, info, param);
2941 hwsim_mcast_new_radio(idx, info, param);
2942 2938
2943 return idx; 2939 return idx;
2944 2940
@@ -3317,6 +3313,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3317 if (info->attrs[HWSIM_ATTR_CHANNELS]) 3313 if (info->attrs[HWSIM_ATTR_CHANNELS])
3318 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); 3314 param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
3319 3315
3316 if (param.channels < 1) {
3317 GENL_SET_ERR_MSG(info, "must have at least one channel");
3318 return -EINVAL;
3319 }
3320
3320 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { 3321 if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
3321 GENL_SET_ERR_MSG(info, "too many channels specified"); 3322 GENL_SET_ERR_MSG(info, "too many channels specified");
3322 return -EINVAL; 3323 return -EINVAL;
@@ -3350,6 +3351,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
3350 kfree(hwname); 3351 kfree(hwname);
3351 return -EINVAL; 3352 return -EINVAL;
3352 } 3353 }
3354
3355 idx = array_index_nospec(idx,
3356 ARRAY_SIZE(hwsim_world_regdom_custom));
3353 param.regd = hwsim_world_regdom_custom[idx]; 3357 param.regd = hwsim_world_regdom_custom[idx];
3354 } 3358 }
3355 3359
@@ -3559,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
3559 .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), 3563 .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
3560}; 3564};
3561 3565
3562static void destroy_radio(struct work_struct *work)
3563{
3564 struct mac80211_hwsim_data *data =
3565 container_of(work, struct mac80211_hwsim_data, destroy_work);
3566
3567 hwsim_radios_generation++;
3568 mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
3569}
3570
3571static void remove_user_radios(u32 portid) 3566static void remove_user_radios(u32 portid)
3572{ 3567{
3573 struct mac80211_hwsim_data *entry, *tmp; 3568 struct mac80211_hwsim_data *entry, *tmp;
3569 LIST_HEAD(list);
3574 3570
3575 spin_lock_bh(&hwsim_radio_lock); 3571 spin_lock_bh(&hwsim_radio_lock);
3576 list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { 3572 list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
3577 if (entry->destroy_on_close && entry->portid == portid) { 3573 if (entry->destroy_on_close && entry->portid == portid) {
3578 list_del(&entry->list); 3574 list_move(&entry->list, &list);
3579 rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, 3575 rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
3580 hwsim_rht_params); 3576 hwsim_rht_params);
3581 INIT_WORK(&entry->destroy_work, destroy_radio); 3577 hwsim_radios_generation++;
3582 queue_work(hwsim_wq, &entry->destroy_work);
3583 } 3578 }
3584 } 3579 }
3585 spin_unlock_bh(&hwsim_radio_lock); 3580 spin_unlock_bh(&hwsim_radio_lock);
3581
3582 list_for_each_entry_safe(entry, tmp, &list, list) {
3583 list_del(&entry->list);
3584 mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
3585 NULL);
3586 }
3586} 3587}
3587 3588
3588static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, 3589static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3640,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
3640static void __net_exit hwsim_exit_net(struct net *net) 3641static void __net_exit hwsim_exit_net(struct net *net)
3641{ 3642{
3642 struct mac80211_hwsim_data *data, *tmp; 3643 struct mac80211_hwsim_data *data, *tmp;
3644 LIST_HEAD(list);
3643 3645
3644 spin_lock_bh(&hwsim_radio_lock); 3646 spin_lock_bh(&hwsim_radio_lock);
3645 list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { 3647 list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3650,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
3650 if (data->netgroup == hwsim_net_get_netgroup(&init_net)) 3652 if (data->netgroup == hwsim_net_get_netgroup(&init_net))
3651 continue; 3653 continue;
3652 3654
3653 list_del(&data->list); 3655 list_move(&data->list, &list);
3654 rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, 3656 rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
3655 hwsim_rht_params); 3657 hwsim_rht_params);
3656 hwsim_radios_generation++; 3658 hwsim_radios_generation++;
3657 spin_unlock_bh(&hwsim_radio_lock); 3659 }
3660 spin_unlock_bh(&hwsim_radio_lock);
3661
3662 list_for_each_entry_safe(data, tmp, &list, list) {
3663 list_del(&data->list);
3658 mac80211_hwsim_del_radio(data, 3664 mac80211_hwsim_del_radio(data,
3659 wiphy_name(data->hw->wiphy), 3665 wiphy_name(data->hw->wiphy),
3660 NULL); 3666 NULL);
3661 spin_lock_bh(&hwsim_radio_lock);
3662 } 3667 }
3663 spin_unlock_bh(&hwsim_radio_lock);
3664 3668
3665 ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); 3669 ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
3666} 3670}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index cf6ffb1ba4a2..22bc9d368728 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
77{ 77{
78 struct mt76x0_dev *dev = hw->priv; 78 struct mt76x0_dev *dev = hw->priv;
79 struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; 79 struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
80 unsigned int wcid = mvif->group_wcid.idx;
81 80
82 dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG); 81 dev->vif_mask &= ~BIT(mvif->idx);
83} 82}
84 83
85static int mt76x0_config(struct ieee80211_hw *hw, u32 changed) 84static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..936c0b3e0ba2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
241struct xenvif_hash { 241struct xenvif_hash {
242 unsigned int alg; 242 unsigned int alg;
243 u32 flags; 243 u32 flags;
244 bool mapping_sel;
244 u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE]; 245 u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
245 u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE]; 246 u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
246 unsigned int size; 247 unsigned int size;
247 struct xenvif_hash_cache cache; 248 struct xenvif_hash_cache cache;
248}; 249};
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..0ccb021f1e78 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
324 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; 324 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
325 325
326 vif->hash.size = size; 326 vif->hash.size = size;
327 memset(vif->hash.mapping, 0, sizeof(u32) * size); 327 memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
328 sizeof(u32) * size);
328 329
329 return XEN_NETIF_CTRL_STATUS_SUCCESS; 330 return XEN_NETIF_CTRL_STATUS_SUCCESS;
330} 331}
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
332u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, 333u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
333 u32 off) 334 u32 off)
334{ 335{
335 u32 *mapping = &vif->hash.mapping[off]; 336 u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
336 struct gnttab_copy copy_op = { 337 unsigned int nr = 1;
338 struct gnttab_copy copy_op[2] = {{
337 .source.u.ref = gref, 339 .source.u.ref = gref,
338 .source.domid = vif->domid, 340 .source.domid = vif->domid,
339 .dest.u.gmfn = virt_to_gfn(mapping),
340 .dest.domid = DOMID_SELF, 341 .dest.domid = DOMID_SELF,
341 .dest.offset = xen_offset_in_page(mapping), 342 .len = len * sizeof(*mapping),
342 .len = len * sizeof(u32),
343 .flags = GNTCOPY_source_gref 343 .flags = GNTCOPY_source_gref
344 }; 344 }};
345 345
346 if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE) 346 if ((off + len < off) || (off + len > vif->hash.size) ||
347 len > XEN_PAGE_SIZE / sizeof(*mapping))
347 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; 348 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
348 349
349 while (len-- != 0) 350 copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
350 if (mapping[off++] >= vif->num_queues) 351 copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
351 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; 352 if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
353 copy_op[1] = copy_op[0];
354 copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
355 copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
356 copy_op[1].dest.offset = 0;
357 copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
358 copy_op[0].len = copy_op[1].source.offset;
359 nr = 2;
360 }
352 361
353 if (copy_op.len != 0) { 362 memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
354 gnttab_batch_copy(&copy_op, 1); 363 vif->hash.size * sizeof(*mapping));
355 364
356 if (copy_op.status != GNTST_okay) 365 if (copy_op[0].len != 0) {
366 gnttab_batch_copy(copy_op, nr);
367
368 if (copy_op[0].status != GNTST_okay ||
369 copy_op[nr - 1].status != GNTST_okay)
357 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER; 370 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
358 } 371 }
359 372
373 while (len-- != 0)
374 if (mapping[off++] >= vif->num_queues)
375 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
376
377 vif->hash.mapping_sel = !vif->hash.mapping_sel;
378
360 return XEN_NETIF_CTRL_STATUS_SUCCESS; 379 return XEN_NETIF_CTRL_STATUS_SUCCESS;
361} 380}
362 381
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
408 } 427 }
409 428
410 if (vif->hash.size != 0) { 429 if (vif->hash.size != 0) {
430 const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
431
411 seq_puts(m, "\nHash Mapping:\n"); 432 seq_puts(m, "\nHash Mapping:\n");
412 433
413 for (i = 0; i < vif->hash.size; ) { 434 for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
420 seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); 441 seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
421 442
422 for (j = 0; j < n; j++, i++) 443 for (j = 0; j < n; j++, i++)
423 seq_printf(m, "%4u ", vif->hash.mapping[i]); 444 seq_printf(m, "%4u ", mapping[i]);
424 445
425 seq_puts(m, "\n"); 446 seq_puts(m, "\n");
426 } 447 }
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 92274c237200..f6ae23fc3f6b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
162 if (size == 0) 162 if (size == 0)
163 return skb_get_hash_raw(skb) % dev->real_num_tx_queues; 163 return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
164 164
165 return vif->hash.mapping[skb_get_hash_raw(skb) % size]; 165 return vif->hash.mapping[vif->hash.mapping_sel]
166 [skb_get_hash_raw(skb) % size];
166} 167}
167 168
168static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 169static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 73f596a90c69..f17f602e6171 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,8 +87,7 @@ struct netfront_cb {
87/* IRQ name is queue name with "-tx" or "-rx" appended */ 87/* IRQ name is queue name with "-tx" or "-rx" appended */
88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 88#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
89 89
90static DECLARE_WAIT_QUEUE_HEAD(module_load_q); 90static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
92 91
93struct netfront_stats { 92struct netfront_stats {
94 u64 packets; 93 u64 packets;
@@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
909 BUG_ON(pull_to <= skb_headlen(skb)); 908 BUG_ON(pull_to <= skb_headlen(skb));
910 __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 909 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
911 } 910 }
912 BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); 911 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
912 queue->rx.rsp_cons = ++cons;
913 kfree_skb(nskb);
914 return ~0U;
915 }
913 916
914 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 917 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
915 skb_frag_page(nfrag), 918 skb_frag_page(nfrag),
@@ -1046,6 +1049,8 @@ err:
1046 skb->len += rx->status; 1049 skb->len += rx->status;
1047 1050
1048 i = xennet_fill_frags(queue, skb, &tmpq); 1051 i = xennet_fill_frags(queue, skb, &tmpq);
1052 if (unlikely(i == ~0U))
1053 goto err;
1049 1054
1050 if (rx->flags & XEN_NETRXF_csum_blank) 1055 if (rx->flags & XEN_NETRXF_csum_blank)
1051 skb->ip_summed = CHECKSUM_PARTIAL; 1056 skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1332 netif_carrier_off(netdev); 1337 netif_carrier_off(netdev);
1333 1338
1334 xenbus_switch_state(dev, XenbusStateInitialising); 1339 xenbus_switch_state(dev, XenbusStateInitialising);
1335 wait_event(module_load_q, 1340 wait_event(module_wq,
1336 xenbus_read_driver_state(dev->otherend) != 1341 xenbus_read_driver_state(dev->otherend) !=
1337 XenbusStateClosed && 1342 XenbusStateClosed &&
1338 xenbus_read_driver_state(dev->otherend) != 1343 xenbus_read_driver_state(dev->otherend) !=
1339 XenbusStateUnknown); 1344 XenbusStateUnknown);
1340 return netdev; 1345 return netdev;
1341 1346
1342 exit: 1347 exit:
@@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
2010 2015
2011 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 2016 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2012 2017
2018 wake_up_all(&module_wq);
2019
2013 switch (backend_state) { 2020 switch (backend_state) {
2014 case XenbusStateInitialising: 2021 case XenbusStateInitialising:
2015 case XenbusStateInitialised: 2022 case XenbusStateInitialised:
2016 case XenbusStateReconfiguring: 2023 case XenbusStateReconfiguring:
2017 case XenbusStateReconfigured: 2024 case XenbusStateReconfigured:
2018 break;
2019
2020 case XenbusStateUnknown: 2025 case XenbusStateUnknown:
2021 wake_up_all(&module_unload_q);
2022 break; 2026 break;
2023 2027
2024 case XenbusStateInitWait: 2028 case XenbusStateInitWait:
@@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
2034 break; 2038 break;
2035 2039
2036 case XenbusStateClosed: 2040 case XenbusStateClosed:
2037 wake_up_all(&module_unload_q);
2038 if (dev->state == XenbusStateClosed) 2041 if (dev->state == XenbusStateClosed)
2039 break; 2042 break;
2040 /* Missed the backend's CLOSING state -- fallthrough */ 2043 /* Missed the backend's CLOSING state -- fallthrough */
2041 case XenbusStateClosing: 2044 case XenbusStateClosing:
2042 wake_up_all(&module_unload_q);
2043 xenbus_frontend_closed(dev); 2045 xenbus_frontend_closed(dev);
2044 break; 2046 break;
2045 } 2047 }
@@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
2147 2149
2148 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 2150 if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2149 xenbus_switch_state(dev, XenbusStateClosing); 2151 xenbus_switch_state(dev, XenbusStateClosing);
2150 wait_event(module_unload_q, 2152 wait_event(module_wq,
2151 xenbus_read_driver_state(dev->otherend) == 2153 xenbus_read_driver_state(dev->otherend) ==
2152 XenbusStateClosing || 2154 XenbusStateClosing ||
2153 xenbus_read_driver_state(dev->otherend) == 2155 xenbus_read_driver_state(dev->otherend) ==
2154 XenbusStateUnknown); 2156 XenbusStateUnknown);
2155 2157
2156 xenbus_switch_state(dev, XenbusStateClosed); 2158 xenbus_switch_state(dev, XenbusStateClosed);
2157 wait_event(module_unload_q, 2159 wait_event(module_wq,
2158 xenbus_read_driver_state(dev->otherend) == 2160 xenbus_read_driver_state(dev->otherend) ==
2159 XenbusStateClosed || 2161 XenbusStateClosed ||
2160 xenbus_read_driver_state(dev->otherend) == 2162 xenbus_read_driver_state(dev->otherend) ==
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..9fe3fff818b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
537 537
538 INIT_WORK(&ctrl->ana_work, nvme_ana_work); 538 INIT_WORK(&ctrl->ana_work, nvme_ana_work);
539 ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); 539 ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
540 if (!ctrl->ana_log_buf) 540 if (!ctrl->ana_log_buf) {
541 error = -ENOMEM;
541 goto out; 542 goto out;
543 }
542 544
543 error = nvme_read_ana_log(ctrl, true); 545 error = nvme_read_ana_log(ctrl, true);
544 if (error) 546 if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
547out_free_ana_log_buf: 549out_free_ana_log_buf:
548 kfree(ctrl->ana_log_buf); 550 kfree(ctrl->ana_log_buf);
549out: 551out:
550 return -ENOMEM; 552 return error;
551} 553}
552 554
553void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 555void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 1b9951d2067e..d668682f91df 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
316 old_value = *dbbuf_db; 316 old_value = *dbbuf_db;
317 *dbbuf_db = value; 317 *dbbuf_db = value;
318 318
319 /*
320 * Ensure that the doorbell is updated before reading the event
321 * index from memory. The controller needs to provide similar
322 * ordering to ensure the envent index is updated before reading
323 * the doorbell.
324 */
325 mb();
326
319 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 327 if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
320 return false; 328 return false;
321 } 329 }
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a21caea1e080..2008fa62a373 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
245 offset += len; 245 offset += len;
246 ngrps++; 246 ngrps++;
247 } 247 }
248 for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
249 if (nvmet_ana_group_enabled[grpid])
250 ngrps++;
251 }
248 252
249 hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); 253 hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
250 hdr.ngrps = cpu_to_le16(ngrps); 254 hdr.ngrps = cpu_to_le16(ngrps);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ebf3e7a6c49e..b5ec96abd048 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
1210 1210
1211 error = nvmet_init_discovery(); 1211 error = nvmet_init_discovery();
1212 if (error) 1212 if (error)
1213 goto out; 1213 goto out_free_work_queue;
1214 1214
1215 error = nvmet_init_configfs(); 1215 error = nvmet_init_configfs();
1216 if (error) 1216 if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
1219 1219
1220out_exit_discovery: 1220out_exit_discovery:
1221 nvmet_exit_discovery(); 1221 nvmet_exit_discovery();
1222out_free_work_queue:
1223 destroy_workqueue(buffered_io_wq);
1222out: 1224out:
1223 return error; 1225 return error;
1224} 1226}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 34712def81b1..5251689a1d9a 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
311 struct fcloop_tport *tport = tls_req->tport; 311 struct fcloop_tport *tport = tls_req->tport;
312 struct nvmefc_ls_req *lsreq = tls_req->lsreq; 312 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
313 313
314 if (tport->remoteport) 314 if (!tport || tport->remoteport)
315 lsreq->done(lsreq, tls_req->status); 315 lsreq->done(lsreq, tls_req->status);
316} 316}
317 317
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
329 329
330 if (!rport->targetport) { 330 if (!rport->targetport) {
331 tls_req->status = -ECONNREFUSED; 331 tls_req->status = -ECONNREFUSED;
332 tls_req->tport = NULL;
332 schedule_work(&tls_req->work); 333 schedule_work(&tls_req->work);
333 return ret; 334 return ret;
334 } 335 }
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3533e918ea37..bfc4da660bb4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
66 66
67 struct nvmet_req req; 67 struct nvmet_req req;
68 68
69 bool allocated;
69 u8 n_rdma; 70 u8 n_rdma;
70 u32 flags; 71 u32 flags;
71 u32 invalidate_rkey; 72 u32 invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
174 unsigned long flags; 175 unsigned long flags;
175 176
176 spin_lock_irqsave(&queue->rsps_lock, flags); 177 spin_lock_irqsave(&queue->rsps_lock, flags);
177 rsp = list_first_entry(&queue->free_rsps, 178 rsp = list_first_entry_or_null(&queue->free_rsps,
178 struct nvmet_rdma_rsp, free_list); 179 struct nvmet_rdma_rsp, free_list);
179 list_del(&rsp->free_list); 180 if (likely(rsp))
181 list_del(&rsp->free_list);
180 spin_unlock_irqrestore(&queue->rsps_lock, flags); 182 spin_unlock_irqrestore(&queue->rsps_lock, flags);
181 183
184 if (unlikely(!rsp)) {
185 rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
186 if (unlikely(!rsp))
187 return NULL;
188 rsp->allocated = true;
189 }
190
182 return rsp; 191 return rsp;
183} 192}
184 193
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
187{ 196{
188 unsigned long flags; 197 unsigned long flags;
189 198
199 if (rsp->allocated) {
200 kfree(rsp);
201 return;
202 }
203
190 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); 204 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
191 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); 205 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
192 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); 206 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
776 790
777 cmd->queue = queue; 791 cmd->queue = queue;
778 rsp = nvmet_rdma_get_rsp(queue); 792 rsp = nvmet_rdma_get_rsp(queue);
793 if (unlikely(!rsp)) {
794 /*
795 * we get here only under memory pressure,
796 * silently drop and have the host retry
797 * as we can't even fail it.
798 */
799 nvmet_rdma_post_recv(queue->dev, cmd);
800 return;
801 }
779 rsp->queue = queue; 802 rsp->queue = queue;
780 rsp->cmd = cmd; 803 rsp->cmd = cmd;
781 rsp->flags = 0; 804 rsp->flags = 0;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 466e3c8582f0..74eaedd5b860 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
54 */ 54 */
55DEFINE_RAW_SPINLOCK(devtree_lock); 55DEFINE_RAW_SPINLOCK(devtree_lock);
56 56
57bool of_node_name_eq(const struct device_node *np, const char *name)
58{
59 const char *node_name;
60 size_t len;
61
62 if (!np)
63 return false;
64
65 node_name = kbasename(np->full_name);
66 len = strchrnul(node_name, '@') - node_name;
67
68 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
69}
70
71bool of_node_name_prefix(const struct device_node *np, const char *prefix)
72{
73 if (!np)
74 return false;
75
76 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
77}
78
57int of_n_addr_cells(struct device_node *np) 79int of_n_addr_cells(struct device_node *np)
58{ 80{
59 u32 cells; 81 u32 cells;
@@ -118,6 +140,9 @@ void of_populate_phandle_cache(void)
118 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) 140 if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
119 phandles++; 141 phandles++;
120 142
143 if (!phandles)
144 goto out;
145
121 cache_entries = roundup_pow_of_two(phandles); 146 cache_entries = roundup_pow_of_two(phandles);
122 phandle_cache_mask = cache_entries - 1; 147 phandle_cache_mask = cache_entries - 1;
123 148
@@ -720,6 +745,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
720EXPORT_SYMBOL(of_get_next_available_child); 745EXPORT_SYMBOL(of_get_next_available_child);
721 746
722/** 747/**
748 * of_get_compatible_child - Find compatible child node
749 * @parent: parent node
750 * @compatible: compatible string
751 *
752 * Lookup child node whose compatible property contains the given compatible
753 * string.
754 *
755 * Returns a node pointer with refcount incremented, use of_node_put() on it
756 * when done; or NULL if not found.
757 */
758struct device_node *of_get_compatible_child(const struct device_node *parent,
759 const char *compatible)
760{
761 struct device_node *child;
762
763 for_each_child_of_node(parent, child) {
764 if (of_device_is_compatible(child, compatible))
765 break;
766 }
767
768 return child;
769}
770EXPORT_SYMBOL(of_get_compatible_child);
771
772/**
723 * of_get_child_by_name - Find the child node by name for a given parent 773 * of_get_child_by_name - Find the child node by name for a given parent
724 * @node: parent node 774 * @node: parent node
725 * @name: child name to look for. 775 * @name: child name to look for.
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 7ba90c290a42..6c59673933e9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
241 if (!dev) 241 if (!dev)
242 goto err_clear_flag; 242 goto err_clear_flag;
243 243
244 /* AMBA devices only support a single DMA mask */
245 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
246 dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
247
244 /* setup generic device info */ 248 /* setup generic device info */
245 dev->dev.of_node = of_node_get(node); 249 dev->dev.of_node = of_node_get(node);
246 dev->dev.fwnode = &node->fwnode; 250 dev->dev.fwnode = &node->fwnode;
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 778c4f76a884..2153956a0b20 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
135 if (val & PCIE_ATU_ENABLE) 135 if (val & PCIE_ATU_ENABLE)
136 return; 136 return;
137 137
138 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 138 mdelay(LINK_WAIT_IATU);
139 } 139 }
140 dev_err(pci->dev, "Outbound iATU is not being enabled\n"); 140 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
141} 141}
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
178 if (val & PCIE_ATU_ENABLE) 178 if (val & PCIE_ATU_ENABLE)
179 return; 179 return;
180 180
181 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 181 mdelay(LINK_WAIT_IATU);
182 } 182 }
183 dev_err(pci->dev, "Outbound iATU is not being enabled\n"); 183 dev_err(pci->dev, "Outbound iATU is not being enabled\n");
184} 184}
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
236 if (val & PCIE_ATU_ENABLE) 236 if (val & PCIE_ATU_ENABLE)
237 return 0; 237 return 0;
238 238
239 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 239 mdelay(LINK_WAIT_IATU);
240 } 240 }
241 dev_err(pci->dev, "Inbound iATU is not being enabled\n"); 241 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
242 242
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
282 if (val & PCIE_ATU_ENABLE) 282 if (val & PCIE_ATU_ENABLE)
283 return 0; 283 return 0;
284 284
285 usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); 285 mdelay(LINK_WAIT_IATU);
286 } 286 }
287 dev_err(pci->dev, "Inbound iATU is not being enabled\n"); 287 dev_err(pci->dev, "Inbound iATU is not being enabled\n");
288 288
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 96126fd8403c..9f1a5e399b70 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -26,8 +26,7 @@
26 26
27/* Parameters for the waiting for iATU enabled routine */ 27/* Parameters for the waiting for iATU enabled routine */
28#define LINK_WAIT_MAX_IATU_RETRIES 5 28#define LINK_WAIT_MAX_IATU_RETRIES 5
29#define LINK_WAIT_IATU_MIN 9000 29#define LINK_WAIT_IATU 9
30#define LINK_WAIT_IATU_MAX 10000
31 30
32/* Synopsys-specific PCIe configuration registers */ 31/* Synopsys-specific PCIe configuration registers */
33#define PCIE_PORT_LINK_CONTROL 0x710 32#define PCIE_PORT_LINK_CONTROL 0x710
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index c00f82cc54aa..9ba4d12c179c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
89 89
90#define STATUS_REVISION_MISMATCH 0xC0000059 90#define STATUS_REVISION_MISMATCH 0xC0000059
91 91
92/* space for 32bit serial number as string */
93#define SLOT_NAME_SIZE 11
94
92/* 95/*
93 * Message Types 96 * Message Types
94 */ 97 */
@@ -494,6 +497,7 @@ struct hv_pci_dev {
494 struct list_head list_entry; 497 struct list_head list_entry;
495 refcount_t refs; 498 refcount_t refs;
496 enum hv_pcichild_state state; 499 enum hv_pcichild_state state;
500 struct pci_slot *pci_slot;
497 struct pci_function_description desc; 501 struct pci_function_description desc;
498 bool reported_missing; 502 bool reported_missing;
499 struct hv_pcibus_device *hbus; 503 struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
1457 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1461 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1458} 1462}
1459 1463
1464/*
1465 * Assign entries in sysfs pci slot directory.
1466 *
1467 * Note that this function does not need to lock the children list
1468 * because it is called from pci_devices_present_work which
1469 * is serialized with hv_eject_device_work because they are on the
1470 * same ordered workqueue. Therefore hbus->children list will not change
1471 * even when pci_create_slot sleeps.
1472 */
1473static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
1474{
1475 struct hv_pci_dev *hpdev;
1476 char name[SLOT_NAME_SIZE];
1477 int slot_nr;
1478
1479 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1480 if (hpdev->pci_slot)
1481 continue;
1482
1483 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
1484 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
1485 hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
1486 name, NULL);
1487 if (IS_ERR(hpdev->pci_slot)) {
1488 pr_warn("pci_create slot %s failed\n", name);
1489 hpdev->pci_slot = NULL;
1490 }
1491 }
1492}
1493
1460/** 1494/**
1461 * create_root_hv_pci_bus() - Expose a new root PCI bus 1495 * create_root_hv_pci_bus() - Expose a new root PCI bus
1462 * @hbus: Root PCI bus, as understood by this driver 1496 * @hbus: Root PCI bus, as understood by this driver
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
1480 pci_lock_rescan_remove(); 1514 pci_lock_rescan_remove();
1481 pci_scan_child_bus(hbus->pci_bus); 1515 pci_scan_child_bus(hbus->pci_bus);
1482 pci_bus_assign_resources(hbus->pci_bus); 1516 pci_bus_assign_resources(hbus->pci_bus);
1517 hv_pci_assign_slots(hbus);
1483 pci_bus_add_devices(hbus->pci_bus); 1518 pci_bus_add_devices(hbus->pci_bus);
1484 pci_unlock_rescan_remove(); 1519 pci_unlock_rescan_remove();
1485 hbus->state = hv_pcibus_installed; 1520 hbus->state = hv_pcibus_installed;
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work)
1742 */ 1777 */
1743 pci_lock_rescan_remove(); 1778 pci_lock_rescan_remove();
1744 pci_scan_child_bus(hbus->pci_bus); 1779 pci_scan_child_bus(hbus->pci_bus);
1780 hv_pci_assign_slots(hbus);
1745 pci_unlock_rescan_remove(); 1781 pci_unlock_rescan_remove();
1746 break; 1782 break;
1747 1783
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work)
1858 list_del(&hpdev->list_entry); 1894 list_del(&hpdev->list_entry);
1859 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); 1895 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
1860 1896
1897 if (hpdev->pci_slot)
1898 pci_destroy_slot(hpdev->pci_slot);
1899
1861 memset(&ctxt, 0, sizeof(ctxt)); 1900 memset(&ctxt, 0, sizeof(ctxt));
1862 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; 1901 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
1863 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; 1902 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef0b1b6ba86f..12afa7fdf77e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
457/** 457/**
458 * enable_slot - enable, configure a slot 458 * enable_slot - enable, configure a slot
459 * @slot: slot to be enabled 459 * @slot: slot to be enabled
460 * @bridge: true if enable is for the whole bridge (not a single slot)
460 * 461 *
461 * This function should be called per *physical slot*, 462 * This function should be called per *physical slot*,
462 * not per each slot object in ACPI namespace. 463 * not per each slot object in ACPI namespace.
463 */ 464 */
464static void enable_slot(struct acpiphp_slot *slot) 465static void enable_slot(struct acpiphp_slot *slot, bool bridge)
465{ 466{
466 struct pci_dev *dev; 467 struct pci_dev *dev;
467 struct pci_bus *bus = slot->bus; 468 struct pci_bus *bus = slot->bus;
468 struct acpiphp_func *func; 469 struct acpiphp_func *func;
469 470
470 if (bus->self && hotplug_is_native(bus->self)) { 471 if (bridge && bus->self && hotplug_is_native(bus->self)) {
471 /* 472 /*
472 * If native hotplug is used, it will take care of hotplug 473 * If native hotplug is used, it will take care of hotplug
473 * slot management and resource allocation for hotplug 474 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
701 trim_stale_devices(dev); 702 trim_stale_devices(dev);
702 703
703 /* configure all functions */ 704 /* configure all functions */
704 enable_slot(slot); 705 enable_slot(slot, true);
705 } else { 706 } else {
706 disable_slot(slot); 707 disable_slot(slot);
707 } 708 }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
785 if (bridge) 786 if (bridge)
786 acpiphp_check_bridge(bridge); 787 acpiphp_check_bridge(bridge);
787 else if (!(slot->flags & SLOT_IS_GOING_AWAY)) 788 else if (!(slot->flags & SLOT_IS_GOING_AWAY))
788 enable_slot(slot); 789 enable_slot(slot, false);
789 790
790 break; 791 break;
791 792
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
973 974
974 /* configure all functions */ 975 /* configure all functions */
975 if (!(slot->flags & SLOT_ENABLED)) 976 if (!(slot->flags & SLOT_ENABLED))
976 enable_slot(slot); 977 enable_slot(slot, false);
977 978
978 pci_unlock_rescan_remove(); 979 pci_unlock_rescan_remove();
979 return 0; 980 return 0;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7136e3430925..a938abdb41ce 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
496 u16 slot_status; 496 u16 slot_status;
497 int retval; 497 int retval;
498 498
499 /* Clear sticky power-fault bit from previous power failures */ 499 /* Clear power-fault bit from previous power failures */
500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); 500 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
501 if (slot_status & PCI_EXP_SLTSTA_PFD) 501 if (slot_status & PCI_EXP_SLTSTA_PFD)
502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, 502 pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
646 pciehp_handle_button_press(slot); 646 pciehp_handle_button_press(slot);
647 } 647 }
648 648
649 /* Check Power Fault Detected */
650 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
651 ctrl->power_fault_detected = 1;
652 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
653 pciehp_set_attention_status(slot, 1);
654 pciehp_green_led_off(slot);
655 }
656
649 /* 657 /*
650 * Disable requests have higher priority than Presence Detect Changed 658 * Disable requests have higher priority than Presence Detect Changed
651 * or Data Link Layer State Changed events. 659 * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
657 pciehp_handle_presence_or_link_change(slot, events); 665 pciehp_handle_presence_or_link_change(slot, events);
658 up_read(&ctrl->reset_lock); 666 up_read(&ctrl->reset_lock);
659 667
660 /* Check Power Fault Detected */
661 if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
662 ctrl->power_fault_detected = 1;
663 ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
664 pciehp_set_attention_status(slot, 1);
665 pciehp_green_led_off(slot);
666 }
667
668 pci_config_pm_runtime_put(pdev); 668 pci_config_pm_runtime_put(pdev);
669 wake_up(&ctrl->requester); 669 wake_up(&ctrl->requester);
670 return IRQ_HANDLED; 670 return IRQ_HANDLED;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ff9619b5fa..1835f3a7aa8d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4547 4547
4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); 4548 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4549} 4549}
4550EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4550 4551
4551static int pci_parent_bus_reset(struct pci_dev *dev, int probe) 4552static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4552{ 4553{
@@ -5200,7 +5201,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
5200 */ 5201 */
5201int pci_reset_bus(struct pci_dev *pdev) 5202int pci_reset_bus(struct pci_dev *pdev)
5202{ 5203{
5203 return pci_probe_reset_slot(pdev->slot) ? 5204 return (!pci_probe_reset_slot(pdev->slot)) ?
5204 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); 5205 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5205} 5206}
5206EXPORT_SYMBOL_GPL(pci_reset_bus); 5207EXPORT_SYMBOL_GPL(pci_reset_bus);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ec784009a36b..201f9e5ff55c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2074{ 2074{
2075#ifdef CONFIG_PCI_PASID 2075#ifdef CONFIG_PCI_PASID
2076 struct pci_dev *bridge; 2076 struct pci_dev *bridge;
2077 int pcie_type;
2077 u32 cap; 2078 u32 cap;
2078 2079
2079 if (!pci_is_pcie(dev)) 2080 if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
2083 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) 2084 if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
2084 return; 2085 return;
2085 2086
2086 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) 2087 pcie_type = pci_pcie_type(dev);
2088 if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2089 pcie_type == PCI_EXP_TYPE_RC_END)
2087 dev->eetlp_prefix_path = 1; 2090 dev->eetlp_prefix_path = 1;
2088 else { 2091 else {
2089 bridge = pci_upstream_bridge(dev); 2092 bridge = pci_upstream_bridge(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ef7143a274e0..6bc27b7fd452 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
4355 * 4355 *
4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12} 4356 * 0x9d10-0x9d1b PCI Express Root port #{1-12}
4357 * 4357 *
4358 * The 300 series chipset suffers from the same bug so include those root
4359 * ports here as well.
4360 *
4361 * 0xa32c-0xa343 PCI Express Root port #{0-24}
4362 *
4363 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html 4358 * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
4364 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html 4359 * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
4365 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html 4360 * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4377 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ 4372 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
4378 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ 4373 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
4379 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ 4374 case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
4380 case 0xa32c ... 0xa343: /* 300 series */
4381 return true; 4375 return true;
4382 } 4376 }
4383 4377
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 9940cc70f38b..54a8b30dda38 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -14,6 +14,8 @@
14#include <linux/poll.h> 14#include <linux/poll.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
16 16
17#include <linux/nospec.h>
18
17MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); 19MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
18MODULE_VERSION("0.1"); 20MODULE_VERSION("0.1");
19MODULE_LICENSE("GPL"); 21MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
909 default: 911 default:
910 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id)) 912 if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
911 return -EINVAL; 913 return -EINVAL;
914 p.port = array_index_nospec(p.port,
915 ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
912 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]); 916 p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
913 break; 917 break;
914 } 918 }
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index ece41fb2848f..c4f4d904e4a6 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
1040 } 1040 }
1041 1041
1042 /* if the configuration is provided through pdata, apply it */ 1042 /* if the configuration is provided through pdata, apply it */
1043 if (pdata) { 1043 if (pdata && pdata->gpio_configs) {
1044 ret = pinctrl_register_mappings(pdata->gpio_configs, 1044 ret = pinctrl_register_mappings(pdata->gpio_configs,
1045 pdata->n_gpio_configs); 1045 pdata->n_gpio_configs);
1046 if (ret) { 1046 if (ret) {
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index fb1afe55bf53..e7f45d96b0cb 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -15,10 +15,11 @@
15 15
16#include "pinctrl-intel.h" 16#include "pinctrl-intel.h"
17 17
18#define CNL_PAD_OWN 0x020 18#define CNL_PAD_OWN 0x020
19#define CNL_PADCFGLOCK 0x080 19#define CNL_PADCFGLOCK 0x080
20#define CNL_HOSTSW_OWN 0x0b0 20#define CNL_LP_HOSTSW_OWN 0x0b0
21#define CNL_GPI_IE 0x120 21#define CNL_H_HOSTSW_OWN 0x0c0
22#define CNL_GPI_IE 0x120
22 23
23#define CNL_GPP(r, s, e, g) \ 24#define CNL_GPP(r, s, e, g) \
24 { \ 25 { \
@@ -30,12 +31,12 @@
30 31
31#define CNL_NO_GPIO -1 32#define CNL_NO_GPIO -1
32 33
33#define CNL_COMMUNITY(b, s, e, g) \ 34#define CNL_COMMUNITY(b, s, e, o, g) \
34 { \ 35 { \
35 .barno = (b), \ 36 .barno = (b), \
36 .padown_offset = CNL_PAD_OWN, \ 37 .padown_offset = CNL_PAD_OWN, \
37 .padcfglock_offset = CNL_PADCFGLOCK, \ 38 .padcfglock_offset = CNL_PADCFGLOCK, \
38 .hostown_offset = CNL_HOSTSW_OWN, \ 39 .hostown_offset = (o), \
39 .ie_offset = CNL_GPI_IE, \ 40 .ie_offset = CNL_GPI_IE, \
40 .pin_base = (s), \ 41 .pin_base = (s), \
41 .npins = ((e) - (s) + 1), \ 42 .npins = ((e) - (s) + 1), \
@@ -43,6 +44,12 @@
43 .ngpps = ARRAY_SIZE(g), \ 44 .ngpps = ARRAY_SIZE(g), \
44 } 45 }
45 46
47#define CNLLP_COMMUNITY(b, s, e, g) \
48 CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
49
50#define CNLH_COMMUNITY(b, s, e, g) \
51 CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
52
46/* Cannon Lake-H */ 53/* Cannon Lake-H */
47static const struct pinctrl_pin_desc cnlh_pins[] = { 54static const struct pinctrl_pin_desc cnlh_pins[] = {
48 /* GPP_A */ 55 /* GPP_A */
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
379static const struct intel_padgroup cnlh_community3_gpps[] = { 386static const struct intel_padgroup cnlh_community3_gpps[] = {
380 CNL_GPP(0, 155, 178, 192), /* GPP_K */ 387 CNL_GPP(0, 155, 178, 192), /* GPP_K */
381 CNL_GPP(1, 179, 202, 224), /* GPP_H */ 388 CNL_GPP(1, 179, 202, 224), /* GPP_H */
382 CNL_GPP(2, 203, 215, 258), /* GPP_E */ 389 CNL_GPP(2, 203, 215, 256), /* GPP_E */
383 CNL_GPP(3, 216, 239, 288), /* GPP_F */ 390 CNL_GPP(3, 216, 239, 288), /* GPP_F */
384 CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ 391 CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
385}; 392};
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
442}; 449};
443 450
444static const struct intel_community cnlh_communities[] = { 451static const struct intel_community cnlh_communities[] = {
445 CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps), 452 CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
446 CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps), 453 CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
447 CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps), 454 CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
448 CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps), 455 CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
449}; 456};
450 457
451static const struct intel_pinctrl_soc_data cnlh_soc_data = { 458static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
803}; 810};
804 811
805static const struct intel_community cnllp_communities[] = { 812static const struct intel_community cnllp_communities[] = {
806 CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps), 813 CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
807 CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps), 814 CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
808 CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps), 815 CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
809}; 816};
810 817
811static const struct intel_pinctrl_soc_data cnllp_soc_data = { 818static const struct intel_pinctrl_soc_data cnllp_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 62b009b27eda..1ea3438ea67e 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
747 .owner = THIS_MODULE, 747 .owner = THIS_MODULE,
748}; 748};
749 749
750/**
751 * intel_gpio_to_pin() - Translate from GPIO offset to pin number
752 * @pctrl: Pinctrl structure
753 * @offset: GPIO offset from gpiolib
 754 * @community: Community is filled here if not %NULL
755 * @padgrp: Pad group is filled here if not %NULL
756 *
757 * When coming through gpiolib irqchip, the GPIO offset is not
758 * automatically translated to pinctrl pin number. This function can be
759 * used to find out the corresponding pinctrl pin.
760 */
761static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
762 const struct intel_community **community,
763 const struct intel_padgroup **padgrp)
764{
765 int i;
766
767 for (i = 0; i < pctrl->ncommunities; i++) {
768 const struct intel_community *comm = &pctrl->communities[i];
769 int j;
770
771 for (j = 0; j < comm->ngpps; j++) {
772 const struct intel_padgroup *pgrp = &comm->gpps[j];
773
774 if (pgrp->gpio_base < 0)
775 continue;
776
777 if (offset >= pgrp->gpio_base &&
778 offset < pgrp->gpio_base + pgrp->size) {
779 int pin;
780
781 pin = pgrp->base + offset - pgrp->gpio_base;
782 if (community)
783 *community = comm;
784 if (padgrp)
785 *padgrp = pgrp;
786
787 return pin;
788 }
789 }
790 }
791
792 return -EINVAL;
793}
794
750static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) 795static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
751{ 796{
752 struct intel_pinctrl *pctrl = gpiochip_get_data(chip); 797 struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
753 void __iomem *reg; 798 void __iomem *reg;
754 u32 padcfg0; 799 u32 padcfg0;
800 int pin;
755 801
756 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 802 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
803 if (pin < 0)
804 return -EINVAL;
805
806 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
757 if (!reg) 807 if (!reg)
758 return -EINVAL; 808 return -EINVAL;
759 809
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
770 unsigned long flags; 820 unsigned long flags;
771 void __iomem *reg; 821 void __iomem *reg;
772 u32 padcfg0; 822 u32 padcfg0;
823 int pin;
773 824
774 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 825 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
826 if (pin < 0)
827 return;
828
829 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
775 if (!reg) 830 if (!reg)
776 return; 831 return;
777 832
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
790 struct intel_pinctrl *pctrl = gpiochip_get_data(chip); 845 struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
791 void __iomem *reg; 846 void __iomem *reg;
792 u32 padcfg0; 847 u32 padcfg0;
848 int pin;
793 849
794 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 850 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
851 if (pin < 0)
852 return -EINVAL;
853
854 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
795 if (!reg) 855 if (!reg)
796 return -EINVAL; 856 return -EINVAL;
797 857
@@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
827 .set_config = gpiochip_generic_config, 887 .set_config = gpiochip_generic_config,
828}; 888};
829 889
830/**
831 * intel_gpio_to_pin() - Translate from GPIO offset to pin number
832 * @pctrl: Pinctrl structure
833 * @offset: GPIO offset from gpiolib
834 * @commmunity: Community is filled here if not %NULL
835 * @padgrp: Pad group is filled here if not %NULL
836 *
837 * When coming through gpiolib irqchip, the GPIO offset is not
838 * automatically translated to pinctrl pin number. This function can be
839 * used to find out the corresponding pinctrl pin.
840 */
841static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
842 const struct intel_community **community,
843 const struct intel_padgroup **padgrp)
844{
845 int i;
846
847 for (i = 0; i < pctrl->ncommunities; i++) {
848 const struct intel_community *comm = &pctrl->communities[i];
849 int j;
850
851 for (j = 0; j < comm->ngpps; j++) {
852 const struct intel_padgroup *pgrp = &comm->gpps[j];
853
854 if (pgrp->gpio_base < 0)
855 continue;
856
857 if (offset >= pgrp->gpio_base &&
858 offset < pgrp->gpio_base + pgrp->size) {
859 int pin;
860
861 pin = pgrp->base + offset - pgrp->gpio_base;
862 if (community)
863 *community = comm;
864 if (padgrp)
865 *padgrp = pgrp;
866
867 return pin;
868 }
869 }
870 }
871
872 return -EINVAL;
873}
874
875static int intel_gpio_irq_reqres(struct irq_data *d)
876{
877 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
878 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
879 int pin;
880 int ret;
881
882 pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
883 if (pin >= 0) {
884 ret = gpiochip_lock_as_irq(gc, pin);
885 if (ret) {
886 dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
887 pin);
888 return ret;
889 }
890 }
891 return 0;
892}
893
894static void intel_gpio_irq_relres(struct irq_data *d)
895{
896 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
897 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
898 int pin;
899
900 pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
901 if (pin >= 0)
902 gpiochip_unlock_as_irq(gc, pin);
903}
904
905static void intel_gpio_irq_ack(struct irq_data *d) 890static void intel_gpio_irq_ack(struct irq_data *d)
906{ 891{
907 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 892 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
1117 1102
1118static struct irq_chip intel_gpio_irqchip = { 1103static struct irq_chip intel_gpio_irqchip = {
1119 .name = "intel-gpio", 1104 .name = "intel-gpio",
1120 .irq_request_resources = intel_gpio_irq_reqres,
1121 .irq_release_resources = intel_gpio_irq_relres,
1122 .irq_enable = intel_gpio_irq_enable, 1105 .irq_enable = intel_gpio_irq_enable,
1123 .irq_ack = intel_gpio_irq_ack, 1106 .irq_ack = intel_gpio_irq_ack,
1124 .irq_mask = intel_gpio_irq_mask, 1107 .irq_mask = intel_gpio_irq_mask,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 41ccc759b8b8..1425c2874d40 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
348 unsigned long flags; 348 unsigned long flags;
349 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 349 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
350 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 350 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
351 u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
352 351
353 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 352 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
354 pin_reg = readl(gpio_dev->base + (d->hwirq)*4); 353 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
355 pin_reg |= BIT(INTERRUPT_ENABLE_OFF); 354 pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
356 pin_reg |= BIT(INTERRUPT_MASK_OFF); 355 pin_reg |= BIT(INTERRUPT_MASK_OFF);
357 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 356 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
358 /*
359 * When debounce logic is enabled it takes ~900 us before interrupts
360 * can be enabled. During this "debounce warm up" period the
361 * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
362 * reads back as 1, signaling that interrupts are now enabled.
363 */
364 while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
365 continue;
366 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 357 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
367} 358}
368 359
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
426static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) 417static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
427{ 418{
428 int ret = 0; 419 int ret = 0;
429 u32 pin_reg; 420 u32 pin_reg, pin_reg_irq_en, mask;
430 unsigned long flags, irq_flags; 421 unsigned long flags, irq_flags;
431 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 422 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
432 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 423 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
495 } 486 }
496 487
497 pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; 488 pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
489 /*
490 * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
491 * debounce registers of any GPIO will block wake/interrupt status
 492 * generation for *all* GPIOs for a length of time that depends on
493 * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
494 * INTERRUPT_ENABLE bit will read as 0.
495 *
496 * We temporarily enable irq for the GPIO whose configuration is
497 * changing, and then wait for it to read back as 1 to know when
498 * debounce has settled and then disable the irq again.
499 * We do this polling with the spinlock held to ensure other GPIO
500 * access routines do not read an incorrect value for the irq enable
501 * bit of other GPIOs. We keep the GPIO masked while polling to avoid
502 * spurious irqs, and disable the irq again after polling.
503 */
504 mask = BIT(INTERRUPT_ENABLE_OFF);
505 pin_reg_irq_en = pin_reg;
506 pin_reg_irq_en |= mask;
507 pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
508 writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
509 while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
510 continue;
498 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 511 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
499 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 512 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
500 513
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6a1b6058b991..628817c40e3b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
793 793
794 err = pinctrl_generic_add_group(jzpc->pctl, group->name, 794 err = pinctrl_generic_add_group(jzpc->pctl, group->name,
795 group->pins, group->num_pins, group->data); 795 group->pins, group->num_pins, group->data);
796 if (err) { 796 if (err < 0) {
797 dev_err(dev, "Failed to register group %s\n", 797 dev_err(dev, "Failed to register group %s\n",
798 group->name); 798 group->name);
799 return err; 799 return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
806 err = pinmux_generic_add_function(jzpc->pctl, func->name, 806 err = pinmux_generic_add_function(jzpc->pctl, func->name,
807 func->group_names, func->num_group_names, 807 func->group_names, func->num_group_names,
808 func->data); 808 func->data);
809 if (err) { 809 if (err < 0) {
810 dev_err(dev, "Failed to register function %s\n", 810 dev_err(dev, "Failed to register function %s\n",
811 func->name); 811 func->name);
812 return err; 812 return err;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2155a30c282b..5d72ffad32c2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
634 raw_spin_lock_irqsave(&pctrl->lock, flags); 634 raw_spin_lock_irqsave(&pctrl->lock, flags);
635 635
636 val = readl(pctrl->regs + g->intr_cfg_reg); 636 val = readl(pctrl->regs + g->intr_cfg_reg);
637 /*
638 * There are two bits that control interrupt forwarding to the CPU. The
639 * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
640 * latched into the interrupt status register when the hardware detects
641 * an irq that it's configured for (either edge for edge type or level
642 * for level type irq). The 'non-raw' status enable bit causes the
643 * hardware to assert the summary interrupt to the CPU if the latched
644 * status bit is set. There's a bug though, the edge detection logic
645 * seems to have a problem where toggling the RAW_STATUS_EN bit may
646 * cause the status bit to latch spuriously when there isn't any edge
647 * so we can't touch that bit for edge type irqs and we have to keep
648 * the bit set anyway so that edges are latched while the line is masked.
649 *
650 * To make matters more complicated, leaving the RAW_STATUS_EN bit
651 * enabled all the time causes level interrupts to re-latch into the
652 * status register because the level is still present on the line after
653 * we ack it. We clear the raw status enable bit during mask here and
654 * set the bit on unmask so the interrupt can't latch into the hardware
655 * while it's masked.
656 */
657 if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
658 val &= ~BIT(g->intr_raw_status_bit);
659
637 val &= ~BIT(g->intr_enable_bit); 660 val &= ~BIT(g->intr_enable_bit);
638 writel(val, pctrl->regs + g->intr_cfg_reg); 661 writel(val, pctrl->regs + g->intr_cfg_reg);
639 662
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
655 raw_spin_lock_irqsave(&pctrl->lock, flags); 678 raw_spin_lock_irqsave(&pctrl->lock, flags);
656 679
657 val = readl(pctrl->regs + g->intr_cfg_reg); 680 val = readl(pctrl->regs + g->intr_cfg_reg);
681 val |= BIT(g->intr_raw_status_bit);
658 val |= BIT(g->intr_enable_bit); 682 val |= BIT(g->intr_enable_bit);
659 writel(val, pctrl->regs + g->intr_cfg_reg); 683 writel(val, pctrl->regs + g->intr_cfg_reg);
660 684
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d975462a4c57..f10af5c383c5 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
536 if (obj && obj->type == ACPI_TYPE_INTEGER) 536 if (obj && obj->type == ACPI_TYPE_INTEGER)
537 *out_data = (u32) obj->integer.value; 537 *out_data = (u32) obj->integer.value;
538 } 538 }
539 kfree(output.pointer);
539 return status; 540 return status;
540 541
541} 542}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 88afe5651d24..cf2229ece9ff 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
78 dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", 78 dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
79 priv->buf->std.output[0], priv->buf->std.output[1], 79 priv->buf->std.output[0], priv->buf->std.output[1],
80 priv->buf->std.output[2], priv->buf->std.output[3]); 80 priv->buf->std.output[2], priv->buf->std.output[3]);
81 kfree(output.pointer);
81 82
82 return 0; 83 return 0;
83} 84}
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
index 0f8ac8dec3e1..a1bd8aaf4d98 100644
--- a/drivers/regulator/bd71837-regulator.c
+++ b/drivers/regulator/bd71837-regulator.c
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
569 BD71837_REG_REGLOCK); 569 BD71837_REG_REGLOCK);
570 } 570 }
571 571
572 /*
573 * There is a HW quirk in BD71837. The shutdown sequence timings for
574 * bucks/LDOs which are controlled via register interface are changed.
575 * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
576 * beginning of shut-down sequence. As bucks 6 and 7 are parent
577 * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage
 578 * monitoring to erroneously detect under voltage and force PMIC to
579 * emergency state instead of poweroff. In order to avoid this we
580 * disable voltage monitoring for LDO5 and LDO6
581 */
582 err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
583 BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
584 BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
585 if (err) {
586 dev_err(&pmic->pdev->dev,
587 "Failed to disable voltage monitoring\n");
588 goto err;
589 }
590
572 for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { 591 for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
573 592
574 struct regulator_desc *desc; 593 struct regulator_desc *desc;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bb1324f93143..9577d8941846 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
3161 if (!rstate->changeable) 3161 if (!rstate->changeable)
3162 return -EPERM; 3162 return -EPERM;
3163 3163
3164 rstate->enabled = en; 3164 rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
3165 3165
3166 return 0; 3166 return 0;
3167} 3167}
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
4395 !rdev->desc->fixed_uV) 4395 !rdev->desc->fixed_uV)
4396 rdev->is_switch = true; 4396 rdev->is_switch = true;
4397 4397
4398 dev_set_drvdata(&rdev->dev, rdev);
4398 ret = device_register(&rdev->dev); 4399 ret = device_register(&rdev->dev);
4399 if (ret != 0) { 4400 if (ret != 0) {
4400 put_device(&rdev->dev); 4401 put_device(&rdev->dev);
4401 goto unset_supplies; 4402 goto unset_supplies;
4402 } 4403 }
4403 4404
4404 dev_set_drvdata(&rdev->dev, rdev);
4405 rdev_init_debugfs(rdev); 4405 rdev_init_debugfs(rdev);
4406 4406
4407 /* try to resolve regulators supply since a new one was registered */ 4407 /* try to resolve regulators supply since a new one was registered */
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 638f17d4c848..210fc20f7de7 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
213 else if (of_property_read_bool(suspend_np, 213 else if (of_property_read_bool(suspend_np,
214 "regulator-off-in-suspend")) 214 "regulator-off-in-suspend"))
215 suspend_state->enabled = DISABLE_IN_SUSPEND; 215 suspend_state->enabled = DISABLE_IN_SUSPEND;
216 else
217 suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
218 216
219 if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", 217 if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
220 &pval)) 218 &pval))
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec891bc7d10a..f039266b275d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
872 if (bits & 0x07) 872 if (bits & 0x07)
873 return -EINVAL; 873 return -EINVAL;
874 874
875 memset(bitmap, 0, bits / 8);
876
877 if (str[0] == '0' && str[1] == 'x') 875 if (str[0] == '0' && str[1] == 'x')
878 str++; 876 str++;
879 if (*str == 'x') 877 if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
895} 893}
896 894
897/* 895/*
898 * str2clrsetmasks() - parse bitmask argument and set the clear and 896 * modify_bitmap() - parse bitmask argument and modify an existing
899 * the set bitmap mask. A concatenation (done with ',') of these terms 897 * bit mask accordingly. A concatenation (done with ',') of these
900 * is recognized: 898 * terms is recognized:
901 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>] 899 * +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
902 * <bitnr> may be any valid number (hex, decimal or octal) in the range 900 * <bitnr> may be any valid number (hex, decimal or octal) in the range
903 * 0...bits-1; the leading + or - is required. Here are some examples: 901 * 0...bits-1; the leading + or - is required. Here are some examples:
904 * +0-15,+32,-128,-0xFF 902 * +0-15,+32,-128,-0xFF
905 * -0-255,+1-16,+0x128 903 * -0-255,+1-16,+0x128
906 * +1,+2,+3,+4,-5,-7-10 904 * +1,+2,+3,+4,-5,-7-10
907 * Returns a clear and a set bitmask. Every positive value in the string 905 * Returns the new bitmap after all changes have been applied. Every
908 * results in a bit set in the set mask and every negative value in the 906 * positive value in the string will set a bit and every negative value
909 * string results in a bit SET in the clear mask. As a bit may be touched 907 * in the string will clear a bit. As a bit may be touched more than once,
910 * more than once, the last 'operation' wins: +0-255,-128 = all but bit 908 * the last 'operation' wins:
911 * 128 set in the set mask, only bit 128 set in the clear mask. 909 * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
910 * cleared again. All other bits are unmodified.
912 */ 911 */
913static int str2clrsetmasks(const char *str, 912static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
914 unsigned long *clrmap,
915 unsigned long *setmap,
916 int bits)
917{ 913{
918 int a, i, z; 914 int a, i, z;
919 char *np, sign; 915 char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
922 if (bits & 0x07) 918 if (bits & 0x07)
923 return -EINVAL; 919 return -EINVAL;
924 920
925 memset(clrmap, 0, bits / 8);
926 memset(setmap, 0, bits / 8);
927
928 while (*str) { 921 while (*str) {
929 sign = *str++; 922 sign = *str++;
930 if (sign != '+' && sign != '-') 923 if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
940 str = np; 933 str = np;
941 } 934 }
942 for (i = a; i <= z; i++) 935 for (i = a; i <= z; i++)
943 if (sign == '+') { 936 if (sign == '+')
944 set_bit_inv(i, setmap); 937 set_bit_inv(i, bitmap);
945 clear_bit_inv(i, clrmap); 938 else
946 } else { 939 clear_bit_inv(i, bitmap);
947 clear_bit_inv(i, setmap);
948 set_bit_inv(i, clrmap);
949 }
950 while (*str == ',' || *str == '\n') 940 while (*str == ',' || *str == '\n')
951 str++; 941 str++;
952 } 942 }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
970 unsigned long *bitmap, int bits, 960 unsigned long *bitmap, int bits,
971 struct mutex *lock) 961 struct mutex *lock)
972{ 962{
973 int i; 963 unsigned long *newmap, size;
964 int rc;
974 965
975 /* bits needs to be a multiple of 8 */ 966 /* bits needs to be a multiple of 8 */
976 if (bits & 0x07) 967 if (bits & 0x07)
977 return -EINVAL; 968 return -EINVAL;
978 969
970 size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
971 newmap = kmalloc(size, GFP_KERNEL);
972 if (!newmap)
973 return -ENOMEM;
974 if (mutex_lock_interruptible(lock)) {
975 kfree(newmap);
976 return -ERESTARTSYS;
977 }
978
979 if (*str == '+' || *str == '-') { 979 if (*str == '+' || *str == '-') {
980 DECLARE_BITMAP(clrm, bits); 980 memcpy(newmap, bitmap, size);
981 DECLARE_BITMAP(setm, bits); 981 rc = modify_bitmap(str, newmap, bits);
982
983 i = str2clrsetmasks(str, clrm, setm, bits);
984 if (i)
985 return i;
986 if (mutex_lock_interruptible(lock))
987 return -ERESTARTSYS;
988 for (i = 0; i < bits; i++) {
989 if (test_bit_inv(i, clrm))
990 clear_bit_inv(i, bitmap);
991 if (test_bit_inv(i, setm))
992 set_bit_inv(i, bitmap);
993 }
994 } else { 982 } else {
995 DECLARE_BITMAP(setm, bits); 983 memset(newmap, 0, size);
996 984 rc = hex2bitmap(str, newmap, bits);
997 i = hex2bitmap(str, setm, bits);
998 if (i)
999 return i;
1000 if (mutex_lock_interruptible(lock))
1001 return -ERESTARTSYS;
1002 for (i = 0; i < bits; i++)
1003 if (test_bit_inv(i, setm))
1004 set_bit_inv(i, bitmap);
1005 else
1006 clear_bit_inv(i, bitmap);
1007 } 985 }
986 if (rc == 0)
987 memcpy(bitmap, newmap, size);
1008 mutex_unlock(lock); 988 mutex_unlock(lock);
1009 989 kfree(newmap);
1010 return 0; 990 return rc;
1011} 991}
1012 992
1013/* 993/*
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 49f64eb3eab0..ffce6f39828a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -25,6 +25,7 @@
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/netdev_features.h> 26#include <linux/netdev_features.h>
27#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/vmalloc.h>
28 29
29#include <net/iucv/af_iucv.h> 30#include <net/iucv/af_iucv.h>
30#include <net/dsfield.h> 31#include <net/dsfield.h>
@@ -609,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
609static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, 610static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
610 struct qeth_card *card) 611 struct qeth_card *card)
611{ 612{
612 char *ipa_name; 613 const char *ipa_name;
613 int com = cmd->hdr.command; 614 int com = cmd->hdr.command;
614 ipa_name = qeth_get_ipa_cmd_name(com); 615 ipa_name = qeth_get_ipa_cmd_name(com);
615 if (rc) 616 if (rc)
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4699 4700
4700 priv.buffer_len = oat_data.buffer_len; 4701 priv.buffer_len = oat_data.buffer_len;
4701 priv.response_len = 0; 4702 priv.response_len = 0;
4702 priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL); 4703 priv.buffer = vzalloc(oat_data.buffer_len);
4703 if (!priv.buffer) { 4704 if (!priv.buffer) {
4704 rc = -ENOMEM; 4705 rc = -ENOMEM;
4705 goto out; 4706 goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4740 rc = -EFAULT; 4741 rc = -EFAULT;
4741 4742
4742out_free: 4743out_free:
4743 kfree(priv.buffer); 4744 vfree(priv.buffer);
4744out: 4745out:
4745 return rc; 4746 return rc;
4746} 4747}
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
5706 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 5707 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
5707 dev->hw_features |= NETIF_F_SG; 5708 dev->hw_features |= NETIF_F_SG;
5708 dev->vlan_features |= NETIF_F_SG; 5709 dev->vlan_features |= NETIF_F_SG;
5710 if (IS_IQD(card))
5711 dev->features |= NETIF_F_SG;
5709 } 5712 }
5710 5713
5711 return dev; 5714 return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5768 qeth_update_from_chp_desc(card); 5771 qeth_update_from_chp_desc(card);
5769 5772
5770 card->dev = qeth_alloc_netdev(card); 5773 card->dev = qeth_alloc_netdev(card);
5771 if (!card->dev) 5774 if (!card->dev) {
5775 rc = -ENOMEM;
5772 goto err_card; 5776 goto err_card;
5777 }
5773 5778
5774 qeth_determine_capabilities(card); 5779 qeth_determine_capabilities(card);
5775 enforced_disc = qeth_enforce_discipline(card); 5780 enforced_disc = qeth_enforce_discipline(card);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5bcb8dafc3ee..e891c0b52f4c 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
148 148
149struct ipa_rc_msg { 149struct ipa_rc_msg {
150 enum qeth_ipa_return_codes rc; 150 enum qeth_ipa_return_codes rc;
151 char *msg; 151 const char *msg;
152}; 152};
153 153
154static struct ipa_rc_msg qeth_ipa_rc_msg[] = { 154static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
155 {IPA_RC_SUCCESS, "success"}, 155 {IPA_RC_SUCCESS, "success"},
156 {IPA_RC_NOTSUPP, "Command not supported"}, 156 {IPA_RC_NOTSUPP, "Command not supported"},
157 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, 157 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
219 219
220 220
221 221
222char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) 222const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
223{ 223{
224 int x = 0; 224 int x;
225 qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) / 225
226 sizeof(struct ipa_rc_msg) - 1].rc = rc; 226 for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
227 while (qeth_ipa_rc_msg[x].rc != rc) 227 if (qeth_ipa_rc_msg[x].rc == rc)
228 x++; 228 return qeth_ipa_rc_msg[x].msg;
229 return qeth_ipa_rc_msg[x].msg; 229 return qeth_ipa_rc_msg[x].msg;
230} 230}
231 231
232 232
233struct ipa_cmd_names { 233struct ipa_cmd_names {
234 enum qeth_ipa_cmds cmd; 234 enum qeth_ipa_cmds cmd;
235 char *name; 235 const char *name;
236}; 236};
237 237
238static struct ipa_cmd_names qeth_ipa_cmd_names[] = { 238static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
239 {IPA_CMD_STARTLAN, "startlan"}, 239 {IPA_CMD_STARTLAN, "startlan"},
240 {IPA_CMD_STOPLAN, "stoplan"}, 240 {IPA_CMD_STOPLAN, "stoplan"},
241 {IPA_CMD_SETVMAC, "setvmac"}, 241 {IPA_CMD_SETVMAC, "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
267 {IPA_CMD_UNKNOWN, "unknown"}, 267 {IPA_CMD_UNKNOWN, "unknown"},
268}; 268};
269 269
270char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) 270const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
271{ 271{
272 int x = 0; 272 int x;
273 qeth_ipa_cmd_names[ 273
274 sizeof(qeth_ipa_cmd_names) / 274 for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
275 sizeof(struct ipa_cmd_names)-1].cmd = cmd; 275 if (qeth_ipa_cmd_names[x].cmd == cmd)
276 while (qeth_ipa_cmd_names[x].cmd != cmd) 276 return qeth_ipa_cmd_names[x].name;
277 x++;
278 return qeth_ipa_cmd_names[x].name; 277 return qeth_ipa_cmd_names[x].name;
279} 278}
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index aa8b9196b089..aa5de1fe01e1 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
797 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, 797 QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
798}; 798};
799 799
800extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); 800extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
801extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); 801extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
802 802
803#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ 803#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
804 sizeof(struct qeth_ipacmd_setassparms_hdr)) 804 sizeof(struct qeth_ipacmd_setassparms_hdr))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 710fa74892ae..b5e38531733f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
423 default: 423 default:
424 dev_kfree_skb_any(skb); 424 dev_kfree_skb_any(skb);
425 QETH_CARD_TEXT(card, 3, "inbunkno"); 425 QETH_CARD_TEXT(card, 3, "inbunkno");
426 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 426 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
427 continue; 427 continue;
428 } 428 }
429 work_done++; 429 work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7175086677fb..ada258c01a08 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1390 default: 1390 default:
1391 dev_kfree_skb_any(skb); 1391 dev_kfree_skb_any(skb);
1392 QETH_CARD_TEXT(card, 3, "inbunkno"); 1392 QETH_CARD_TEXT(card, 3, "inbunkno");
1393 QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN); 1393 QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
1394 continue; 1394 continue;
1395 } 1395 }
1396 work_done++; 1396 work_done++;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8fc851a9e116..7c097006c54d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
52 default y 52 default y
53 depends on SCSI 53 depends on SCSI
54 ---help--- 54 ---help---
55 This option enables the new blk-mq based I/O path for SCSI 55 This option enables the blk-mq based I/O path for SCSI devices by
56 devices by default. With the option the scsi_mod.use_blk_mq 56 default. With this option the scsi_mod.use_blk_mq module/boot
57 module/boot option defaults to Y, without it to N, but it can 57 option defaults to Y, without it to N, but it can still be
58 still be overridden either way. 58 overridden either way.
59 59
60 If unsure say N. 60 If unsure say Y.
61 61
62config SCSI_PROC_FS 62config SCSI_PROC_FS
63 bool "legacy /proc/scsi/ support" 63 bool "legacy /proc/scsi/ support"
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 29bf1e60f542..39eb415987fc 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1346,7 +1346,7 @@ struct fib {
1346struct aac_hba_map_info { 1346struct aac_hba_map_info {
1347 __le32 rmw_nexus; /* nexus for native HBA devices */ 1347 __le32 rmw_nexus; /* nexus for native HBA devices */
1348 u8 devtype; /* device type */ 1348 u8 devtype; /* device type */
1349 u8 reset_state; /* 0 - no reset, 1..x - */ 1349 s8 reset_state; /* 0 - no reset, 1..x - */
1350 /* after xth TM LUN reset */ 1350 /* after xth TM LUN reset */
1351 u16 qd_limit; 1351 u16 qd_limit;
1352 u32 scan_counter; 1352 u32 scan_counter;
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 23d07e9f87d0..e51923886475 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
1602} 1602}
1603 1603
1604/** 1604/**
1605 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
1606 * @caps32: a 32-bit Port Capabilities value
1607 *
1608 * Returns the equivalent 16-bit Port Capabilities value. Note that
1609 * not all 32-bit Port Capabilities can be represented in the 16-bit
1610 * Port Capabilities and some fields/values may not make it.
1611 */
1612fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
1613{
1614 fw_port_cap16_t caps16 = 0;
1615
1616 #define CAP32_TO_CAP16(__cap) \
1617 do { \
1618 if (caps32 & FW_PORT_CAP32_##__cap) \
1619 caps16 |= FW_PORT_CAP_##__cap; \
1620 } while (0)
1621
1622 CAP32_TO_CAP16(SPEED_100M);
1623 CAP32_TO_CAP16(SPEED_1G);
1624 CAP32_TO_CAP16(SPEED_10G);
1625 CAP32_TO_CAP16(SPEED_25G);
1626 CAP32_TO_CAP16(SPEED_40G);
1627 CAP32_TO_CAP16(SPEED_100G);
1628 CAP32_TO_CAP16(FC_RX);
1629 CAP32_TO_CAP16(FC_TX);
1630 CAP32_TO_CAP16(802_3_PAUSE);
1631 CAP32_TO_CAP16(802_3_ASM_DIR);
1632 CAP32_TO_CAP16(ANEG);
1633 CAP32_TO_CAP16(FORCE_PAUSE);
1634 CAP32_TO_CAP16(MDIAUTO);
1635 CAP32_TO_CAP16(MDISTRAIGHT);
1636 CAP32_TO_CAP16(FEC_RS);
1637 CAP32_TO_CAP16(FEC_BASER_RS);
1638
1639 #undef CAP32_TO_CAP16
1640
1641 return caps16;
1642}
1643
1644/**
1605 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities 1645 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
1606 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value 1646 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
1607 * 1647 *
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
1759 val = 1; 1799 val = 1;
1760 1800
1761 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, 1801 csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
1762 hw->pfn, 0, 1, &param, &val, false, 1802 hw->pfn, 0, 1, &param, &val, true,
1763 NULL); 1803 NULL);
1764 1804
1765 if (csio_mb_issue(hw, mbp)) { 1805 if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
1769 return -EINVAL; 1809 return -EINVAL;
1770 } 1810 }
1771 1811
1772 csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, 1812 csio_mb_process_read_params_rsp(hw, mbp, &retval,
1773 &val); 1813 0, NULL);
1774 if (retval != FW_SUCCESS) { 1814 fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
1775 csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
1776 portid, retval);
1777 mempool_free(mbp, hw->mb_mempool);
1778 return -EINVAL;
1779 }
1780
1781 fw_caps = val;
1782 } 1815 }
1783 1816
1784 /* Read PORT information */ 1817 /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
2364} 2397}
2365 2398
2366/* 2399/*
2367 * Returns -EINVAL if attempts to flash the firmware failed 2400 * Returns -EINVAL if attempts to flash the firmware failed,
2368 * else returns 0, 2401 * -ENOMEM if memory allocation failed else returns 0,
2369 * if flashing was not attempted because the card had the 2402 * if flashing was not attempted because the card had the
2370 * latest firmware ECANCELED is returned 2403 * latest firmware ECANCELED is returned
2371 */ 2404 */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2393 return -EINVAL; 2426 return -EINVAL;
2394 } 2427 }
2395 2428
2429 /* allocate memory to read the header of the firmware on the
2430 * card
2431 */
2432 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2433 if (!card_fw)
2434 return -ENOMEM;
2435
2396 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK)) 2436 if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
2397 fw_bin_file = FW_FNAME_T5; 2437 fw_bin_file = FW_FNAME_T5;
2398 else 2438 else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
2406 fw_size = fw->size; 2446 fw_size = fw->size;
2407 } 2447 }
2408 2448
2409 /* allocate memory to read the header of the firmware on the
2410 * card
2411 */
2412 card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
2413
2414 /* upgrade FW logic */ 2449 /* upgrade FW logic */
2415 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw, 2450 ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
2416 hw->fw_state, reset); 2451 hw->fw_state, reset);
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9e73ef771eb7..e351af6e7c81 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
639 639
640fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps); 640fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
641fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16); 641fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
642fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
642fw_port_cap32_t lstatus_to_fwcap(u32 lstatus); 643fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
643 644
644int csio_hw_start(struct csio_hw *); 645int csio_hw_start(struct csio_hw *);
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index c026417269c3..6f13673d6aa0 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16)); 368 FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
369 369
370 if (fw_caps == FW_CAPS16) 370 if (fw_caps == FW_CAPS16)
371 cmdp->u.l1cfg.rcap = cpu_to_be32(fc); 371 cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
372 else 372 else
373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc); 373 cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
374} 374}
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap)); 395 *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap)); 396 *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
397 } else { 397 } else {
398 *pcaps = ntohs(rsp->u.info32.pcaps32); 398 *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
399 *acaps = ntohs(rsp->u.info32.acaps32); 399 *acaps = be32_to_cpu(rsp->u.info32.acaps32);
400 } 400 }
401 } 401 }
402} 402}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f02dcc875a09..ea4b0bb0c1cd 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
563} 563}
564EXPORT_SYMBOL(scsi_host_get); 564EXPORT_SYMBOL(scsi_host_get);
565 565
566struct scsi_host_mq_in_flight {
567 int cnt;
568};
569
570static void scsi_host_check_in_flight(struct request *rq, void *data,
571 bool reserved)
572{
573 struct scsi_host_mq_in_flight *in_flight = data;
574
575 if (blk_mq_request_started(rq))
576 in_flight->cnt++;
577}
578
579/** 566/**
580 * scsi_host_busy - Return the host busy counter 567 * scsi_host_busy - Return the host busy counter
581 * @shost: Pointer to Scsi_Host to inc. 568 * @shost: Pointer to Scsi_Host to inc.
582 **/ 569 **/
583int scsi_host_busy(struct Scsi_Host *shost) 570int scsi_host_busy(struct Scsi_Host *shost)
584{ 571{
585 struct scsi_host_mq_in_flight in_flight = { 572 return atomic_read(&shost->host_busy);
586 .cnt = 0,
587 };
588
589 if (!shost->use_blk_mq)
590 return atomic_read(&shost->host_busy);
591
592 blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
593 &in_flight);
594 return in_flight.cnt;
595} 573}
596EXPORT_SYMBOL(scsi_host_busy); 574EXPORT_SYMBOL(scsi_host_busy);
597 575
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 58bb70b886d7..c120929d4ffe 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
976#endif 976#endif
977 .sdev_attrs = hpsa_sdev_attrs, 977 .sdev_attrs = hpsa_sdev_attrs,
978 .shost_attrs = hpsa_shost_attrs, 978 .shost_attrs = hpsa_shost_attrs,
979 .max_sectors = 1024, 979 .max_sectors = 2048,
980 .no_write_same = 1, 980 .no_write_same = 1,
981}; 981};
982 982
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index fac377320158..f42a619198c4 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3474 vscsi->dds.window[LOCAL].liobn, 3474 vscsi->dds.window[LOCAL].liobn,
3475 vscsi->dds.window[REMOTE].liobn); 3475 vscsi->dds.window[REMOTE].liobn);
3476 3476
3477 strcpy(vscsi->eye, "VSCSI "); 3477 snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
3478 strncat(vscsi->eye, vdev->name, MAX_EYE);
3479 3478
3480 vscsi->dds.unit_id = vdev->unit_address; 3479 vscsi->dds.unit_id = vdev->unit_address;
3481 strncpy(vscsi->dds.partition_name, partition_name, 3480 strscpy(vscsi->dds.partition_name, partition_name,
3482 sizeof(vscsi->dds.partition_name)); 3481 sizeof(vscsi->dds.partition_name));
3483 vscsi->dds.partition_num = partition_number; 3482 vscsi->dds.partition_num = partition_number;
3484 3483
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f2ec80b0ffc0..271990bc065b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
3335 LEAVE; 3335 LEAVE;
3336} 3336}
3337 3337
3338static void ipr_add_remove_thread(struct work_struct *work)
3339{
3340 unsigned long lock_flags;
3341 struct ipr_resource_entry *res;
3342 struct scsi_device *sdev;
3343 struct ipr_ioa_cfg *ioa_cfg =
3344 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3345 u8 bus, target, lun;
3346 int did_work;
3347
3348 ENTER;
3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350
3351restart:
3352 do {
3353 did_work = 0;
3354 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356 return;
3357 }
3358
3359 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3360 if (res->del_from_ml && res->sdev) {
3361 did_work = 1;
3362 sdev = res->sdev;
3363 if (!scsi_device_get(sdev)) {
3364 if (!res->add_to_ml)
3365 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3366 else
3367 res->del_from_ml = 0;
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 scsi_remove_device(sdev);
3370 scsi_device_put(sdev);
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 }
3373 break;
3374 }
3375 }
3376 } while (did_work);
3377
3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379 if (res->add_to_ml) {
3380 bus = res->bus;
3381 target = res->target;
3382 lun = res->lun;
3383 res->add_to_ml = 0;
3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385 scsi_add_device(ioa_cfg->host, bus, target, lun);
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 goto restart;
3388 }
3389 }
3390
3391 ioa_cfg->scan_done = 1;
3392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3394 LEAVE;
3395}
3396
3338/** 3397/**
3339 * ipr_worker_thread - Worker thread 3398 * ipr_worker_thread - Worker thread
3340 * @work: ioa config struct 3399 * @work: ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
3349static void ipr_worker_thread(struct work_struct *work) 3408static void ipr_worker_thread(struct work_struct *work)
3350{ 3409{
3351 unsigned long lock_flags; 3410 unsigned long lock_flags;
3352 struct ipr_resource_entry *res;
3353 struct scsi_device *sdev;
3354 struct ipr_dump *dump; 3411 struct ipr_dump *dump;
3355 struct ipr_ioa_cfg *ioa_cfg = 3412 struct ipr_ioa_cfg *ioa_cfg =
3356 container_of(work, struct ipr_ioa_cfg, work_q); 3413 container_of(work, struct ipr_ioa_cfg, work_q);
3357 u8 bus, target, lun;
3358 int did_work;
3359 3414
3360 ENTER; 3415 ENTER;
3361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
3393 return; 3448 return;
3394 } 3449 }
3395 3450
3396restart: 3451 schedule_work(&ioa_cfg->scsi_add_work_q);
3397 do {
3398 did_work = 0;
3399 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3401 return;
3402 }
3403 3452
3404 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3405 if (res->del_from_ml && res->sdev) {
3406 did_work = 1;
3407 sdev = res->sdev;
3408 if (!scsi_device_get(sdev)) {
3409 if (!res->add_to_ml)
3410 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3411 else
3412 res->del_from_ml = 0;
3413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414 scsi_remove_device(sdev);
3415 scsi_device_put(sdev);
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417 }
3418 break;
3419 }
3420 }
3421 } while (did_work);
3422
3423 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3424 if (res->add_to_ml) {
3425 bus = res->bus;
3426 target = res->target;
3427 lun = res->lun;
3428 res->add_to_ml = 0;
3429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3430 scsi_add_device(ioa_cfg->host, bus, target, lun);
3431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3432 goto restart;
3433 }
3434 }
3435
3436 ioa_cfg->scan_done = 1;
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3439 LEAVE; 3454 LEAVE;
3440} 3455}
3441 3456
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9933 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9948 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9934 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9949 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9935 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9950 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9951 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9936 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9952 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9937 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9953 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9938 init_waitqueue_head(&ioa_cfg->eeh_wait_q); 9954 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 68afbbde54d3..f6baa2351313 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
1575 u8 saved_mode_page_len; 1575 u8 saved_mode_page_len;
1576 1576
1577 struct work_struct work_q; 1577 struct work_struct work_q;
1578 struct work_struct scsi_add_work_q;
1578 struct workqueue_struct *reset_work_q; 1579 struct workqueue_struct *reset_work_q;
1579 1580
1580 wait_queue_head_t reset_wait_q; 1581 wait_queue_head_t reset_wait_q;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e0d0da5f43d6..43732e8d1347 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -672,7 +672,7 @@ struct lpfc_hba {
672#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 672#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
673#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */ 673#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
674#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */ 674#define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
675#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */ 675#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
676 676
677 uint32_t hba_flag; /* hba generic flags */ 677 uint32_t hba_flag; /* hba generic flags */
678#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ 678#define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5a25553415f8..1a6ed9b0a249 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
360 goto buffer_done; 360 goto buffer_done;
361 361
362 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 362 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
363 nrport = NULL;
364 spin_lock(&vport->phba->hbalock);
363 rport = lpfc_ndlp_get_nrport(ndlp); 365 rport = lpfc_ndlp_get_nrport(ndlp);
364 if (!rport) 366 if (rport)
365 continue; 367 nrport = rport->remoteport;
366 368 spin_unlock(&vport->phba->hbalock);
367 /* local short-hand pointer. */
368 nrport = rport->remoteport;
369 if (!nrport) 369 if (!nrport)
370 continue; 370 continue;
371 371
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3386 struct lpfc_nodelist *ndlp; 3386 struct lpfc_nodelist *ndlp;
3387#if (IS_ENABLED(CONFIG_NVME_FC)) 3387#if (IS_ENABLED(CONFIG_NVME_FC))
3388 struct lpfc_nvme_rport *rport; 3388 struct lpfc_nvme_rport *rport;
3389 struct nvme_fc_remote_port *remoteport = NULL;
3389#endif 3390#endif
3390 3391
3391 shost = lpfc_shost_from_vport(vport); 3392 shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3396 if (ndlp->rport) 3397 if (ndlp->rport)
3397 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3398 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3398#if (IS_ENABLED(CONFIG_NVME_FC)) 3399#if (IS_ENABLED(CONFIG_NVME_FC))
3400 spin_lock(&vport->phba->hbalock);
3399 rport = lpfc_ndlp_get_nrport(ndlp); 3401 rport = lpfc_ndlp_get_nrport(ndlp);
3400 if (rport) 3402 if (rport)
3403 remoteport = rport->remoteport;
3404 spin_unlock(&vport->phba->hbalock);
3405 if (remoteport)
3401 nvme_fc_set_remoteport_devloss(rport->remoteport, 3406 nvme_fc_set_remoteport_devloss(rport->remoteport,
3402 vport->cfg_devloss_tmo); 3407 vport->cfg_devloss_tmo);
3403#endif 3408#endif
@@ -5122,16 +5127,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5122 5127
5123/* 5128/*
5124# lpfc_fdmi_on: Controls FDMI support. 5129# lpfc_fdmi_on: Controls FDMI support.
5125# 0 No FDMI support (default) 5130# 0 No FDMI support
5126# 1 Traditional FDMI support 5131# 1 Traditional FDMI support (default)
5127# Traditional FDMI support means the driver will assume FDMI-2 support; 5132# Traditional FDMI support means the driver will assume FDMI-2 support;
5128# however, if that fails, it will fallback to FDMI-1. 5133# however, if that fails, it will fallback to FDMI-1.
5129# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on. 5134# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5130# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of 5135# If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5131# lpfc_fdmi_on. 5136# lpfc_fdmi_on.
5132# Value range [0,1]. Default value is 0. 5137# Value range [0,1]. Default value is 1.
5133*/ 5138*/
5134LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support"); 5139LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5135 5140
5136/* 5141/*
5137# Specifies the maximum number of ELS cmds we can have outstanding (for 5142# Specifies the maximum number of ELS cmds we can have outstanding (for
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9df0c051349f..aec5b10a8c85 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
551 unsigned char *statep; 551 unsigned char *statep;
552 struct nvme_fc_local_port *localport; 552 struct nvme_fc_local_port *localport;
553 struct lpfc_nvmet_tgtport *tgtp; 553 struct lpfc_nvmet_tgtport *tgtp;
554 struct nvme_fc_remote_port *nrport; 554 struct nvme_fc_remote_port *nrport = NULL;
555 struct lpfc_nvme_rport *rport; 555 struct lpfc_nvme_rport *rport;
556 556
557 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); 557 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
696 len += snprintf(buf + len, size - len, "\tRport List:\n"); 696 len += snprintf(buf + len, size - len, "\tRport List:\n");
697 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 697 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
698 /* local short-hand pointer. */ 698 /* local short-hand pointer. */
699 spin_lock(&phba->hbalock);
699 rport = lpfc_ndlp_get_nrport(ndlp); 700 rport = lpfc_ndlp_get_nrport(ndlp);
700 if (!rport) 701 if (rport)
701 continue; 702 nrport = rport->remoteport;
702 703 spin_unlock(&phba->hbalock);
703 nrport = rport->remoteport;
704 if (!nrport) 704 if (!nrport)
705 continue; 705 continue;
706 706
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 028462e5994d..918ae18ef8a8 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2725 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2725 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2726 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2726 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2727 2727
2728 spin_lock_irq(&vport->phba->hbalock);
2728 oldrport = lpfc_ndlp_get_nrport(ndlp); 2729 oldrport = lpfc_ndlp_get_nrport(ndlp);
2730 spin_unlock_irq(&vport->phba->hbalock);
2729 if (!oldrport) 2731 if (!oldrport)
2730 lpfc_nlp_get(ndlp); 2732 lpfc_nlp_get(ndlp);
2731 2733
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2840 struct nvme_fc_local_port *localport; 2842 struct nvme_fc_local_port *localport;
2841 struct lpfc_nvme_lport *lport; 2843 struct lpfc_nvme_lport *lport;
2842 struct lpfc_nvme_rport *rport; 2844 struct lpfc_nvme_rport *rport;
2843 struct nvme_fc_remote_port *remoteport; 2845 struct nvme_fc_remote_port *remoteport = NULL;
2844 2846
2845 localport = vport->localport; 2847 localport = vport->localport;
2846 2848
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2854 if (!lport) 2856 if (!lport)
2855 goto input_err; 2857 goto input_err;
2856 2858
2859 spin_lock_irq(&vport->phba->hbalock);
2857 rport = lpfc_ndlp_get_nrport(ndlp); 2860 rport = lpfc_ndlp_get_nrport(ndlp);
2858 if (!rport) 2861 if (rport)
2862 remoteport = rport->remoteport;
2863 spin_unlock_irq(&vport->phba->hbalock);
2864 if (!remoteport)
2859 goto input_err; 2865 goto input_err;
2860 2866
2861 remoteport = rport->remoteport;
2862 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2867 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2863 "6033 Unreg nvme remoteport %p, portname x%llx, " 2868 "6033 Unreg nvme remoteport %p, portname x%llx, "
2864 "port_id x%06x, portstate x%x port type x%x\n", 2869 "port_id x%06x, portstate x%x port type x%x\n",
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index fc3babc15fa3..a6f96b35e971 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
77 QEDI_NVM_TGT_SEC, 77 QEDI_NVM_TGT_SEC,
78}; 78};
79 79
80struct qedi_nvm_iscsi_image {
81 struct nvm_iscsi_cfg iscsi_cfg;
82 u32 crc;
83};
84
80struct qedi_uio_ctrl { 85struct qedi_uio_ctrl {
81 /* meta data */ 86 /* meta data */
82 u32 uio_hsi_version; 87 u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
294 void *bdq_pbl_list; 299 void *bdq_pbl_list;
295 dma_addr_t bdq_pbl_list_dma; 300 dma_addr_t bdq_pbl_list_dma;
296 u8 bdq_pbl_list_num_entries; 301 u8 bdq_pbl_list_num_entries;
297 struct nvm_iscsi_cfg *iscsi_cfg; 302 struct qedi_nvm_iscsi_image *iscsi_image;
298 dma_addr_t nvm_buf_dma; 303 dma_addr_t nvm_buf_dma;
299 void __iomem *bdq_primary_prod; 304 void __iomem *bdq_primary_prod;
300 void __iomem *bdq_secondary_prod; 305 void __iomem *bdq_secondary_prod;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index aa96bccb5a96..cc8e64dc65ad 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1346,23 +1346,26 @@ exit_setup_int:
1346 1346
1347static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1347static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1348{ 1348{
1349 if (qedi->iscsi_cfg) 1349 if (qedi->iscsi_image)
1350 dma_free_coherent(&qedi->pdev->dev, 1350 dma_free_coherent(&qedi->pdev->dev,
1351 sizeof(struct nvm_iscsi_cfg), 1351 sizeof(struct qedi_nvm_iscsi_image),
1352 qedi->iscsi_cfg, qedi->nvm_buf_dma); 1352 qedi->iscsi_image, qedi->nvm_buf_dma);
1353} 1353}
1354 1354
1355static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1355static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1356{ 1356{
1357 qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, 1357 struct qedi_nvm_iscsi_image nvm_image;
1358 sizeof(struct nvm_iscsi_cfg), 1358
1359 &qedi->nvm_buf_dma, GFP_KERNEL); 1359 qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
1360 if (!qedi->iscsi_cfg) { 1360 sizeof(nvm_image),
1361 &qedi->nvm_buf_dma,
1362 GFP_KERNEL);
1363 if (!qedi->iscsi_image) {
1361 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1364 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1362 return -ENOMEM; 1365 return -ENOMEM;
1363 } 1366 }
1364 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1367 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1365 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, 1368 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
1366 qedi->nvm_buf_dma); 1369 qedi->nvm_buf_dma);
1367 1370
1368 return 0; 1371 return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
1905 struct nvm_iscsi_block *block; 1908 struct nvm_iscsi_block *block;
1906 1909
1907 pf = qedi->dev_info.common.abs_pf_id; 1910 pf = qedi->dev_info.common.abs_pf_id;
1908 block = &qedi->iscsi_cfg->block[0]; 1911 block = &qedi->iscsi_image->iscsi_cfg.block[0];
1909 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { 1912 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
1910 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> 1913 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
1911 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; 1914 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
2194static int qedi_get_boot_info(struct qedi_ctx *qedi) 2197static int qedi_get_boot_info(struct qedi_ctx *qedi)
2195{ 2198{
2196 int ret = 1; 2199 int ret = 1;
2197 u16 len; 2200 struct qedi_nvm_iscsi_image nvm_image;
2198
2199 len = sizeof(struct nvm_iscsi_cfg);
2200 2201
2201 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 2202 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2202 "Get NVM iSCSI CFG image\n"); 2203 "Get NVM iSCSI CFG image\n");
2203 ret = qedi_ops->common->nvm_get_image(qedi->cdev, 2204 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2204 QED_NVM_IMAGE_ISCSI_CFG, 2205 QED_NVM_IMAGE_ISCSI_CFG,
2205 (char *)qedi->iscsi_cfg, len); 2206 (char *)qedi->iscsi_image,
2207 sizeof(nvm_image));
2206 if (ret) 2208 if (ret)
2207 QEDI_ERR(&qedi->dbg_ctx, 2209 QEDI_ERR(&qedi->dbg_ctx,
2208 "Could not get NVM image. ret = %d\n", ret); 2210 "Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index fecf96f0225c..199d3ba1916d 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -374,8 +374,8 @@ struct atio_from_isp {
374static inline int fcpcmd_is_corrupted(struct atio *atio) 374static inline int fcpcmd_is_corrupted(struct atio *atio)
375{ 375{
376 if (atio->entry_type == ATIO_TYPE7 && 376 if (atio->entry_type == ATIO_TYPE7 &&
377 (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < 377 ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
378 FCP_CMD_LENGTH_MIN)) 378 FCP_CMD_LENGTH_MIN))
379 return 1; 379 return 1;
380 else 380 else
381 return 0; 381 return 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0adfb3bce0fd..eb97d2dd3651 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
345 unsigned long flags; 345 unsigned long flags;
346 346
347 rcu_read_lock(); 347 rcu_read_lock();
348 if (!shost->use_blk_mq) 348 atomic_dec(&shost->host_busy);
349 atomic_dec(&shost->host_busy);
350 if (unlikely(scsi_host_in_recovery(shost))) { 349 if (unlikely(scsi_host_in_recovery(shost))) {
351 spin_lock_irqsave(shost->host_lock, flags); 350 spin_lock_irqsave(shost->host_lock, flags);
352 if (shost->host_failed || shost->host_eh_scheduled) 351 if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
445 444
446static inline bool scsi_host_is_busy(struct Scsi_Host *shost) 445static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
447{ 446{
448 /* 447 if (shost->can_queue > 0 &&
449 * blk-mq can handle host queue busy efficiently via host-wide driver
450 * tag allocation
451 */
452
453 if (!shost->use_blk_mq && shost->can_queue > 0 &&
454 atomic_read(&shost->host_busy) >= shost->can_queue) 448 atomic_read(&shost->host_busy) >= shost->can_queue)
455 return true; 449 return true;
456 if (atomic_read(&shost->host_blocked) > 0) 450 if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1606 if (scsi_host_in_recovery(shost)) 1600 if (scsi_host_in_recovery(shost))
1607 return 0; 1601 return 0;
1608 1602
1609 if (!shost->use_blk_mq) 1603 busy = atomic_inc_return(&shost->host_busy) - 1;
1610 busy = atomic_inc_return(&shost->host_busy) - 1;
1611 else
1612 busy = 0;
1613 if (atomic_read(&shost->host_blocked) > 0) { 1604 if (atomic_read(&shost->host_blocked) > 0) {
1614 if (busy) 1605 if (busy)
1615 goto starved; 1606 goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1625 "unblocking host at zero depth\n")); 1616 "unblocking host at zero depth\n"));
1626 } 1617 }
1627 1618
1628 if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) 1619 if (shost->can_queue > 0 && busy >= shost->can_queue)
1629 goto starved; 1620 goto starved;
1630 if (shost->host_self_blocked) 1621 if (shost->host_self_blocked)
1631 goto starved; 1622 goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1711 * with the locks as normal issue path does. 1702 * with the locks as normal issue path does.
1712 */ 1703 */
1713 atomic_inc(&sdev->device_busy); 1704 atomic_inc(&sdev->device_busy);
1714 1705 atomic_inc(&shost->host_busy);
1715 if (!shost->use_blk_mq)
1716 atomic_inc(&shost->host_busy);
1717 if (starget->can_queue > 0) 1706 if (starget->can_queue > 0)
1718 atomic_inc(&starget->target_busy); 1707 atomic_inc(&starget->target_busy);
1719 1708
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..4a57ffecc7e6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1276 case REQ_OP_ZONE_RESET: 1276 case REQ_OP_ZONE_RESET:
1277 return sd_zbc_setup_reset_cmnd(cmd); 1277 return sd_zbc_setup_reset_cmnd(cmd);
1278 default: 1278 default:
1279 BUG(); 1279 WARN_ON_ONCE(1);
1280 return BLKPREP_KILL;
1280 } 1281 }
1281} 1282}
1282 1283
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2959 if (rot == 1) { 2960 if (rot == 1) {
2960 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2961 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2961 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2962 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2963 } else {
2964 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2965 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2962 } 2966 }
2963 2967
2964 if (sdkp->device->type == TYPE_ZBC) { 2968 if (sdkp->device->type == TYPE_ZBC) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d5d2ca7fc4f..c55f38ec391c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7940 err = -ENOMEM; 7940 err = -ENOMEM;
7941 goto out_error; 7941 goto out_error;
7942 } 7942 }
7943
7944 /*
7945 * Do not use blk-mq at this time because blk-mq does not support
7946 * runtime pm.
7947 */
7948 host->use_blk_mq = false;
7949
7943 hba = shost_priv(host); 7950 hba = shost_priv(host);
7944 hba->host = host; 7951 hba->host = host;
7945 hba->dev = dev; 7952 hba->dev = dev;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..8cc015183043 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2729{ 2729{
2730 unsigned long addr; 2730 unsigned long addr;
2731 2731
2732 if (!p)
2733 return -ENODEV;
2734
2732 addr = gen_pool_alloc(p, cnt); 2735 addr = gen_pool_alloc(p, cnt);
2733 if (!addr) 2736 if (!addr)
2734 return -ENOMEM; 2737 return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
626{ 626{
627 u32 shift; 627 u32 shift;
628 628
629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE; 629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
630 shift -= tdm_num * 2; 630 shift -= tdm_num * 2;
631 631
632 return shift; 632 return shift;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4b5e250e8615..e5c7e1ef6318 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
899 struct sdw_master_runtime *m_rt = stream->m_rt; 899 struct sdw_master_runtime *m_rt = stream->m_rt;
900 struct sdw_slave_runtime *s_rt, *_s_rt; 900 struct sdw_slave_runtime *s_rt, *_s_rt;
901 901
902 list_for_each_entry_safe(s_rt, _s_rt, 902 list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
903 &m_rt->slave_rt_list, m_rt_node) 903 sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
904 sdw_stream_remove_slave(s_rt->slave, stream); 904 sdw_release_slave_stream(s_rt->slave, stream);
905 }
905 906
906 list_del(&m_rt->bus_node); 907 list_del(&m_rt->bus_node);
907} 908}
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1112 "Master runtime config failed for stream:%s", 1113 "Master runtime config failed for stream:%s",
1113 stream->name); 1114 stream->name);
1114 ret = -ENOMEM; 1115 ret = -ENOMEM;
1115 goto error; 1116 goto unlock;
1116 } 1117 }
1117 1118
1118 ret = sdw_config_stream(bus->dev, stream, stream_config, false); 1119 ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1123 if (ret) 1124 if (ret)
1124 goto stream_error; 1125 goto stream_error;
1125 1126
1126 stream->state = SDW_STREAM_CONFIGURED; 1127 goto unlock;
1127 1128
1128stream_error: 1129stream_error:
1129 sdw_release_master_stream(stream); 1130 sdw_release_master_stream(stream);
1130error: 1131unlock:
1131 mutex_unlock(&bus->bus_lock); 1132 mutex_unlock(&bus->bus_lock);
1132 return ret; 1133 return ret;
1133} 1134}
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
1141 * @stream: SoundWire stream 1142 * @stream: SoundWire stream
1142 * @port_config: Port configuration for audio stream 1143 * @port_config: Port configuration for audio stream
1143 * @num_ports: Number of ports 1144 * @num_ports: Number of ports
1145 *
1146 * It is expected that Slave is added before adding Master
1147 * to the Stream.
1148 *
1144 */ 1149 */
1145int sdw_stream_add_slave(struct sdw_slave *slave, 1150int sdw_stream_add_slave(struct sdw_slave *slave,
1146 struct sdw_stream_config *stream_config, 1151 struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
1186 if (ret) 1191 if (ret)
1187 goto stream_error; 1192 goto stream_error;
1188 1193
1194 /*
1195 * Change stream state to CONFIGURED on first Slave add.
1196 * Bus is not aware of number of Slave(s) in a stream at this
1197 * point so cannot depend on all Slave(s) to be added in order to
1198 * change stream state to CONFIGURED.
1199 */
1189 stream->state = SDW_STREAM_CONFIGURED; 1200 stream->state = SDW_STREAM_CONFIGURED;
1190 goto error; 1201 goto error;
1191 1202
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
30 30
31#define DRIVER_NAME "fsl-dspi" 31#define DRIVER_NAME "fsl-dspi"
32 32
33#ifdef CONFIG_M5441x
34#define DSPI_FIFO_SIZE 16
35#else
33#define DSPI_FIFO_SIZE 4 36#define DSPI_FIFO_SIZE 4
37#endif
34#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) 38#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
35 39
36#define SPI_MCR 0x00 40#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
623static void dspi_eoq_write(struct fsl_dspi *dspi) 627static void dspi_eoq_write(struct fsl_dspi *dspi)
624{ 628{
625 int fifo_size = DSPI_FIFO_SIZE; 629 int fifo_size = DSPI_FIFO_SIZE;
630 u16 xfer_cmd = dspi->tx_cmd;
626 631
627 /* Fill TX FIFO with as many transfers as possible */ 632 /* Fill TX FIFO with as many transfers as possible */
628 while (dspi->len && fifo_size--) { 633 while (dspi->len && fifo_size--) {
634 dspi->tx_cmd = xfer_cmd;
629 /* Request EOQF for last transfer in FIFO */ 635 /* Request EOQF for last transfer in FIFO */
630 if (dspi->len == dspi->bytes_per_word || fifo_size == 0) 636 if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
631 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 637 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0626e6e3ea0c..421bfc7dda67 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
300 *mflags |= SPI_MASTER_NO_RX; 300 *mflags |= SPI_MASTER_NO_RX;
301 301
302 spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); 302 spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
303 if (IS_ERR(spi_gpio->mosi)) 303 if (IS_ERR(spi_gpio->sck))
304 return PTR_ERR(spi_gpio->mosi); 304 return PTR_ERR(spi_gpio->sck);
305 305
306 for (i = 0; i < num_chipselects; i++) { 306 for (i = 0; i < num_chipselects; i++) {
307 spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", 307 spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 95dc4d78618d..b37de1d991d6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
598 598
599 ret = wait_event_interruptible_timeout(rspi->wait, 599 ret = wait_event_interruptible_timeout(rspi->wait,
600 rspi->dma_callbacked, HZ); 600 rspi->dma_callbacked, HZ);
601 if (ret > 0 && rspi->dma_callbacked) 601 if (ret > 0 && rspi->dma_callbacked) {
602 ret = 0; 602 ret = 0;
603 else if (!ret) { 603 } else {
604 dev_err(&rspi->master->dev, "DMA timeout\n"); 604 if (!ret) {
605 ret = -ETIMEDOUT; 605 dev_err(&rspi->master->dev, "DMA timeout\n");
606 ret = -ETIMEDOUT;
607 }
606 if (tx) 608 if (tx)
607 dmaengine_terminate_all(rspi->master->dma_tx); 609 dmaengine_terminate_all(rspi->master->dma_tx);
608 if (rx) 610 if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
1350 1352
1351MODULE_DEVICE_TABLE(platform, spi_driver_ids); 1353MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1352 1354
1355#ifdef CONFIG_PM_SLEEP
1356static int rspi_suspend(struct device *dev)
1357{
1358 struct platform_device *pdev = to_platform_device(dev);
1359 struct rspi_data *rspi = platform_get_drvdata(pdev);
1360
1361 return spi_master_suspend(rspi->master);
1362}
1363
1364static int rspi_resume(struct device *dev)
1365{
1366 struct platform_device *pdev = to_platform_device(dev);
1367 struct rspi_data *rspi = platform_get_drvdata(pdev);
1368
1369 return spi_master_resume(rspi->master);
1370}
1371
1372static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
1373#define DEV_PM_OPS &rspi_pm_ops
1374#else
1375#define DEV_PM_OPS NULL
1376#endif /* CONFIG_PM_SLEEP */
1377
1353static struct platform_driver rspi_driver = { 1378static struct platform_driver rspi_driver = {
1354 .probe = rspi_probe, 1379 .probe = rspi_probe,
1355 .remove = rspi_remove, 1380 .remove = rspi_remove,
1356 .id_table = spi_driver_ids, 1381 .id_table = spi_driver_ids,
1357 .driver = { 1382 .driver = {
1358 .name = "renesas_spi", 1383 .name = "renesas_spi",
1384 .pm = DEV_PM_OPS,
1359 .of_match_table = of_match_ptr(rspi_of_match), 1385 .of_match_table = of_match_ptr(rspi_of_match),
1360 }, 1386 },
1361}; 1387};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 539d6d1a277a..101cd6aae2ea 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
397 397
398static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) 398static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
399{ 399{
400 sh_msiof_write(p, STR, sh_msiof_read(p, STR)); 400 sh_msiof_write(p, STR,
401 sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
401} 402}
402 403
403static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, 404static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
1426}; 1427};
1427MODULE_DEVICE_TABLE(platform, spi_driver_ids); 1428MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1428 1429
1430#ifdef CONFIG_PM_SLEEP
1431static int sh_msiof_spi_suspend(struct device *dev)
1432{
1433 struct platform_device *pdev = to_platform_device(dev);
1434 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1435
1436 return spi_master_suspend(p->master);
1437}
1438
1439static int sh_msiof_spi_resume(struct device *dev)
1440{
1441 struct platform_device *pdev = to_platform_device(dev);
1442 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1443
1444 return spi_master_resume(p->master);
1445}
1446
1447static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
1448 sh_msiof_spi_resume);
1449#define DEV_PM_OPS &sh_msiof_spi_pm_ops
1450#else
1451#define DEV_PM_OPS NULL
1452#endif /* CONFIG_PM_SLEEP */
1453
1429static struct platform_driver sh_msiof_spi_drv = { 1454static struct platform_driver sh_msiof_spi_drv = {
1430 .probe = sh_msiof_spi_probe, 1455 .probe = sh_msiof_spi_probe,
1431 .remove = sh_msiof_spi_remove, 1456 .remove = sh_msiof_spi_remove,
1432 .id_table = spi_driver_ids, 1457 .id_table = spi_driver_ids,
1433 .driver = { 1458 .driver = {
1434 .name = "spi_sh_msiof", 1459 .name = "spi_sh_msiof",
1460 .pm = DEV_PM_OPS,
1435 .of_match_table = of_match_ptr(sh_msiof_match), 1461 .of_match_table = of_match_ptr(sh_msiof_match),
1436 }, 1462 },
1437}; 1463};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 6f7b946b5ced..1427f343b39a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
1063 goto exit_free_master; 1063 goto exit_free_master;
1064 } 1064 }
1065 1065
1066 /* disabled clock may cause interrupt storm upon request */
1067 tspi->clk = devm_clk_get(&pdev->dev, NULL);
1068 if (IS_ERR(tspi->clk)) {
1069 ret = PTR_ERR(tspi->clk);
1070 dev_err(&pdev->dev, "Can not get clock %d\n", ret);
1071 goto exit_free_master;
1072 }
1073 ret = clk_prepare(tspi->clk);
1074 if (ret < 0) {
1075 dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
1076 goto exit_free_master;
1077 }
1078 ret = clk_enable(tspi->clk);
1079 if (ret < 0) {
1080 dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
1081 goto exit_free_master;
1082 }
1083
1066 spi_irq = platform_get_irq(pdev, 0); 1084 spi_irq = platform_get_irq(pdev, 0);
1067 tspi->irq = spi_irq; 1085 tspi->irq = spi_irq;
1068 ret = request_threaded_irq(tspi->irq, tegra_slink_isr, 1086 ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
1071 if (ret < 0) { 1089 if (ret < 0) {
1072 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", 1090 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1073 tspi->irq); 1091 tspi->irq);
1074 goto exit_free_master; 1092 goto exit_clk_disable;
1075 }
1076
1077 tspi->clk = devm_clk_get(&pdev->dev, NULL);
1078 if (IS_ERR(tspi->clk)) {
1079 dev_err(&pdev->dev, "can not get clock\n");
1080 ret = PTR_ERR(tspi->clk);
1081 goto exit_free_irq;
1082 } 1093 }
1083 1094
1084 tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); 1095 tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
1138 tegra_slink_deinit_dma_param(tspi, true); 1149 tegra_slink_deinit_dma_param(tspi, true);
1139exit_free_irq: 1150exit_free_irq:
1140 free_irq(spi_irq, tspi); 1151 free_irq(spi_irq, tspi);
1152exit_clk_disable:
1153 clk_disable(tspi->clk);
1141exit_free_master: 1154exit_free_master:
1142 spi_master_put(master); 1155 spi_master_put(master);
1143 return ret; 1156 return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
1150 1163
1151 free_irq(tspi->irq, tspi); 1164 free_irq(tspi->irq, tspi);
1152 1165
1166 clk_disable(tspi->clk);
1167
1153 if (tspi->tx_dma_chan) 1168 if (tspi->tx_dma_chan)
1154 tegra_slink_deinit_dma_param(tspi, false); 1169 tegra_slink_deinit_dma_param(tspi, false);
1155 1170
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..9da0bc5a036c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
2143 */ 2143 */
2144 if (ctlr->num_chipselect == 0) 2144 if (ctlr->num_chipselect == 0)
2145 return -EINVAL; 2145 return -EINVAL;
2146 /* allocate dynamic bus number using Linux idr */ 2146 if (ctlr->bus_num >= 0) {
2147 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2147 /* devices with a fixed bus num must check-in with the num */
2148 mutex_lock(&board_lock);
2149 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2150 ctlr->bus_num + 1, GFP_KERNEL);
2151 mutex_unlock(&board_lock);
2152 if (WARN(id < 0, "couldn't get idr"))
2153 return id == -ENOSPC ? -EBUSY : id;
2154 ctlr->bus_num = id;
2155 } else if (ctlr->dev.of_node) {
2156 /* allocate dynamic bus number using Linux idr */
2148 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2157 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2149 if (id >= 0) { 2158 if (id >= 0) {
2150 ctlr->bus_num = id; 2159 ctlr->bus_num = id;
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f614934df1..663b755bf2fb 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -2,7 +2,7 @@
2 2
3config EROFS_FS 3config EROFS_FS
4 tristate "EROFS filesystem support" 4 tristate "EROFS filesystem support"
5 depends on BROKEN 5 depends on BLOCK
6 help 6 help
7 EROFS(Enhanced Read-Only File System) is a lightweight 7 EROFS(Enhanced Read-Only File System) is a lightweight
8 read-only file system with modern designs (eg. page-sized 8 read-only file system with modern designs (eg. page-sized
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1aec509c805f..2df9768edac9 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
340 goto err_sbread; 340 goto err_sbread;
341 341
342 sb->s_magic = EROFS_SUPER_MAGIC; 342 sb->s_magic = EROFS_SUPER_MAGIC;
343 sb->s_flags |= MS_RDONLY | MS_NOATIME; 343 sb->s_flags |= SB_RDONLY | SB_NOATIME;
344 sb->s_maxbytes = MAX_LFS_FILESIZE; 344 sb->s_maxbytes = MAX_LFS_FILESIZE;
345 sb->s_time_gran = 1; 345 sb->s_time_gran = 1;
346 346
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
627{ 627{
628 BUG_ON(!sb_rdonly(sb)); 628 BUG_ON(!sb_rdonly(sb));
629 629
630 *flags |= MS_RDONLY; 630 *flags |= SB_RDONLY;
631 return 0; 631 return 0;
632} 632}
633 633
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
index 7e64c7e438f0..a9f4802bb6be 100644
--- a/drivers/staging/fbtft/TODO
+++ b/drivers/staging/fbtft/TODO
@@ -2,3 +2,7 @@
2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO 2 GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
3 lines from device tree, ACPI or board files, board files should 3 lines from device tree, ACPI or board files, board files should
4 use <linux/gpio/machine.h> 4 use <linux/gpio/machine.h>
5
6* convert all these over to drm_simple_display_pipe and submit for inclusion
7 into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
8 drivers anymore.
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
index 6ff8e01b04cc..5b1865f8af2d 100644
--- a/drivers/staging/gasket/TODO
+++ b/drivers/staging/gasket/TODO
@@ -1,9 +1,22 @@
1This is a list of things that need to be done to get this driver out of the 1This is a list of things that need to be done to get this driver out of the
2staging directory. 2staging directory.
3
4- Implement the gasket framework's functionality through UIO instead of
5 introducing a new user-space drivers framework that is quite similar.
6
7 UIO provides the necessary bits to implement user-space drivers. Meanwhile
8 the gasket APIs adds some extra conveniences like PCI BAR mapping, and
9 MSI interrupts. Add these features to the UIO subsystem, then re-implement
10 the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h)
11
3- Document sysfs files with Documentation/ABI/ entries. 12- Document sysfs files with Documentation/ABI/ entries.
13
4- Use misc interface instead of major number for driver version description. 14- Use misc interface instead of major number for driver version description.
15
5- Add descriptions of module_param's 16- Add descriptions of module_param's
17
6- apex_get_status() should actually check status. 18- apex_get_status() should actually check status.
19
7- "drivers" should never be dealing with "raw" sysfs calls or mess around with 20- "drivers" should never be dealing with "raw" sysfs calls or mess around with
8 kobjects at all. The driver core should handle all of this for you 21 kobjects at all. The driver core should handle all of this for you
9 automaically. There should not be a need for raw attribute macros. 22 automaically. There should not be a need for raw attribute macros.
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
index f48e06a03cdb..9a58aaf72edd 100644
--- a/drivers/staging/media/mt9t031/Kconfig
+++ b/drivers/staging/media/mt9t031/Kconfig
@@ -1,9 +1,3 @@
1config SOC_CAMERA_IMX074
2 tristate "imx074 support (DEPRECATED)"
3 depends on SOC_CAMERA && I2C
4 help
5 This driver supports IMX074 cameras from Sony
6
7config SOC_CAMERA_MT9T031 1config SOC_CAMERA_MT9T031
8 tristate "mt9t031 support (DEPRECATED)" 2 tristate "mt9t031 support (DEPRECATED)"
9 depends on SOC_CAMERA && I2C 3 depends on SOC_CAMERA && I2C
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index da92c493f157..69cc508af1bc 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
59 ret = PTR_ERR(dev); 59 ret = PTR_ERR(dev);
60 goto err_drv_alloc; 60 goto err_drv_alloc;
61 } 61 }
62
63 ret = pci_enable_device(pdev);
64 if (ret)
65 goto err_pci_enable;
66
62 dev->pdev = pdev; 67 dev->pdev = pdev;
63 pci_set_drvdata(pdev, dev); 68 pci_set_drvdata(pdev, dev);
64 69
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
75 err_drv_dev_register: 80 err_drv_dev_register:
76 vbox_driver_unload(dev); 81 vbox_driver_unload(dev);
77 err_vbox_driver_load: 82 err_vbox_driver_load:
83 pci_disable_device(pdev);
84 err_pci_enable:
78 drm_dev_put(dev); 85 drm_dev_put(dev);
79 err_drv_alloc: 86 err_drv_alloc:
80 return ret; 87 return ret;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index a83eac8668d0..79836c8fb909 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
323 if (rc) 323 if (rc)
324 return rc; 324 return rc;
325 325
326 mutex_lock(&vbox->hw_mutex);
327 vbox_set_view(crtc);
328 vbox_do_modeset(crtc, &crtc->mode);
329 mutex_unlock(&vbox->hw_mutex);
330
326 spin_lock_irqsave(&drm->event_lock, flags); 331 spin_lock_irqsave(&drm->event_lock, flags);
327 332
328 if (event) 333 if (event)
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index f7b07c0b5ce2..ee7e26b886a5 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_WILC1000) += wilc1000.o
2 3
3ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ 4ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
4 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" 5 -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
11 wilc_wlan.o 12 wilc_wlan.o
12 13
13obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o 14obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
14wilc1000-sdio-objs += $(wilc1000-objs)
15wilc1000-sdio-objs += wilc_sdio.o 15wilc1000-sdio-objs += wilc_sdio.o
16 16
17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o 17obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
18wilc1000-spi-objs += $(wilc1000-objs)
19wilc1000-spi-objs += wilc_spi.o 18wilc1000-spi-objs += wilc_spi.o
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 01cf4bd2e192..3b8d237decbf 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
1038 } 1038 }
1039 1039
1040 kfree(wilc); 1040 kfree(wilc);
1041 wilc_debugfs_remove();
1042} 1041}
1042EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
1043 1043
1044static const struct net_device_ops wilc_netdev_ops = { 1044static const struct net_device_ops wilc_netdev_ops = {
1045 .ndo_init = mac_init_fn, 1045 .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1062 if (!wl) 1062 if (!wl)
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 1064
1065 wilc_debugfs_init();
1066 *wilc = wl; 1065 *wilc = wl;
1067 wl->io_type = io_type; 1066 wl->io_type = io_type;
1068 wl->hif_func = ops; 1067 wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
1124 1123
1125 return 0; 1124 return 0;
1126} 1125}
1126EXPORT_SYMBOL_GPL(wilc_netdev_init);
1127
1128MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index edc72876458d..8001df66b8c2 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
19 19
20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR) 20#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR); 21static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
22EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
22 23
23static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf, 24static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
24 size_t count, loff_t *ppos) 25 size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
87 }, 88 },
88}; 89};
89 90
90int wilc_debugfs_init(void) 91static int __init wilc_debugfs_init(void)
91{ 92{
92 int i; 93 int i;
93 struct wilc_debugfs_info_t *info; 94 struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
103 } 104 }
104 return 0; 105 return 0;
105} 106}
107module_init(wilc_debugfs_init);
106 108
107void wilc_debugfs_remove(void) 109static void __exit wilc_debugfs_remove(void)
108{ 110{
109 debugfs_remove_recursive(wilc_dir); 111 debugfs_remove_recursive(wilc_dir);
110} 112}
113module_exit(wilc_debugfs_remove);
111 114
112#endif 115#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 6787b6e9f124..8b184aa30d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0)); 417 wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0); 418 wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
419} 419}
420EXPORT_SYMBOL_GPL(chip_allow_sleep);
420 421
421void chip_wakeup(struct wilc *wilc) 422void chip_wakeup(struct wilc *wilc)
422{ 423{
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
471 } 472 }
472 chip_ps_state = CHIP_WAKEDUP; 473 chip_ps_state = CHIP_WAKEDUP;
473} 474}
475EXPORT_SYMBOL_GPL(chip_wakeup);
474 476
475void wilc_chip_sleep_manually(struct wilc *wilc) 477void wilc_chip_sleep_manually(struct wilc *wilc)
476{ 478{
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
484 chip_ps_state = CHIP_SLEEPING_MANUAL; 486 chip_ps_state = CHIP_SLEEPING_MANUAL;
485 release_bus(wilc, RELEASE_ONLY); 487 release_bus(wilc, RELEASE_ONLY);
486} 488}
489EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
487 490
488void host_wakeup_notify(struct wilc *wilc) 491void host_wakeup_notify(struct wilc *wilc)
489{ 492{
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
491 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1); 494 wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
492 release_bus(wilc, RELEASE_ONLY); 495 release_bus(wilc, RELEASE_ONLY);
493} 496}
497EXPORT_SYMBOL_GPL(host_wakeup_notify);
494 498
495void host_sleep_notify(struct wilc *wilc) 499void host_sleep_notify(struct wilc *wilc)
496{ 500{
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
498 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1); 502 wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
499 release_bus(wilc, RELEASE_ONLY); 503 release_bus(wilc, RELEASE_ONLY);
500} 504}
505EXPORT_SYMBOL_GPL(host_sleep_notify);
501 506
502int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count) 507int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
503{ 508{
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
871 876
872 release_bus(wilc, RELEASE_ALLOW_SLEEP); 877 release_bus(wilc, RELEASE_ALLOW_SLEEP);
873} 878}
879EXPORT_SYMBOL_GPL(wilc_handle_isr);
874 880
875int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, 881int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
876 u32 buffer_size) 882 u32 buffer_size)
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 00d13b153f80..b81a73b9bd67 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -831,6 +831,4 @@ struct wilc;
831int wilc_wlan_init(struct net_device *dev); 831int wilc_wlan_init(struct net_device *dev);
832u32 wilc_get_chipid(struct wilc *wilc, bool update); 832u32 wilc_get_chipid(struct wilc *wilc, bool update);
833 833
834int wilc_debugfs_init(void);
835void wilc_debugfs_remove(void);
836#endif 834#endif
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 768cce0ccb80..76a262674c8d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
207 ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE); 207 ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
208 sgl->offset = sg_offset; 208 sgl->offset = sg_offset;
209 if (!ret) { 209 if (!ret) {
210 pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n", 210 pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
211 __func__, 0, xferlen, sgcnt); 211 __func__, 0, xferlen, sgcnt);
212 goto rel_ppods; 212 goto rel_ppods;
213 } 213 }
214 214
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
250 250
251 ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length); 251 ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
252 if (ret < 0) { 252 if (ret < 0) {
253 pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n", 253 pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
254 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents); 254 csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
255 255
256 ttinfo->sgl = NULL; 256 ttinfo->sgl = NULL;
257 ttinfo->nents = 0; 257 ttinfo->nents = 0;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 94bad43c41ff..cc756a123fd8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
1416 1416
1417 sg_init_table(sg, ARRAY_SIZE(sg)); 1417 sg_init_table(sg, ARRAY_SIZE(sg));
1418 sg_set_buf(sg, buf, payload_length); 1418 sg_set_buf(sg, buf, payload_length);
1419 sg_set_buf(sg + 1, pad_bytes, padding); 1419 if (padding)
1420 sg_set_buf(sg + 1, pad_bytes, padding);
1420 1421
1421 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding); 1422 ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
1422 1423
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
3910static void iscsit_get_rx_pdu(struct iscsi_conn *conn) 3911static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3911{ 3912{
3912 int ret; 3913 int ret;
3913 u8 buffer[ISCSI_HDR_LEN], opcode; 3914 u8 *buffer, opcode;
3914 u32 checksum = 0, digest = 0; 3915 u32 checksum = 0, digest = 0;
3915 struct kvec iov; 3916 struct kvec iov;
3916 3917
3918 buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
3919 if (!buffer)
3920 return;
3921
3917 while (!kthread_should_stop()) { 3922 while (!kthread_should_stop()) {
3918 /* 3923 /*
3919 * Ensure that both TX and RX per connection kthreads 3924 * Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3921 */ 3926 */
3922 iscsit_thread_check_cpumask(conn, current, 0); 3927 iscsit_thread_check_cpumask(conn, current, 0);
3923 3928
3924 memset(buffer, 0, ISCSI_HDR_LEN);
3925 memset(&iov, 0, sizeof(struct kvec)); 3929 memset(&iov, 0, sizeof(struct kvec));
3926 3930
3927 iov.iov_base = buffer; 3931 iov.iov_base = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3930 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN); 3934 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
3931 if (ret != ISCSI_HDR_LEN) { 3935 if (ret != ISCSI_HDR_LEN) {
3932 iscsit_rx_thread_wait_for_tcp(conn); 3936 iscsit_rx_thread_wait_for_tcp(conn);
3933 return; 3937 break;
3934 } 3938 }
3935 3939
3936 if (conn->conn_ops->HeaderDigest) { 3940 if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3940 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN); 3944 ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
3941 if (ret != ISCSI_CRC_LEN) { 3945 if (ret != ISCSI_CRC_LEN) {
3942 iscsit_rx_thread_wait_for_tcp(conn); 3946 iscsit_rx_thread_wait_for_tcp(conn);
3943 return; 3947 break;
3944 } 3948 }
3945 3949
3946 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer, 3950 iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3964 } 3968 }
3965 3969
3966 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) 3970 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
3967 return; 3971 break;
3968 3972
3969 opcode = buffer[0] & ISCSI_OPCODE_MASK; 3973 opcode = buffer[0] & ISCSI_OPCODE_MASK;
3970 3974
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
3975 " while in Discovery Session, rejecting.\n", opcode); 3979 " while in Discovery Session, rejecting.\n", opcode);
3976 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, 3980 iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
3977 buffer); 3981 buffer);
3978 return; 3982 break;
3979 } 3983 }
3980 3984
3981 ret = iscsi_target_rx_opcode(conn, buffer); 3985 ret = iscsi_target_rx_opcode(conn, buffer);
3982 if (ret < 0) 3986 if (ret < 0)
3983 return; 3987 break;
3984 } 3988 }
3989
3990 kfree(buffer);
3985} 3991}
3986 3992
3987int iscsi_target_rx_thread(void *arg) 3993int iscsi_target_rx_thread(void *arg)
@@ -4208,22 +4214,15 @@ int iscsit_close_connection(
4208 crypto_free_ahash(tfm); 4214 crypto_free_ahash(tfm);
4209 } 4215 }
4210 4216
4211 free_cpumask_var(conn->conn_cpumask);
4212
4213 kfree(conn->conn_ops);
4214 conn->conn_ops = NULL;
4215
4216 if (conn->sock) 4217 if (conn->sock)
4217 sock_release(conn->sock); 4218 sock_release(conn->sock);
4218 4219
4219 if (conn->conn_transport->iscsit_free_conn) 4220 if (conn->conn_transport->iscsit_free_conn)
4220 conn->conn_transport->iscsit_free_conn(conn); 4221 conn->conn_transport->iscsit_free_conn(conn);
4221 4222
4222 iscsit_put_transport(conn->conn_transport);
4223
4224 pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); 4223 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
4225 conn->conn_state = TARG_CONN_STATE_FREE; 4224 conn->conn_state = TARG_CONN_STATE_FREE;
4226 kfree(conn); 4225 iscsit_free_conn(conn);
4227 4226
4228 spin_lock_bh(&sess->conn_lock); 4227 spin_lock_bh(&sess->conn_lock);
4229 atomic_dec(&sess->nconn); 4228 atomic_dec(&sess->nconn);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 9518ffd8b8ba..4e680d753941 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
26#include "iscsi_target_nego.h" 26#include "iscsi_target_nego.h"
27#include "iscsi_target_auth.h" 27#include "iscsi_target_auth.h"
28 28
29static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
30{
31 int j = DIV_ROUND_UP(len, 2), rc;
32
33 rc = hex2bin(dst, src, j);
34 if (rc < 0)
35 pr_debug("CHAP string contains non hex digit symbols\n");
36
37 dst[j] = '\0';
38 return j;
39}
40
41static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
42{
43 int i;
44
45 for (i = 0; i < src_len; i++) {
46 sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
47 }
48}
49
50static int chap_gen_challenge( 29static int chap_gen_challenge(
51 struct iscsi_conn *conn, 30 struct iscsi_conn *conn,
52 int caller, 31 int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
62 ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); 41 ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
63 if (unlikely(ret)) 42 if (unlikely(ret))
64 return ret; 43 return ret;
65 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, 44 bin2hex(challenge_asciihex, chap->challenge,
66 CHAP_CHALLENGE_LENGTH); 45 CHAP_CHALLENGE_LENGTH);
67 /* 46 /*
68 * Set CHAP_C, and copy the generated challenge into c_str. 47 * Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
248 pr_err("Could not find CHAP_R.\n"); 227 pr_err("Could not find CHAP_R.\n");
249 goto out; 228 goto out;
250 } 229 }
230 if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
231 pr_err("Malformed CHAP_R\n");
232 goto out;
233 }
234 if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
235 pr_err("Malformed CHAP_R\n");
236 goto out;
237 }
251 238
252 pr_debug("[server] Got CHAP_R=%s\n", chap_r); 239 pr_debug("[server] Got CHAP_R=%s\n", chap_r);
253 chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
254 240
255 tfm = crypto_alloc_shash("md5", 0, 0); 241 tfm = crypto_alloc_shash("md5", 0, 0);
256 if (IS_ERR(tfm)) { 242 if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
294 goto out; 280 goto out;
295 } 281 }
296 282
297 chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); 283 bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
298 pr_debug("[server] MD5 Server Digest: %s\n", response); 284 pr_debug("[server] MD5 Server Digest: %s\n", response);
299 285
300 if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { 286 if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
349 pr_err("Could not find CHAP_C.\n"); 335 pr_err("Could not find CHAP_C.\n");
350 goto out; 336 goto out;
351 } 337 }
352 pr_debug("[server] Got CHAP_C=%s\n", challenge); 338 challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
353 challenge_len = chap_string_to_hex(challenge_binhex, challenge,
354 strlen(challenge));
355 if (!challenge_len) { 339 if (!challenge_len) {
356 pr_err("Unable to convert incoming challenge\n"); 340 pr_err("Unable to convert incoming challenge\n");
357 goto out; 341 goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
360 pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); 344 pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
361 goto out; 345 goto out;
362 } 346 }
347 if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
348 pr_err("Malformed CHAP_C\n");
349 goto out;
350 }
351 pr_debug("[server] Got CHAP_C=%s\n", challenge);
363 /* 352 /*
364 * During mutual authentication, the CHAP_C generated by the 353 * During mutual authentication, the CHAP_C generated by the
365 * initiator must not match the original CHAP_C generated by 354 * initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
413 /* 402 /*
414 * Convert response from binary hex to ascii hext. 403 * Convert response from binary hex to ascii hext.
415 */ 404 */
416 chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); 405 bin2hex(response, digest, MD5_SIGNATURE_SIZE);
417 *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", 406 *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
418 response); 407 response);
419 *nr_out_len += 1; 408 *nr_out_len += 1;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 9e74f8bc2963..bb90c80ff388 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
67 goto out_req_buf; 67 goto out_req_buf;
68 } 68 }
69 69
70 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
71 if (!conn->conn_ops) {
72 pr_err("Unable to allocate memory for"
73 " struct iscsi_conn_ops.\n");
74 goto out_rsp_buf;
75 }
76
77 init_waitqueue_head(&conn->queues_wq);
78 INIT_LIST_HEAD(&conn->conn_list);
79 INIT_LIST_HEAD(&conn->conn_cmd_list);
80 INIT_LIST_HEAD(&conn->immed_queue_list);
81 INIT_LIST_HEAD(&conn->response_queue_list);
82 init_completion(&conn->conn_post_wait_comp);
83 init_completion(&conn->conn_wait_comp);
84 init_completion(&conn->conn_wait_rcfr_comp);
85 init_completion(&conn->conn_waiting_on_uc_comp);
86 init_completion(&conn->conn_logout_comp);
87 init_completion(&conn->rx_half_close_comp);
88 init_completion(&conn->tx_half_close_comp);
89 init_completion(&conn->rx_login_comp);
90 spin_lock_init(&conn->cmd_lock);
91 spin_lock_init(&conn->conn_usage_lock);
92 spin_lock_init(&conn->immed_queue_lock);
93 spin_lock_init(&conn->nopin_timer_lock);
94 spin_lock_init(&conn->response_queue_lock);
95 spin_lock_init(&conn->state_lock);
96
97 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
98 pr_err("Unable to allocate conn->conn_cpumask\n");
99 goto out_conn_ops;
100 }
101 conn->conn_login = login; 70 conn->conn_login = login;
102 71
103 return login; 72 return login;
104 73
105out_conn_ops:
106 kfree(conn->conn_ops);
107out_rsp_buf:
108 kfree(login->rsp_buf);
109out_req_buf: 74out_req_buf:
110 kfree(login->req_buf); 75 kfree(login->req_buf);
111out_login: 76out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
310 return -ENOMEM; 275 return -ENOMEM;
311 } 276 }
312 277
313 ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); 278 if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
314 if (unlikely(ret)) { 279 goto free_sess;
315 kfree(sess); 280
316 return ret;
317 }
318 sess->init_task_tag = pdu->itt; 281 sess->init_task_tag = pdu->itt;
319 memcpy(&sess->isid, pdu->isid, 6); 282 memcpy(&sess->isid, pdu->isid, 6);
320 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); 283 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
1149 return 0; 1112 return 0;
1150} 1113}
1151 1114
1115static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
1116{
1117 struct iscsi_conn *conn;
1118
1119 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
1120 if (!conn) {
1121 pr_err("Could not allocate memory for new connection\n");
1122 return NULL;
1123 }
1124 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
1125 conn->conn_state = TARG_CONN_STATE_FREE;
1126
1127 init_waitqueue_head(&conn->queues_wq);
1128 INIT_LIST_HEAD(&conn->conn_list);
1129 INIT_LIST_HEAD(&conn->conn_cmd_list);
1130 INIT_LIST_HEAD(&conn->immed_queue_list);
1131 INIT_LIST_HEAD(&conn->response_queue_list);
1132 init_completion(&conn->conn_post_wait_comp);
1133 init_completion(&conn->conn_wait_comp);
1134 init_completion(&conn->conn_wait_rcfr_comp);
1135 init_completion(&conn->conn_waiting_on_uc_comp);
1136 init_completion(&conn->conn_logout_comp);
1137 init_completion(&conn->rx_half_close_comp);
1138 init_completion(&conn->tx_half_close_comp);
1139 init_completion(&conn->rx_login_comp);
1140 spin_lock_init(&conn->cmd_lock);
1141 spin_lock_init(&conn->conn_usage_lock);
1142 spin_lock_init(&conn->immed_queue_lock);
1143 spin_lock_init(&conn->nopin_timer_lock);
1144 spin_lock_init(&conn->response_queue_lock);
1145 spin_lock_init(&conn->state_lock);
1146
1147 timer_setup(&conn->nopin_response_timer,
1148 iscsit_handle_nopin_response_timeout, 0);
1149 timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
1150
1151 if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
1152 goto free_conn;
1153
1154 conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
1155 if (!conn->conn_ops) {
1156 pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
1157 goto put_transport;
1158 }
1159
1160 if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
1161 pr_err("Unable to allocate conn->conn_cpumask\n");
1162 goto free_mask;
1163 }
1164
1165 return conn;
1166
1167free_mask:
1168 free_cpumask_var(conn->conn_cpumask);
1169put_transport:
1170 iscsit_put_transport(conn->conn_transport);
1171free_conn:
1172 kfree(conn);
1173 return NULL;
1174}
1175
1176void iscsit_free_conn(struct iscsi_conn *conn)
1177{
1178 free_cpumask_var(conn->conn_cpumask);
1179 kfree(conn->conn_ops);
1180 iscsit_put_transport(conn->conn_transport);
1181 kfree(conn);
1182}
1183
1152void iscsi_target_login_sess_out(struct iscsi_conn *conn, 1184void iscsi_target_login_sess_out(struct iscsi_conn *conn,
1153 struct iscsi_np *np, bool zero_tsih, bool new_sess) 1185 struct iscsi_np *np, bool zero_tsih, bool new_sess)
1154{ 1186{
@@ -1198,10 +1230,6 @@ old_sess_out:
1198 crypto_free_ahash(tfm); 1230 crypto_free_ahash(tfm);
1199 } 1231 }
1200 1232
1201 free_cpumask_var(conn->conn_cpumask);
1202
1203 kfree(conn->conn_ops);
1204
1205 if (conn->param_list) { 1233 if (conn->param_list) {
1206 iscsi_release_param_list(conn->param_list); 1234 iscsi_release_param_list(conn->param_list);
1207 conn->param_list = NULL; 1235 conn->param_list = NULL;
@@ -1219,8 +1247,7 @@ old_sess_out:
1219 if (conn->conn_transport->iscsit_free_conn) 1247 if (conn->conn_transport->iscsit_free_conn)
1220 conn->conn_transport->iscsit_free_conn(conn); 1248 conn->conn_transport->iscsit_free_conn(conn);
1221 1249
1222 iscsit_put_transport(conn->conn_transport); 1250 iscsit_free_conn(conn);
1223 kfree(conn);
1224} 1251}
1225 1252
1226static int __iscsi_target_login_thread(struct iscsi_np *np) 1253static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1250 } 1277 }
1251 spin_unlock_bh(&np->np_thread_lock); 1278 spin_unlock_bh(&np->np_thread_lock);
1252 1279
1253 conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL); 1280 conn = iscsit_alloc_conn(np);
1254 if (!conn) { 1281 if (!conn) {
1255 pr_err("Could not allocate memory for"
1256 " new connection\n");
1257 /* Get another socket */ 1282 /* Get another socket */
1258 return 1; 1283 return 1;
1259 } 1284 }
1260 pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
1261 conn->conn_state = TARG_CONN_STATE_FREE;
1262
1263 timer_setup(&conn->nopin_response_timer,
1264 iscsit_handle_nopin_response_timeout, 0);
1265 timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
1266
1267 if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
1268 kfree(conn);
1269 return 1;
1270 }
1271 1285
1272 rc = np->np_transport->iscsit_accept_np(np, conn); 1286 rc = np->np_transport->iscsit_accept_np(np, conn);
1273 if (rc == -ENOSYS) { 1287 if (rc == -ENOSYS) {
1274 complete(&np->np_restart_comp); 1288 complete(&np->np_restart_comp);
1275 iscsit_put_transport(conn->conn_transport); 1289 iscsit_free_conn(conn);
1276 kfree(conn);
1277 conn = NULL;
1278 goto exit; 1290 goto exit;
1279 } else if (rc < 0) { 1291 } else if (rc < 0) {
1280 spin_lock_bh(&np->np_thread_lock); 1292 spin_lock_bh(&np->np_thread_lock);
@@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1282 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1294 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1283 spin_unlock_bh(&np->np_thread_lock); 1295 spin_unlock_bh(&np->np_thread_lock);
1284 complete(&np->np_restart_comp); 1296 complete(&np->np_restart_comp);
1285 iscsit_put_transport(conn->conn_transport); 1297 iscsit_free_conn(conn);
1286 kfree(conn);
1287 conn = NULL;
1288 /* Get another socket */ 1298 /* Get another socket */
1289 return 1; 1299 return 1;
1290 } 1300 }
1291 spin_unlock_bh(&np->np_thread_lock); 1301 spin_unlock_bh(&np->np_thread_lock);
1292 iscsit_put_transport(conn->conn_transport); 1302 iscsit_free_conn(conn);
1293 kfree(conn); 1303 return 1;
1294 conn = NULL;
1295 goto out;
1296 } 1304 }
1297 /* 1305 /*
1298 * Perform the remaining iSCSI connection initialization items.. 1306 * Perform the remaining iSCSI connection initialization items..
@@ -1442,7 +1450,6 @@ old_sess_out:
1442 tpg_np = NULL; 1450 tpg_np = NULL;
1443 } 1451 }
1444 1452
1445out:
1446 return 1; 1453 return 1;
1447 1454
1448exit: 1455exit:
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 74ac3abc44a0..3b8e3639ff5d 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
19extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); 19extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
20extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 20extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
21extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 21extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
22extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 22extern void iscsit_free_conn(struct iscsi_conn *);
23extern int iscsit_start_kthreads(struct iscsi_conn *); 23extern int iscsit_start_kthreads(struct iscsi_conn *);
24extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 24extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
25extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 25extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 977a8307fbb1..4f2816559205 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
260 260
261 mutex_lock(&tz->lock); 261 mutex_lock(&tz->lock);
262 262
263 if (mode == THERMAL_DEVICE_ENABLED) 263 if (mode == THERMAL_DEVICE_ENABLED) {
264 tz->polling_delay = data->polling_delay; 264 tz->polling_delay = data->polling_delay;
265 else 265 tz->passive_delay = data->passive_delay;
266 } else {
266 tz->polling_delay = 0; 267 tz->polling_delay = 0;
268 tz->passive_delay = 0;
269 }
267 270
268 mutex_unlock(&tz->lock); 271 mutex_unlock(&tz->lock);
269 272
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index c866cc165960..450ed66edf58 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -1,16 +1,6 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright 2016 Freescale Semiconductor, Inc. 2//
3 * 3// Copyright 2016 Freescale Semiconductor, Inc.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 */
14 4
15#include <linux/module.h> 5#include <linux/module.h>
16#include <linux/platform_device.h> 6#include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
197 int ret; 187 int ret;
198 struct qoriq_tmu_data *data; 188 struct qoriq_tmu_data *data;
199 struct device_node *np = pdev->dev.of_node; 189 struct device_node *np = pdev->dev.of_node;
200 u32 site = 0; 190 u32 site;
201 191
202 if (!np) { 192 if (!np) {
203 dev_err(&pdev->dev, "Device OF-Node is NULL"); 193 dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
233 if (ret < 0) 223 if (ret < 0)
234 goto err_tmu; 224 goto err_tmu;
235 225
236 data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id, 226 data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
237 data, &tmu_tz_ops); 227 data->sensor_id,
228 data, &tmu_tz_ops);
238 if (IS_ERR(data->tz)) { 229 if (IS_ERR(data->tz)) {
239 ret = PTR_ERR(data->tz); 230 ret = PTR_ERR(data->tz);
240 dev_err(&pdev->dev, 231 dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
243 } 234 }
244 235
245 /* Enable monitoring */ 236 /* Enable monitoring */
246 site |= 0x1 << (15 - data->sensor_id); 237 site = 0x1 << (15 - data->sensor_id);
247 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); 238 tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
248 239
249 return 0; 240 return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
261{ 252{
262 struct qoriq_tmu_data *data = platform_get_drvdata(pdev); 253 struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
263 254
264 thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
265
266 /* Disable monitoring */ 255 /* Disable monitoring */
267 tmu_write(data, TMR_DISABLE, &data->regs->tmr); 256 tmu_write(data, TMR_DISABLE, &data->regs->tmr);
268 257
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 766521eb7071..7aed5337bdd3 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -1,19 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * R-Car Gen3 THS thermal sensor driver 3 * R-Car Gen3 THS thermal sensor driver
3 * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen. 4 * Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
4 * 5 *
5 * Copyright (C) 2016 Renesas Electronics Corporation. 6 * Copyright (C) 2016 Renesas Electronics Corporation.
6 * Copyright (C) 2016 Sang Engineering 7 * Copyright (C) 2016 Sang Engineering
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 */ 8 */
18#include <linux/delay.h> 9#include <linux/delay.h>
19#include <linux/err.h> 10#include <linux/err.h>
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index e77e63070e99..78f932822d38 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -1,21 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * R-Car THS/TSC thermal sensor driver 3 * R-Car THS/TSC thermal sensor driver
3 * 4 *
4 * Copyright (C) 2012 Renesas Solutions Corp. 5 * Copyright (C) 2012 Renesas Solutions Corp.
5 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 6 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
19 */ 7 */
20#include <linux/delay.h> 8#include <linux/delay.h>
21#include <linux/err.h> 9#include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
660}; 648};
661module_platform_driver(rcar_thermal_driver); 649module_platform_driver(rcar_thermal_driver);
662 650
663MODULE_LICENSE("GPL"); 651MODULE_LICENSE("GPL v2");
664MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver"); 652MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
665MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); 653MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 5414c4a87bea..27284a2dcd2b 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
522 return -EIO; 522 return -EIO;
523 523
524 while (count > 0) { 524 while (count > 0) {
525 int ret = 0;
526
525 spin_lock_irqsave(&hp->lock, flags); 527 spin_lock_irqsave(&hp->lock, flags);
526 528
527 rsize = hp->outbuf_size - hp->n_outbuf; 529 rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
537 } 539 }
538 540
539 if (hp->n_outbuf > 0) 541 if (hp->n_outbuf > 0)
540 hvc_push(hp); 542 ret = hvc_push(hp);
541 543
542 spin_unlock_irqrestore(&hp->lock, flags); 544 spin_unlock_irqrestore(&hp->lock, flags);
543 545
546 if (!ret)
547 break;
548
544 if (count) { 549 if (count) {
545 if (hp->n_outbuf > 0) 550 if (hp->n_outbuf > 0)
546 hvc_flush(hp); 551 hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
623#define MAX_TIMEOUT (2000) 628#define MAX_TIMEOUT (2000)
624static u32 timeout = MIN_TIMEOUT; 629static u32 timeout = MIN_TIMEOUT;
625 630
631/*
632 * Maximum number of bytes to get from the console driver if hvc_poll is
633 * called from driver (and can't sleep). Any more than this and we break
 634 * and start polling with khvcd. This value was derived from an OpenBMC
635 * console with the OPAL driver that results in about 0.25ms interrupts off
636 * latency.
637 */
638#define HVC_ATOMIC_READ_MAX 128
639
626#define HVC_POLL_READ 0x00000001 640#define HVC_POLL_READ 0x00000001
627#define HVC_POLL_WRITE 0x00000002 641#define HVC_POLL_WRITE 0x00000002
628 642
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
669 if (!hp->irq_requested) 683 if (!hp->irq_requested)
670 poll_mask |= HVC_POLL_READ; 684 poll_mask |= HVC_POLL_READ;
671 685
686 read_again:
672 /* Read data if any */ 687 /* Read data if any */
673
674 count = tty_buffer_request_room(&hp->port, N_INBUF); 688 count = tty_buffer_request_room(&hp->port, N_INBUF);
675 689
676 /* If flip is full, just reschedule a later read */ 690 /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
717#endif /* CONFIG_MAGIC_SYSRQ */ 731#endif /* CONFIG_MAGIC_SYSRQ */
718 tty_insert_flip_char(&hp->port, buf[i], 0); 732 tty_insert_flip_char(&hp->port, buf[i], 0);
719 } 733 }
720 if (n == count) 734 read_total += n;
721 poll_mask |= HVC_POLL_READ; 735
722 read_total = n; 736 if (may_sleep) {
737 /* Keep going until the flip is full */
738 spin_unlock_irqrestore(&hp->lock, flags);
739 cond_resched();
740 spin_lock_irqsave(&hp->lock, flags);
741 goto read_again;
742 } else if (read_total < HVC_ATOMIC_READ_MAX) {
743 /* Break and defer if it's a large read in atomic */
744 goto read_again;
745 }
746
747 /*
748 * Latency break, schedule another poll immediately.
749 */
750 poll_mask |= HVC_POLL_READ;
723 751
724 out: 752 out:
725 /* Wakeup write queue if necessary */ 753 /* Wakeup write queue if necessary */
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 24a5f05e769b..e5389591bb4f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
1054 /* Get the address of the host memory buffer. 1054 /* Get the address of the host memory buffer.
1055 */ 1055 */
1056 bdp = pinfo->rx_cur; 1056 bdp = pinfo->rx_cur;
1057 while (bdp->cbd_sc & BD_SC_EMPTY) 1057 if (bdp->cbd_sc & BD_SC_EMPTY)
1058 ; 1058 return NO_POLL_CHAR;
1059 1059
1060 /* If the buffer address is in the CPM DPRAM, don't 1060 /* If the buffer address is in the CPM DPRAM, don't
1061 * convert it. 1061 * convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
1090 poll_chars = 0; 1090 poll_chars = 0;
1091 } 1091 }
1092 if (poll_chars <= 0) { 1092 if (poll_chars <= 0) {
1093 poll_chars = poll_wait_key(poll_buf, pinfo); 1093 int ret = poll_wait_key(poll_buf, pinfo);
1094
1095 if (ret == NO_POLL_CHAR)
1096 return ret;
1097 poll_chars = ret;
1094 pollp = poll_buf; 1098 pollp = poll_buf;
1095 } 1099 }
1096 poll_chars--; 1100 poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 51e47a63d61a..3f8d1274fc85 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
979 struct circ_buf *ring = &sport->rx_ring; 979 struct circ_buf *ring = &sport->rx_ring;
980 int ret, nent; 980 int ret, nent;
981 int bits, baud; 981 int bits, baud;
982 struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); 982 struct tty_port *port = &sport->port.state->port;
983 struct tty_struct *tty = port->tty;
983 struct ktermios *termios = &tty->termios; 984 struct ktermios *termios = &tty->termios;
984 985
985 baud = tty_get_baud_rate(tty); 986 baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 239c0fa2e981..0f67197a3783 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
2351 ret); 2351 ret);
2352 return ret; 2352 return ret;
2353 } 2353 }
2354
2355 ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
2356 dev_name(&pdev->dev), sport);
2357 if (ret) {
2358 dev_err(&pdev->dev, "failed to request rts irq: %d\n",
2359 ret);
2360 return ret;
2361 }
2354 } else { 2362 } else {
2355 ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, 2363 ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
2356 dev_name(&pdev->dev), sport); 2364 dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index d04b5eeea3c6..170e446a2f62 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
511 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); 511 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
512 termios->c_cflag &= CREAD | CBAUD; 512 termios->c_cflag &= CREAD | CBAUD;
513 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); 513 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
514 termios->c_cflag |= CS8;
514 } 515 }
515 516
516 spin_unlock_irqrestore(&port->lock, flags); 517 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 32bc3e3fe4d3..5e5da9acaf0a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1255static int tty_reopen(struct tty_struct *tty) 1255static int tty_reopen(struct tty_struct *tty)
1256{ 1256{
1257 struct tty_driver *driver = tty->driver; 1257 struct tty_driver *driver = tty->driver;
1258 int retval;
1258 1259
1259 if (driver->type == TTY_DRIVER_TYPE_PTY && 1260 if (driver->type == TTY_DRIVER_TYPE_PTY &&
1260 driver->subtype == PTY_TYPE_MASTER) 1261 driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
1268 1269
1269 tty->count++; 1270 tty->count++;
1270 1271
1271 if (!tty->ldisc) 1272 if (tty->ldisc)
1272 return tty_ldisc_reinit(tty, tty->termios.c_line); 1273 return 0;
1273 1274
1274 return 0; 1275 retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1276 if (retval)
1277 tty->count--;
1278
1279 return retval;
1275} 1280}
1276 1281
1277/** 1282/**
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a78ad10a119b..73cdc0d633dd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -32,6 +32,8 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <linux/uaccess.h> 33#include <linux/uaccess.h>
34 34
35#include <linux/nospec.h>
36
35#include <linux/kbd_kern.h> 37#include <linux/kbd_kern.h>
36#include <linux/vt_kern.h> 38#include <linux/vt_kern.h>
37#include <linux/kbd_diacr.h> 39#include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
700 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) 702 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
701 ret = -ENXIO; 703 ret = -ENXIO;
702 else { 704 else {
705 vsa.console = array_index_nospec(vsa.console,
706 MAX_NR_CONSOLES + 1);
703 vsa.console--; 707 vsa.console--;
704 console_lock(); 708 console_lock();
705 ret = vc_allocate(vsa.console); 709 ret = vc_allocate(vsa.console);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 27346d69f393..f9b40a9dc4d3 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
780 } 780 }
781 781
782 if (acm->susp_count) { 782 if (acm->susp_count) {
783 if (acm->putbuffer) {
784 /* now to preserve order */
785 usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
786 acm->putbuffer = NULL;
787 }
788 usb_anchor_urb(wb->urb, &acm->delayed); 783 usb_anchor_urb(wb->urb, &acm->delayed);
789 spin_unlock_irqrestore(&acm->write_lock, flags); 784 spin_unlock_irqrestore(&acm->write_lock, flags);
790 return count; 785 return count;
791 } else {
792 if (acm->putbuffer) {
793 /* at this point there is no good way to handle errors */
794 acm_start_wb(acm, acm->putbuffer);
795 acm->putbuffer = NULL;
796 }
797 } 786 }
798 787
799 stat = acm_start_wb(acm, wb); 788 stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
804 return count; 793 return count;
805} 794}
806 795
807static void acm_tty_flush_chars(struct tty_struct *tty)
808{
809 struct acm *acm = tty->driver_data;
810 struct acm_wb *cur;
811 int err;
812 unsigned long flags;
813
814 spin_lock_irqsave(&acm->write_lock, flags);
815
816 cur = acm->putbuffer;
817 if (!cur) /* nothing to do */
818 goto out;
819
820 acm->putbuffer = NULL;
821 err = usb_autopm_get_interface_async(acm->control);
822 if (err < 0) {
823 cur->use = 0;
824 acm->putbuffer = cur;
825 goto out;
826 }
827
828 if (acm->susp_count)
829 usb_anchor_urb(cur->urb, &acm->delayed);
830 else
831 acm_start_wb(acm, cur);
832out:
833 spin_unlock_irqrestore(&acm->write_lock, flags);
834 return;
835}
836
837static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
838{
839 struct acm *acm = tty->driver_data;
840 struct acm_wb *cur;
841 int wbn;
842 unsigned long flags;
843
844overflow:
845 cur = acm->putbuffer;
846 if (!cur) {
847 spin_lock_irqsave(&acm->write_lock, flags);
848 wbn = acm_wb_alloc(acm);
849 if (wbn >= 0) {
850 cur = &acm->wb[wbn];
851 acm->putbuffer = cur;
852 }
853 spin_unlock_irqrestore(&acm->write_lock, flags);
854 if (!cur)
855 return 0;
856 }
857
858 if (cur->len == acm->writesize) {
859 acm_tty_flush_chars(tty);
860 goto overflow;
861 }
862
863 cur->buf[cur->len++] = ch;
864 return 1;
865}
866
867static int acm_tty_write_room(struct tty_struct *tty) 796static int acm_tty_write_room(struct tty_struct *tty)
868{ 797{
869 struct acm *acm = tty->driver_data; 798 struct acm *acm = tty->driver_data;
@@ -1987,8 +1916,6 @@ static const struct tty_operations acm_ops = {
1987 .cleanup = acm_tty_cleanup, 1916 .cleanup = acm_tty_cleanup,
1988 .hangup = acm_tty_hangup, 1917 .hangup = acm_tty_hangup,
1989 .write = acm_tty_write, 1918 .write = acm_tty_write,
1990 .put_char = acm_tty_put_char,
1991 .flush_chars = acm_tty_flush_chars,
1992 .write_room = acm_tty_write_room, 1919 .write_room = acm_tty_write_room,
1993 .ioctl = acm_tty_ioctl, 1920 .ioctl = acm_tty_ioctl,
1994 .throttle = acm_tty_throttle, 1921 .throttle = acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index eacc116e83da..ca06b20d7af9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
96 unsigned long read_urbs_free; 96 unsigned long read_urbs_free;
97 struct urb *read_urbs[ACM_NR]; 97 struct urb *read_urbs[ACM_NR];
98 struct acm_rb read_buffers[ACM_NR]; 98 struct acm_rb read_buffers[ACM_NR];
99 struct acm_wb *putbuffer; /* for acm_tty_put_char() */
100 int rx_buflimit; 99 int rx_buflimit;
101 spinlock_t read_lock; 100 spinlock_t read_lock;
102 u8 *notification_buffer; /* to reassemble fragmented notifications */ 101 u8 *notification_buffer; /* to reassemble fragmented notifications */
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 50a2362ed3ea..48277bbc15e4 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
246} 246}
247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps); 247EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
248 248
249/**
250 * usb_of_get_companion_dev - Find the companion device
251 * @dev: the device pointer to find a companion
252 *
253 * Find the companion device from platform bus.
254 *
255 * Takes a reference to the returned struct device which needs to be dropped
256 * after use.
257 *
258 * Return: On success, a pointer to the companion device, %NULL on failure.
259 */
260struct device *usb_of_get_companion_dev(struct device *dev)
261{
262 struct device_node *node;
263 struct platform_device *pdev = NULL;
264
265 node = of_parse_phandle(dev->of_node, "companion", 0);
266 if (node)
267 pdev = of_find_device_by_node(node);
268
269 of_node_put(node);
270
271 return pdev ? &pdev->dev : NULL;
272}
273EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
249#endif 274#endif
250 275
251MODULE_LICENSE("GPL"); 276MODULE_LICENSE("GPL");
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
index 15cc76e22123..99116af07f1d 100644
--- a/drivers/usb/common/roles.c
+++ b/drivers/usb/common/roles.c
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
109 */ 109 */
110struct usb_role_switch *usb_role_switch_get(struct device *dev) 110struct usb_role_switch *usb_role_switch_get(struct device *dev)
111{ 111{
112 return device_connection_find_match(dev, "usb-role-switch", NULL, 112 struct usb_role_switch *sw;
113 usb_role_switch_match); 113
114 sw = device_connection_find_match(dev, "usb-role-switch", NULL,
115 usb_role_switch_match);
116
117 if (!IS_ERR_OR_NULL(sw))
118 WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
119
120 return sw;
114} 121}
115EXPORT_SYMBOL_GPL(usb_role_switch_get); 122EXPORT_SYMBOL_GPL(usb_role_switch_get);
116 123
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
122 */ 129 */
123void usb_role_switch_put(struct usb_role_switch *sw) 130void usb_role_switch_put(struct usb_role_switch *sw)
124{ 131{
125 if (!IS_ERR_OR_NULL(sw)) 132 if (!IS_ERR_OR_NULL(sw)) {
126 put_device(&sw->dev); 133 put_device(&sw->dev);
134 module_put(sw->dev.parent->driver->owner);
135 }
127} 136}
128EXPORT_SYMBOL_GPL(usb_role_switch_put); 137EXPORT_SYMBOL_GPL(usb_role_switch_put);
129 138
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ce77b33da61..244417d0dfd1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1434 struct async *as = NULL; 1434 struct async *as = NULL;
1435 struct usb_ctrlrequest *dr = NULL; 1435 struct usb_ctrlrequest *dr = NULL;
1436 unsigned int u, totlen, isofrmlen; 1436 unsigned int u, totlen, isofrmlen;
1437 int i, ret, is_in, num_sgs = 0, ifnum = -1; 1437 int i, ret, num_sgs = 0, ifnum = -1;
1438 int number_of_packets = 0; 1438 int number_of_packets = 0;
1439 unsigned int stream_id = 0; 1439 unsigned int stream_id = 0;
1440 void *buf; 1440 void *buf;
1441 bool is_in;
1442 bool allow_short = false;
1443 bool allow_zero = false;
1441 unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | 1444 unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
1442 USBDEVFS_URB_BULK_CONTINUATION | 1445 USBDEVFS_URB_BULK_CONTINUATION |
1443 USBDEVFS_URB_NO_FSBR | 1446 USBDEVFS_URB_NO_FSBR |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1471 u = 0; 1474 u = 0;
1472 switch (uurb->type) { 1475 switch (uurb->type) {
1473 case USBDEVFS_URB_TYPE_CONTROL: 1476 case USBDEVFS_URB_TYPE_CONTROL:
1477 if (is_in)
1478 allow_short = true;
1474 if (!usb_endpoint_xfer_control(&ep->desc)) 1479 if (!usb_endpoint_xfer_control(&ep->desc))
1475 return -EINVAL; 1480 return -EINVAL;
1476 /* min 8 byte setup packet */ 1481 /* min 8 byte setup packet */
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1511 break; 1516 break;
1512 1517
1513 case USBDEVFS_URB_TYPE_BULK: 1518 case USBDEVFS_URB_TYPE_BULK:
1519 if (!is_in)
1520 allow_zero = true;
1521 else
1522 allow_short = true;
1514 switch (usb_endpoint_type(&ep->desc)) { 1523 switch (usb_endpoint_type(&ep->desc)) {
1515 case USB_ENDPOINT_XFER_CONTROL: 1524 case USB_ENDPOINT_XFER_CONTROL:
1516 case USB_ENDPOINT_XFER_ISOC: 1525 case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1531 if (!usb_endpoint_xfer_int(&ep->desc)) 1540 if (!usb_endpoint_xfer_int(&ep->desc))
1532 return -EINVAL; 1541 return -EINVAL;
1533 interrupt_urb: 1542 interrupt_urb:
1543 if (!is_in)
1544 allow_zero = true;
1545 else
1546 allow_short = true;
1534 break; 1547 break;
1535 1548
1536 case USBDEVFS_URB_TYPE_ISO: 1549 case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1676 u = (is_in ? URB_DIR_IN : URB_DIR_OUT); 1689 u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
1677 if (uurb->flags & USBDEVFS_URB_ISO_ASAP) 1690 if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
1678 u |= URB_ISO_ASAP; 1691 u |= URB_ISO_ASAP;
1679 if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) 1692 if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
1680 u |= URB_SHORT_NOT_OK; 1693 u |= URB_SHORT_NOT_OK;
1681 if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) 1694 if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
1682 u |= URB_ZERO_PACKET; 1695 u |= URB_ZERO_PACKET;
1683 if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) 1696 if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
1684 u |= URB_NO_INTERRUPT; 1697 u |= URB_NO_INTERRUPT;
1685 as->urb->transfer_flags = u; 1698 as->urb->transfer_flags = u;
1686 1699
1700 if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
1701 dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
1702 if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
1703 dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
1704
1687 as->urb->transfer_buffer_length = uurb->buffer_length; 1705 as->urb->transfer_buffer_length = uurb->buffer_length;
1688 as->urb->setup_packet = (unsigned char *)dr; 1706 as->urb->setup_packet = (unsigned char *)dr;
1689 dr = NULL; 1707 dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e76e95f62f76..a1f225f077cd 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
512 struct device *dev; 512 struct device *dev;
513 struct usb_device *udev; 513 struct usb_device *udev;
514 int retval = 0; 514 int retval = 0;
515 int lpm_disable_error = -ENODEV;
516 515
517 if (!iface) 516 if (!iface)
518 return -ENODEV; 517 return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
533 532
534 iface->condition = USB_INTERFACE_BOUND; 533 iface->condition = USB_INTERFACE_BOUND;
535 534
536 /* See the comment about disabling LPM in usb_probe_interface(). */
537 if (driver->disable_hub_initiated_lpm) {
538 lpm_disable_error = usb_unlocked_disable_lpm(udev);
539 if (lpm_disable_error) {
540 dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
541 __func__, driver->name);
542 return -ENOMEM;
543 }
544 }
545
546 /* Claimed interfaces are initially inactive (suspended) and 535 /* Claimed interfaces are initially inactive (suspended) and
547 * runtime-PM-enabled, but only if the driver has autosuspend 536 * runtime-PM-enabled, but only if the driver has autosuspend
548 * support. Otherwise they are marked active, to prevent the 537 * support. Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
561 if (device_is_registered(dev)) 550 if (device_is_registered(dev))
562 retval = device_bind_driver(dev); 551 retval = device_bind_driver(dev);
563 552
564 /* Attempt to re-enable USB3 LPM, if the disable was successful. */ 553 if (retval) {
565 if (!lpm_disable_error) 554 dev->driver = NULL;
566 usb_unlocked_enable_lpm(udev); 555 usb_set_intfdata(iface, NULL);
556 iface->needs_remote_wakeup = 0;
557 iface->condition = USB_INTERFACE_UNBOUND;
558
559 /*
560 * Unbound interfaces are always runtime-PM-disabled
561 * and runtime-PM-suspended
562 */
563 if (driver->supports_autosuspend)
564 pm_runtime_disable(dev);
565 pm_runtime_set_suspended(dev);
566 }
567 567
568 return retval; 568 return retval;
569} 569}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 66fe1b78d952..03432467b05f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
515 event == PM_EVENT_RESTORE); 515 event == PM_EVENT_RESTORE);
516 if (retval) { 516 if (retval) {
517 dev_err(dev, "PCI post-resume error %d!\n", retval); 517 dev_err(dev, "PCI post-resume error %d!\n", retval);
518 if (hcd->shared_hcd)
519 usb_hc_died(hcd->shared_hcd);
520 usb_hc_died(hcd); 518 usb_hc_died(hcd);
521 } 519 }
522 } 520 }
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 228672f2c4a1..bfa5eda0cc26 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
1341 * is submitted that needs that bandwidth. Some other operating systems 1341 * is submitted that needs that bandwidth. Some other operating systems
1342 * allocate bandwidth early, when a configuration is chosen. 1342 * allocate bandwidth early, when a configuration is chosen.
1343 * 1343 *
1344 * xHCI reserves bandwidth and configures the alternate setting in
1345 * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
1346 * may be disabled. Drivers cannot rely on any particular alternate
1347 * setting being in effect after a failure.
1348 *
1344 * This call is synchronous, and may not be used in an interrupt context. 1349 * This call is synchronous, and may not be used in an interrupt context.
1345 * Also, drivers must not change altsettings while urbs are scheduled for 1350 * Also, drivers must not change altsettings while urbs are scheduled for
1346 * endpoints in that interface; all such urbs must first be completed 1351 * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1376 alternate); 1381 alternate);
1377 return -EINVAL; 1382 return -EINVAL;
1378 } 1383 }
1384 /*
1385 * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
1386 * including freeing dropped endpoint ring buffers.
1387 * Make sure the interface endpoints are flushed before that
1388 */
1389 usb_disable_interface(dev, iface, false);
1379 1390
1380 /* Make sure we have enough bandwidth for this alternate interface. 1391 /* Make sure we have enough bandwidth for this alternate interface.
1381 * Remove the current alt setting and add the new alt setting. 1392 * Remove the current alt setting and add the new alt setting.
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index fd77442c2d12..651708d8c908 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
105 return NULL; 105 return NULL;
106} 106}
107EXPORT_SYMBOL_GPL(usb_of_get_interface_node); 107EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
108
109/**
110 * usb_of_get_companion_dev - Find the companion device
111 * @dev: the device pointer to find a companion
112 *
113 * Find the companion device from platform bus.
114 *
115 * Takes a reference to the returned struct device which needs to be dropped
116 * after use.
117 *
118 * Return: On success, a pointer to the companion device, %NULL on failure.
119 */
120struct device *usb_of_get_companion_dev(struct device *dev)
121{
122 struct device_node *node;
123 struct platform_device *pdev = NULL;
124
125 node = of_parse_phandle(dev->of_node, "companion", 0);
126 if (node)
127 pdev = of_find_device_by_node(node);
128
129 of_node_put(node);
130
131 return pdev ? &pdev->dev : NULL;
132}
133EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 097057d2eacf..178d6c6063c0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
58 quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), 58 quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
59 GFP_KERNEL); 59 GFP_KERNEL);
60 if (!quirk_list) { 60 if (!quirk_list) {
61 quirk_count = 0;
61 mutex_unlock(&quirk_mutex); 62 mutex_unlock(&quirk_mutex);
62 return -ENOMEM; 63 return -ENOMEM;
63 } 64 }
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
154 .string = quirks_param, 155 .string = quirks_param,
155}; 156};
156 157
157module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); 158device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
158MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); 159MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
159 160
160/* Lists of quirky USB devices, split in device quirks and interface quirks. 161/* Lists of quirky USB devices, split in device quirks and interface quirks.
@@ -178,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
178 /* CBM - Flash disk */ 179 /* CBM - Flash disk */
179 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 180 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
180 181
182 /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
183 { USB_DEVICE(0x0218, 0x0201), .driver_info =
184 USB_QUIRK_CONFIG_INTF_STRINGS },
185
181 /* WORLDE easy key (easykey.25) MIDI controller */ 186 /* WORLDE easy key (easykey.25) MIDI controller */
182 { USB_DEVICE(0x0218, 0x0401), .driver_info = 187 { USB_DEVICE(0x0218, 0x0401), .driver_info =
183 USB_QUIRK_CONFIG_INTF_STRINGS }, 188 USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +411,9 @@ static const struct usb_device_id usb_quirk_list[] = {
406 { USB_DEVICE(0x2040, 0x7200), .driver_info = 411 { USB_DEVICE(0x2040, 0x7200), .driver_info =
407 USB_QUIRK_CONFIG_INTF_STRINGS }, 412 USB_QUIRK_CONFIG_INTF_STRINGS },
408 413
414 /* DJI CineSSD */
415 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
416
409 /* INTEL VALUE SSD */ 417 /* INTEL VALUE SSD */
410 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 418 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
411 419
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 623be3174fb3..79d8bd7a612e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
228 struct usb_interface_cache *intf_cache = NULL; 228 struct usb_interface_cache *intf_cache = NULL;
229 int i; 229 int i;
230 230
231 if (!config)
232 return NULL;
231 for (i = 0; i < config->desc.bNumInterfaces; i++) { 233 for (i = 0; i < config->desc.bNumInterfaces; i++) {
232 if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber 234 if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
233 == iface_num) { 235 == iface_num) {
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 9a53a58e676e..577642895b57 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n", 412 dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
413 (unsigned long)res->start, hsotg->regs); 413 (unsigned long)res->start, hsotg->regs);
414 414
415 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
416
417 retval = dwc2_lowlevel_hw_init(hsotg); 415 retval = dwc2_lowlevel_hw_init(hsotg);
418 if (retval) 416 if (retval)
419 return retval; 417 return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
438 if (retval) 436 if (retval)
439 return retval; 437 return retval;
440 438
439 hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
440
441 retval = dwc2_get_dr_mode(hsotg); 441 retval = dwc2_get_dr_mode(hsotg);
442 if (retval) 442 if (retval)
443 goto error; 443 goto error;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 40bf9e0bbc59..4c2771c5e727 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
180 return 0; 180 return 0;
181} 181}
182 182
183#ifdef CONFIG_PM 183static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
184static int dwc3_of_simple_runtime_suspend(struct device *dev)
185{ 184{
186 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 185 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
187 int i; 186 int i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
192 return 0; 191 return 0;
193} 192}
194 193
195static int dwc3_of_simple_runtime_resume(struct device *dev) 194static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
196{ 195{
197 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 196 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
198 int ret; 197 int ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
210 return 0; 209 return 0;
211} 210}
212 211
213static int dwc3_of_simple_suspend(struct device *dev) 212static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
214{ 213{
215 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 214 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
216 215
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
220 return 0; 219 return 0;
221} 220}
222 221
223static int dwc3_of_simple_resume(struct device *dev) 222static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
224{ 223{
225 struct dwc3_of_simple *simple = dev_get_drvdata(dev); 224 struct dwc3_of_simple *simple = dev_get_drvdata(dev);
226 225
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
229 228
230 return 0; 229 return 0;
231} 230}
232#endif
233 231
234static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = { 232static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
235 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume) 233 SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 5edd79470368..1286076a8890 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
85 u32 value; 85 u32 value;
86 86
87 reg = pcim_iomap(pci, GP_RWBAR, 0); 87 reg = pcim_iomap(pci, GP_RWBAR, 0);
88 if (IS_ERR(reg)) 88 if (!reg)
89 return PTR_ERR(reg); 89 return -ENOMEM;
90 90
91 value = readl(reg + GP_RWREG1); 91 value = readl(reg + GP_RWREG1);
92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE)) 92 if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 032ea7d709ba..2b53194081ba 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
473 473
474/** 474/**
475 * dwc3_gadget_start_config - configure ep resources 475 * dwc3_gadget_start_config - configure ep resources
476 * @dwc: pointer to our controller context structure
477 * @dep: endpoint that is being enabled 476 * @dep: endpoint that is being enabled
478 * 477 *
479 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's 478 * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 53a48f561458..587c5037ff07 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
1063static int fotg210_udc_remove(struct platform_device *pdev) 1063static int fotg210_udc_remove(struct platform_device *pdev)
1064{ 1064{
1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev); 1065 struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
1066 int i;
1066 1067
1067 usb_del_gadget_udc(&fotg210->gadget); 1068 usb_del_gadget_udc(&fotg210->gadget);
1068 iounmap(fotg210->reg); 1069 iounmap(fotg210->reg);
1069 free_irq(platform_get_irq(pdev, 0), fotg210); 1070 free_irq(platform_get_irq(pdev, 0), fotg210);
1070 1071
1071 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1072 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1073 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1074 kfree(fotg210->ep[i]);
1072 kfree(fotg210); 1075 kfree(fotg210);
1073 1076
1074 return 0; 1077 return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1099 /* initialize udc */ 1102 /* initialize udc */
1100 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL); 1103 fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
1101 if (fotg210 == NULL) 1104 if (fotg210 == NULL)
1102 goto err_alloc; 1105 goto err;
1103 1106
1104 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) { 1107 for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
1105 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL); 1108 _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1111 fotg210->reg = ioremap(res->start, resource_size(res)); 1114 fotg210->reg = ioremap(res->start, resource_size(res));
1112 if (fotg210->reg == NULL) { 1115 if (fotg210->reg == NULL) {
1113 pr_err("ioremap error.\n"); 1116 pr_err("ioremap error.\n");
1114 goto err_map; 1117 goto err_alloc;
1115 } 1118 }
1116 1119
1117 spin_lock_init(&fotg210->lock); 1120 spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1159 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep, 1162 fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
1160 GFP_KERNEL); 1163 GFP_KERNEL);
1161 if (fotg210->ep0_req == NULL) 1164 if (fotg210->ep0_req == NULL)
1162 goto err_req; 1165 goto err_map;
1163 1166
1164 fotg210_init(fotg210); 1167 fotg210_init(fotg210);
1165 1168
@@ -1187,12 +1190,14 @@ err_req:
1187 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req); 1190 fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
1188 1191
1189err_map: 1192err_map:
1190 if (fotg210->reg) 1193 iounmap(fotg210->reg);
1191 iounmap(fotg210->reg);
1192 1194
1193err_alloc: 1195err_alloc:
1196 for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
1197 kfree(fotg210->ep[i]);
1194 kfree(fotg210); 1198 kfree(fotg210);
1195 1199
1200err:
1196 return ret; 1201 return ret;
1197} 1202}
1198 1203
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 318246d8b2e2..b02ab2a8d927 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1545 writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1546 } else { 1546 } else {
1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl); 1547 writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
1548 stop_activity(dev, dev->driver); 1548 stop_activity(dev, NULL);
1549 } 1549 }
1550 1550
1551 spin_unlock_irqrestore(&dev->lock, flags); 1551 spin_unlock_irqrestore(&dev->lock, flags);
1552 1552
1553 if (!is_on && dev->driver)
1554 dev->driver->disconnect(&dev->gadget);
1555
1553 return 0; 1556 return 0;
1554} 1557}
1555 1558
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
2466 nuke(&dev->ep[i]); 2469 nuke(&dev->ep[i]);
2467 2470
2468 /* report disconnect; the driver is already quiesced */ 2471 /* report disconnect; the driver is already quiesced */
2469 if (driver) 2472 if (driver) {
2473 spin_unlock(&dev->lock);
2470 driver->disconnect(&dev->gadget); 2474 driver->disconnect(&dev->gadget);
2475 spin_lock(&dev->lock);
2476 }
2471 2477
2472 usb_reinit(dev); 2478 usb_reinit(dev);
2473} 2479}
@@ -3341,6 +3347,8 @@ next_endpoints:
3341 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3347 BIT(PCI_RETRY_ABORT_INTERRUPT))
3342 3348
3343static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3349static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3350__releases(dev->lock)
3351__acquires(dev->lock)
3344{ 3352{
3345 struct net2280_ep *ep; 3353 struct net2280_ep *ep;
3346 u32 tmp, num, mask, scratch; 3354 u32 tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3381 if (disconnect || reset) { 3389 if (disconnect || reset) {
3382 stop_activity(dev, dev->driver); 3390 stop_activity(dev, dev->driver);
3383 ep0_start(dev); 3391 ep0_start(dev);
3392 spin_unlock(&dev->lock);
3384 if (reset) 3393 if (reset)
3385 usb_gadget_udc_reset 3394 usb_gadget_udc_reset
3386 (&dev->gadget, dev->driver); 3395 (&dev->gadget, dev->driver);
3387 else 3396 else
3388 (dev->driver->disconnect) 3397 (dev->driver->disconnect)
3389 (&dev->gadget); 3398 (&dev->gadget);
3399 spin_lock(&dev->lock);
3390 return; 3400 return;
3391 } 3401 }
3392 } 3402 }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3405 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT); 3415 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
3406 if (stat & tmp) { 3416 if (stat & tmp) {
3407 writel(tmp, &dev->regs->irqstat1); 3417 writel(tmp, &dev->regs->irqstat1);
3418 spin_unlock(&dev->lock);
3408 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) { 3419 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
3409 if (dev->driver->suspend) 3420 if (dev->driver->suspend)
3410 dev->driver->suspend(&dev->gadget); 3421 dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
3415 dev->driver->resume(&dev->gadget); 3426 dev->driver->resume(&dev->gadget);
3416 /* at high speed, note erratum 0133 */ 3427 /* at high speed, note erratum 0133 */
3417 } 3428 }
3429 spin_lock(&dev->lock);
3418 stat &= ~tmp; 3430 stat &= ~tmp;
3419 } 3431 }
3420 3432
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 1f879b3f2c96..e1656f361e08 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
812 switch (speed) { 812 switch (speed) {
813 case USB_STA_SPEED_SS: 813 case USB_STA_SPEED_SS:
814 usb3->gadget.speed = USB_SPEED_SUPER; 814 usb3->gadget.speed = USB_SPEED_SUPER;
815 usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
815 break; 816 break;
816 case USB_STA_SPEED_HS: 817 case USB_STA_SPEED_HS:
817 usb3->gadget.speed = USB_SPEED_HIGH; 818 usb3->gadget.speed = USB_SPEED_HIGH;
819 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
818 break; 820 break;
819 case USB_STA_SPEED_FS: 821 case USB_STA_SPEED_FS:
820 usb3->gadget.speed = USB_SPEED_FULL; 822 usb3->gadget.speed = USB_SPEED_FULL;
823 usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
821 break; 824 break;
822 default: 825 default:
823 usb3->gadget.speed = USB_SPEED_UNKNOWN; 826 usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
2513 /* for control pipe */ 2516 /* for control pipe */
2514 usb3->gadget.ep0 = &usb3_ep->ep; 2517 usb3->gadget.ep0 = &usb3_ep->ep;
2515 usb_ep_set_maxpacket_limit(&usb3_ep->ep, 2518 usb_ep_set_maxpacket_limit(&usb3_ep->ep,
2516 USB3_EP0_HSFS_MAX_PACKET_SIZE); 2519 USB3_EP0_SS_MAX_PACKET_SIZE);
2517 usb3_ep->ep.caps.type_control = true; 2520 usb3_ep->ep.caps.type_control = true;
2518 usb3_ep->ep.caps.dir_in = true; 2521 usb3_ep->ep.caps.dir_in = true;
2519 usb3_ep->ep.caps.dir_out = true; 2522 usb3_ep->ep.caps.dir_out = true;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 072bd5d5738e..5b8a3d9530c4 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
2555 } else { 2555 } else {
2556 int frame = 0; 2556 int frame = 0;
2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); 2557 dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
2558 msleep(100); 2558 mdelay(100);
2559 return frame; 2559 return frame;
2560 } 2560 }
2561} 2561}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ef350c33dc4a..b1f27aa38b10 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; 1613 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
1614 in_ep_ctx->deq = out_ep_ctx->deq; 1614 in_ep_ctx->deq = out_ep_ctx->deq;
1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info; 1615 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
1616 if (xhci->quirks & XHCI_MTK_HOST) {
1617 in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
1618 in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
1619 }
1616} 1620}
1617 1621
1618/* Copy output xhci_slot_ctx to the input xhci_slot_ctx. 1622/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8dc77e34a859..94e939249b2b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
153{ 153{
154 const struct xhci_plat_priv *priv_match; 154 const struct xhci_plat_priv *priv_match;
155 const struct hc_driver *driver; 155 const struct hc_driver *driver;
156 struct device *sysdev; 156 struct device *sysdev, *tmpdev;
157 struct xhci_hcd *xhci; 157 struct xhci_hcd *xhci;
158 struct resource *res; 158 struct resource *res;
159 struct usb_hcd *hcd; 159 struct usb_hcd *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
273 goto disable_clk; 273 goto disable_clk;
274 } 274 }
275 275
276 if (device_property_read_bool(sysdev, "usb2-lpm-disable")) 276 /* imod_interval is the interrupt moderation value in nanoseconds. */
277 xhci->quirks |= XHCI_HW_LPM_DISABLE; 277 xhci->imod_interval = 40000;
278 278
279 if (device_property_read_bool(sysdev, "usb3-lpm-capable")) 279 /* Iterate over all parent nodes for finding quirks */
280 xhci->quirks |= XHCI_LPM_SUPPORT; 280 for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
281 281
282 if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped")) 282 if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
283 xhci->quirks |= XHCI_BROKEN_PORT_PED; 283 xhci->quirks |= XHCI_HW_LPM_DISABLE;
284 284
285 /* imod_interval is the interrupt moderation value in nanoseconds. */ 285 if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
286 xhci->imod_interval = 40000; 286 xhci->quirks |= XHCI_LPM_SUPPORT;
287 device_property_read_u32(sysdev, "imod-interval-ns", 287
288 &xhci->imod_interval); 288 if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
289 xhci->quirks |= XHCI_BROKEN_PORT_PED;
290
291 device_property_read_u32(tmpdev, "imod-interval-ns",
292 &xhci->imod_interval);
293 }
289 294
290 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0); 295 hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
291 if (IS_ERR(hcd->usb_phy)) { 296 if (IS_ERR(hcd->usb_phy)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 61f48b17e57b..0420eefa647a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -37,6 +37,21 @@ static unsigned long long quirks;
37module_param(quirks, ullong, S_IRUGO); 37module_param(quirks, ullong, S_IRUGO);
38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default"); 38MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
39 39
40static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
41{
42 struct xhci_segment *seg = ring->first_seg;
43
44 if (!td || !td->start_seg)
45 return false;
46 do {
47 if (seg == td->start_seg)
48 return true;
49 seg = seg->next;
50 } while (seg && seg != ring->first_seg);
51
52 return false;
53}
54
40/* TODO: copied from ehci-hcd.c - can this be refactored? */ 55/* TODO: copied from ehci-hcd.c - can this be refactored? */
41/* 56/*
42 * xhci_handshake - spin reading hc until handshake completes or fails 57 * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1571 goto done; 1586 goto done;
1572 } 1587 }
1573 1588
1589 /*
1590 * check ring is not re-allocated since URB was enqueued. If it is, then
1591 * make sure none of the ring related pointers in this URB private data
1592 * are touched, such as td_list, otherwise we overwrite freed data
1593 */
1594 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1595 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1596 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1597 td = &urb_priv->td[i];
1598 if (!list_empty(&td->cancelled_td_list))
1599 list_del_init(&td->cancelled_td_list);
1600 }
1601 goto err_giveback;
1602 }
1603
1574 if (xhci->xhc_state & XHCI_STATE_HALTED) { 1604 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1575 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 1605 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1576 "HC halted, freeing TD manually."); 1606 "HC halted, freeing TD manually.");
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 82f220631bd7..b5d661644263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
369 mask &= 0x0f; 369 mask &= 0x0f;
370 val &= 0x0f; 370 val &= 0x0f;
371 d = (priv->reg[1] & (~mask)) ^ val; 371 d = (priv->reg[1] & (~mask)) ^ val;
372 if (set_1284_register(pp, 2, d, GFP_KERNEL)) 372 if (set_1284_register(pp, 2, d, GFP_ATOMIC))
373 return 0; 373 return 0;
374 priv->reg[1] = d; 374 priv->reg[1] = d;
375 return d & 0xf; 375 return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
379{ 379{
380 unsigned char ret; 380 unsigned char ret;
381 381
382 if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) 382 if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
383 return 0; 383 return 0;
384 return ret & 0xf8; 384 return ret & 0xf8;
385} 385}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 3be40eaa1ac9..6d9fd5f64903 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
413 spin_unlock_irqrestore(&dev->lock, flags); 413 spin_unlock_irqrestore(&dev->lock, flags);
414 mutex_unlock(&dev->io_mutex); 414 mutex_unlock(&dev->io_mutex);
415 415
416 if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
417 return -EIO;
418
416 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len); 419 return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
417} 420}
418 421
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
421{ 424{
422 struct usb_yurex *dev; 425 struct usb_yurex *dev;
423 int i, set = 0, retval = 0; 426 int i, set = 0, retval = 0;
424 char buffer[16]; 427 char buffer[16 + 1];
425 char *data = buffer; 428 char *data = buffer;
426 unsigned long long c, c2 = 0; 429 unsigned long long c, c2 = 0;
427 signed long timeout = 0; 430 signed long timeout = 0;
428 DEFINE_WAIT(wait); 431 DEFINE_WAIT(wait);
429 432
430 count = min(sizeof(buffer), count); 433 count = min(sizeof(buffer) - 1, count);
431 dev = file->private_data; 434 dev = file->private_data;
432 435
433 /* verify that we actually have some data to write */ 436 /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
446 retval = -EFAULT; 449 retval = -EFAULT;
447 goto error; 450 goto error;
448 } 451 }
452 buffer[count] = 0;
449 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); 453 memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
450 454
451 switch (buffer[0]) { 455 switch (buffer[0]) {
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index eecfd0671362..d045d8458f81 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN | 107 (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
108 SSUSB_U2_PORT_HOST_SEL)); 108 SSUSB_U2_PORT_HOST_SEL));
109 109
110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) 110 if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); 111 mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
112 if (mtu->is_u3_ip)
113 mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
114 SSUSB_U3_PORT_DUAL_MODE);
115 }
112 116
113 return ssusb_check_clocks(mtu->ssusb, check_clk); 117 return ssusb_check_clocks(mtu->ssusb, check_clk);
114} 118}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 6ee371478d89..a45bb253939f 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -459,6 +459,7 @@
459 459
460/* U3D_SSUSB_U3_CTRL_0P */ 460/* U3D_SSUSB_U3_CTRL_0P */
461#define SSUSB_U3_PORT_SSP_SPEED BIT(9) 461#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
462#define SSUSB_U3_PORT_DUAL_MODE BIT(7)
462#define SSUSB_U3_PORT_HOST_SEL BIT(2) 463#define SSUSB_U3_PORT_HOST_SEL BIT(2)
463#define SSUSB_U3_PORT_PDN BIT(1) 464#define SSUSB_U3_PORT_PDN BIT(1)
464#define SSUSB_U3_PORT_DIS BIT(0) 465#define SSUSB_U3_PORT_DIS BIT(0)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index df827ff57b0d..23a0df79ef21 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
658 return controller; 658 return controller;
659} 659}
660 660
661static void dsps_dma_controller_destroy(struct dma_controller *c)
662{
663 struct musb *musb = c->musb;
664 struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
665 void __iomem *usbss_base = glue->usbss_base;
666
667 musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
668 cppi41_dma_controller_destroy(c);
669}
670
671#ifdef CONFIG_PM_SLEEP 661#ifdef CONFIG_PM_SLEEP
672static void dsps_dma_controller_suspend(struct dsps_glue *glue) 662static void dsps_dma_controller_suspend(struct dsps_glue *glue)
673{ 663{
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
697 687
698#ifdef CONFIG_USB_TI_CPPI41_DMA 688#ifdef CONFIG_USB_TI_CPPI41_DMA
699 .dma_init = dsps_dma_controller_create, 689 .dma_init = dsps_dma_controller_create,
700 .dma_exit = dsps_dma_controller_destroy, 690 .dma_exit = cppi41_dma_controller_destroy,
701#endif 691#endif
702 .enable = dsps_musb_enable, 692 .enable = dsps_musb_enable,
703 .disable = dsps_musb_disable, 693 .disable = dsps_musb_disable,
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index e53c68261017..9bbcee37524e 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -173,7 +173,7 @@ struct ump_interrupt {
173} __attribute__((packed)); 173} __attribute__((packed));
174 174
175 175
176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) 176#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) 177#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
178#define TIUMP_INTERRUPT_CODE_LSR 0x03 178#define TIUMP_INTERRUPT_CODE_LSR 0x03
179#define TIUMP_INTERRUPT_CODE_MSR 0x04 179#define TIUMP_INTERRUPT_CODE_MSR 0x04
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3010878f7f8e..e3c5832337e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
1119 1119
1120static int ti_get_port_from_code(unsigned char code) 1120static int ti_get_port_from_code(unsigned char code)
1121{ 1121{
1122 return (code >> 4) - 3; 1122 return (code >> 6) & 0x01;
1123} 1123}
1124 1124
1125static int ti_get_func_from_code(unsigned char code) 1125static int ti_get_func_from_code(unsigned char code)
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c267f2812a04..e227bb5b794f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
376 return 0; 376 return 0;
377 } 377 }
378 378
379 if ((us->fflags & US_FL_NO_ATA_1X) &&
380 (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
381 memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
382 sizeof(usb_stor_sense_invalidCDB));
383 srb->result = SAM_STAT_CHECK_CONDITION;
384 done(srb);
385 return 0;
386 }
387
379 /* enqueue the command and wake up the control thread */ 388 /* enqueue the command and wake up the control thread */
380 srb->scsi_done = done; 389 srb->scsi_done = done;
381 us->srb = srb; 390 us->srb = srb;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9e9de5452860..1f7b401c4d04 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
842 sdev->skip_ms_page_8 = 1; 842 sdev->skip_ms_page_8 = 1;
843 sdev->wce_default_on = 1; 843 sdev->wce_default_on = 1;
844 } 844 }
845
846 /*
847 * Some disks return the total number of blocks in response
848 * to READ CAPACITY rather than the highest block number.
849 * If this device makes that mistake, tell the sd driver.
850 */
851 if (devinfo->flags & US_FL_FIX_CAPACITY)
852 sdev->fix_capacity = 1;
853
854 /*
855 * Some devices don't like MODE SENSE with page=0x3f,
856 * which is the command used for checking if a device
857 * is write-protected. Now that we tell the sd driver
858 * to do a 192-byte transfer with this command the
859 * majority of devices work fine, but a few still can't
860 * handle it. The sd driver will simply assume those
861 * devices are write-enabled.
862 */
863 if (devinfo->flags & US_FL_NO_WP_DETECT)
864 sdev->skip_ms_page_3f = 1;
865
845 scsi_change_queue_depth(sdev, devinfo->qdepth - 2); 866 scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
846 return 0; 867 return 0;
847} 868}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 22fcfccf453a..f7f83b21dc74 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2288 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2289 US_FL_GO_SLOW ), 2289 US_FL_GO_SLOW ),
2290 2290
2291/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
2292UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999,
2293 "DJI",
2294 "CineSSD",
2295 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2296 US_FL_NO_ATA_1X),
2297
2291/* 2298/*
2292 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com> 2299 * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
2293 * Mio Moov 330 2300 * Mio Moov 330
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 95a2b10127db..76299b6ff06d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
255/* API for the port drivers */ 255/* API for the port drivers */
256 256
257/** 257/**
258 * typec_match_altmode - Match SVID to an array of alternate modes 258 * typec_match_altmode - Match SVID and mode to an array of alternate modes
259 * @altmodes: Array of alternate modes 259 * @altmodes: Array of alternate modes
260 * @n: Number of elements in the array, or -1 for NULL termiated arrays 260 * @n: Number of elements in the array, or -1 for NULL terminated arrays
261 * @svid: Standard or Vendor ID to match with 261 * @svid: Standard or Vendor ID to match with
262 * @mode: Mode to match with
262 * 263 *
263 * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no 264 * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
264 * match is found. 265 * match is found.
265 */ 266 */
266struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, 267struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index c202975f8097..e61dffb27a0c 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode 1484 * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
1485 * @port: USB Type-C Port that supports the alternate mode 1485 * @port: USB Type-C Port that supports the alternate mode
1486 * @desc: Description of the alternate mode 1486 * @desc: Description of the alternate mode
1487 * @drvdata: Private pointer to driver specific info
1488 * 1487 *
1489 * This routine is used to register an alternate mode that @port is capable of 1488 * This routine is used to register an alternate mode that @port is capable of
1490 * supporting. 1489 * supporting.
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index ddaac63ecf12..d990aa510fab 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/module.h>
12#include <linux/mutex.h> 13#include <linux/mutex.h>
13#include <linux/usb/typec_mux.h> 14#include <linux/usb/typec_mux.h>
14 15
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
49 mutex_lock(&switch_lock); 50 mutex_lock(&switch_lock);
50 sw = device_connection_find_match(dev, "typec-switch", NULL, 51 sw = device_connection_find_match(dev, "typec-switch", NULL,
51 typec_switch_match); 52 typec_switch_match);
52 if (!IS_ERR_OR_NULL(sw)) 53 if (!IS_ERR_OR_NULL(sw)) {
54 WARN_ON(!try_module_get(sw->dev->driver->owner));
53 get_device(sw->dev); 55 get_device(sw->dev);
56 }
54 mutex_unlock(&switch_lock); 57 mutex_unlock(&switch_lock);
55 58
56 return sw; 59 return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
65 */ 68 */
66void typec_switch_put(struct typec_switch *sw) 69void typec_switch_put(struct typec_switch *sw)
67{ 70{
68 if (!IS_ERR_OR_NULL(sw)) 71 if (!IS_ERR_OR_NULL(sw)) {
72 module_put(sw->dev->driver->owner);
69 put_device(sw->dev); 73 put_device(sw->dev);
74 }
70} 75}
71EXPORT_SYMBOL_GPL(typec_switch_put); 76EXPORT_SYMBOL_GPL(typec_switch_put);
72 77
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
136 141
137 mutex_lock(&mux_lock); 142 mutex_lock(&mux_lock);
138 mux = device_connection_find_match(dev, name, NULL, typec_mux_match); 143 mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
139 if (!IS_ERR_OR_NULL(mux)) 144 if (!IS_ERR_OR_NULL(mux)) {
145 WARN_ON(!try_module_get(mux->dev->driver->owner));
140 get_device(mux->dev); 146 get_device(mux->dev);
147 }
141 mutex_unlock(&mux_lock); 148 mutex_unlock(&mux_lock);
142 149
143 return mux; 150 return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
152 */ 159 */
153void typec_mux_put(struct typec_mux *mux) 160void typec_mux_put(struct typec_mux *mux)
154{ 161{
155 if (!IS_ERR_OR_NULL(mux)) 162 if (!IS_ERR_OR_NULL(mux)) {
163 module_put(mux->dev->driver->owner);
156 put_device(mux->dev); 164 put_device(mux->dev);
165 }
157} 166}
158EXPORT_SYMBOL_GPL(typec_mux_put); 167EXPORT_SYMBOL_GPL(typec_mux_put);
159 168
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 96c1d8400822..b13c6b4b2c66 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
952 list_for_each_entry_safe(node, n, &d->pending_list, node) { 952 list_for_each_entry_safe(node, n, &d->pending_list, node) {
953 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; 953 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
954 if (msg->iova <= vq_msg->iova && 954 if (msg->iova <= vq_msg->iova &&
955 msg->iova + msg->size - 1 > vq_msg->iova && 955 msg->iova + msg->size - 1 >= vq_msg->iova &&
956 vq_msg->type == VHOST_IOTLB_MISS) { 956 vq_msg->type == VHOST_IOTLB_MISS) {
957 vhost_poll_queue(&node->vq->poll); 957 vhost_poll_queue(&node->vq->poll);
958 list_del(&node->node); 958 list_del(&node->node);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3946649b85c8..ba906876cc45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -42,6 +42,7 @@ struct bmp_dib_header {
42 u32 colors_important; 42 u32 colors_important;
43} __packed; 43} __packed;
44 44
45static bool use_bgrt = true;
45static bool request_mem_succeeded = false; 46static bool request_mem_succeeded = false;
46static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC; 47static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
47 48
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
160 void *bgrt_image = NULL; 161 void *bgrt_image = NULL;
161 u8 *dst = info->screen_base; 162 u8 *dst = info->screen_base;
162 163
164 if (!use_bgrt)
165 return;
166
163 if (!bgrt_tab.image_address) { 167 if (!bgrt_tab.image_address) {
164 pr_info("efifb: No BGRT, not showing boot graphics\n"); 168 pr_info("efifb: No BGRT, not showing boot graphics\n");
165 return; 169 return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
290 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); 294 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
291 else if (!strcmp(this_opt, "nowc")) 295 else if (!strcmp(this_opt, "nowc"))
292 mem_flags &= ~EFI_MEMORY_WC; 296 mem_flags &= ~EFI_MEMORY_WC;
297 else if (!strcmp(this_opt, "nobgrt"))
298 use_bgrt = false;
293 } 299 }
294 } 300 }
295 301
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size)) 496 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
497 return -EFAULT; 497 return -EFAULT;
498 498
499 if (mr->w > 4096 || mr->h > 4096)
500 return -EINVAL;
501
499 if (mr->w * mr->h * 3 > mr->buffer_size) 502 if (mr->w * mr->h * 3 > mr->buffer_size)
500 return -EINVAL; 503 return -EINVAL;
501 504
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
509 mr->x, mr->y, mr->w, mr->h); 512 mr->x, mr->y, mr->w, mr->h);
510 513
511 if (r > 0) { 514 if (r > 0) {
512 if (copy_to_user(mr->buffer, buf, mr->buffer_size)) 515 if (copy_to_user(mr->buffer, buf, r))
513 r = -EFAULT; 516 r = -EFAULT;
514 } 517 }
515 518
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index def3a501acd6..d059d04c63ac 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
712 /* 712 /*
713 * enable controller clock 713 * enable controller clock
714 */ 714 */
715 clk_enable(fbi->clk); 715 clk_prepare_enable(fbi->clk);
716 716
717 pxa168fb_set_par(info); 717 pxa168fb_set_par(info);
718 718
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
767failed_free_cmap: 767failed_free_cmap:
768 fb_dealloc_cmap(&info->cmap); 768 fb_dealloc_cmap(&info->cmap);
769failed_free_clk: 769failed_free_clk:
770 clk_disable(fbi->clk); 770 clk_disable_unprepare(fbi->clk);
771failed_free_fbmem: 771failed_free_fbmem:
772 dma_free_coherent(fbi->dev, info->fix.smem_len, 772 dma_free_coherent(fbi->dev, info->fix.smem_len,
773 info->screen_base, fbi->fb_start_dma); 773 info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len), 807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
808 info->screen_base, info->fix.smem_start); 808 info->screen_base, info->fix.smem_start);
809 809
810 clk_disable(fbi->clk); 810 clk_disable_unprepare(fbi->clk);
811 811
812 framebuffer_release(info); 812 framebuffer_release(info);
813 813
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 045e8afe398b..9e88e3f594c2 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1157 dev_name); 1157 dev_name);
1158 goto out_err0; 1158 goto out_err0;
1159 } 1159 }
1160 /* fall though */ 1160 /* fall through */
1161 case S9000_ID_ARTIST: 1161 case S9000_ID_ARTIST:
1162 case S9000_ID_HCRX: 1162 case S9000_ID_HCRX:
1163 case S9000_ID_TIMBER: 1163 case S9000_ID_TIMBER:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
79 This value is used to allocate enough space in internal 79 This value is used to allocate enough space in internal
80 tables needed for physical memory administration. 80 tables needed for physical memory administration.
81 81
82config XEN_SCRUB_PAGES 82config XEN_SCRUB_PAGES_DEFAULT
83 bool "Scrub pages before returning them to system" 83 bool "Scrub pages before returning them to system by default"
84 depends on XEN_BALLOON 84 depends on XEN_BALLOON
85 default y 85 default y
86 help 86 help
87 Scrub pages before returning them to the system for reuse by 87 Scrub pages before returning them to the system for reuse by
88 other domains. This makes sure that any confidential data 88 other domains. This makes sure that any confidential data
 89 is not accidentally visible to other domains. It is more 89 is not accidentally visible to other domains. It is more
90 secure, but slightly less efficient. 90 secure, but slightly less efficient. This can be controlled with
91 xen_scrub_pages=0 parameter and
92 /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
93 This option only sets the default value.
94
91 If in doubt, say yes. 95 If in doubt, say yes.
92 96
93config XEN_DEV_EVTCHN 97config XEN_DEV_EVTCHN
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
19 19
20static void disable_hotplug_cpu(int cpu) 20static void disable_hotplug_cpu(int cpu)
21{ 21{
22 if (cpu_online(cpu)) { 22 if (!cpu_is_hotpluggable(cpu))
23 lock_device_hotplug(); 23 return;
24 lock_device_hotplug();
25 if (cpu_online(cpu))
24 device_offline(get_cpu_device(cpu)); 26 device_offline(get_cpu_device(cpu));
25 unlock_device_hotplug(); 27 if (!cpu_online(cpu) && cpu_present(cpu)) {
26 }
27 if (cpu_present(cpu))
28 xen_arch_unregister_cpu(cpu); 28 xen_arch_unregister_cpu(cpu);
29 29 set_cpu_present(cpu, false);
30 set_cpu_present(cpu, false); 30 }
31 unlock_device_hotplug();
31} 32}
32 33
33static int vcpu_online(unsigned int cpu) 34static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
138 clear_evtchn_to_irq_row(row); 138 clear_evtchn_to_irq_row(row);
139 } 139 }
140 140
141 evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq; 141 evtchn_to_irq[row][col] = irq;
142 return 0; 142 return 0;
143} 143}
144 144
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
492 return true; 492 return true;
493} 493}
494 494
495static void unmap_if_in_range(struct gntdev_grant_map *map, 495static int unmap_if_in_range(struct gntdev_grant_map *map,
496 unsigned long start, unsigned long end) 496 unsigned long start, unsigned long end,
497 bool blockable)
497{ 498{
498 unsigned long mstart, mend; 499 unsigned long mstart, mend;
499 int err; 500 int err;
500 501
502 if (!in_range(map, start, end))
503 return 0;
504
505 if (!blockable)
506 return -EAGAIN;
507
501 mstart = max(start, map->vma->vm_start); 508 mstart = max(start, map->vma->vm_start);
502 mend = min(end, map->vma->vm_end); 509 mend = min(end, map->vma->vm_end);
503 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", 510 pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
508 (mstart - map->vma->vm_start) >> PAGE_SHIFT, 515 (mstart - map->vma->vm_start) >> PAGE_SHIFT,
509 (mend - mstart) >> PAGE_SHIFT); 516 (mend - mstart) >> PAGE_SHIFT);
510 WARN_ON(err); 517 WARN_ON(err);
518
519 return 0;
511} 520}
512 521
513static int mn_invl_range_start(struct mmu_notifier *mn, 522static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
519 struct gntdev_grant_map *map; 528 struct gntdev_grant_map *map;
520 int ret = 0; 529 int ret = 0;
521 530
522 /* TODO do we really need a mutex here? */
523 if (blockable) 531 if (blockable)
524 mutex_lock(&priv->lock); 532 mutex_lock(&priv->lock);
525 else if (!mutex_trylock(&priv->lock)) 533 else if (!mutex_trylock(&priv->lock))
526 return -EAGAIN; 534 return -EAGAIN;
527 535
528 list_for_each_entry(map, &priv->maps, next) { 536 list_for_each_entry(map, &priv->maps, next) {
529 if (in_range(map, start, end)) { 537 ret = unmap_if_in_range(map, start, end, blockable);
530 ret = -EAGAIN; 538 if (ret)
531 goto out_unlock; 539 goto out_unlock;
532 }
533 unmap_if_in_range(map, start, end);
534 } 540 }
535 list_for_each_entry(map, &priv->freeable_maps, next) { 541 list_for_each_entry(map, &priv->freeable_maps, next) {
536 if (in_range(map, start, end)) { 542 ret = unmap_if_in_range(map, start, end, blockable);
537 ret = -EAGAIN; 543 if (ret)
538 goto out_unlock; 544 goto out_unlock;
539 }
540 unmap_if_in_range(map, start, end);
541 } 545 }
542 546
543out_unlock: 547out_unlock:
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7bafa703a992..84575baceebc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1040 return ret; 1040 return ret;
1041 1041
1042 for (i = 0; i < count; i++) { 1042 for (i = 0; i < count; i++) {
1043 /* Retry eagain maps */ 1043 switch (map_ops[i].status) {
1044 if (map_ops[i].status == GNTST_eagain) 1044 case GNTST_okay:
1045 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, 1045 {
1046 &map_ops[i].status, __func__);
1047
1048 if (map_ops[i].status == GNTST_okay) {
1049 struct xen_page_foreign *foreign; 1046 struct xen_page_foreign *foreign;
1050 1047
1051 SetPageForeign(pages[i]); 1048 SetPageForeign(pages[i]);
1052 foreign = xen_page_foreign(pages[i]); 1049 foreign = xen_page_foreign(pages[i]);
1053 foreign->domid = map_ops[i].dom; 1050 foreign->domid = map_ops[i].dom;
1054 foreign->gref = map_ops[i].ref; 1051 foreign->gref = map_ops[i].ref;
1052 break;
1053 }
1054
1055 case GNTST_no_device_space:
1056 pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1057 break;
1058
1059 case GNTST_eagain:
1060 /* Retry eagain maps */
1061 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1062 map_ops + i,
1063 &map_ops[i].status, __func__);
1064 /* Test status in next loop iteration. */
1065 i--;
1066 break;
1067
1068 default:
1069 break;
1055 } 1070 }
1056 } 1071 }
1057 1072
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
280 /* 280 /*
281 * The Xenstore watch fires directly after registering it and 281 * The Xenstore watch fires directly after registering it and
282 * after a suspend/resume cycle. So ENOENT is no error but 282 * after a suspend/resume cycle. So ENOENT is no error but
283 * might happen in those cases. 283 * might happen in those cases. ERANGE is observed when we get
284 * an empty value (''), this happens when we acknowledge the
285 * request by writing '\0' below.
284 */ 286 */
285 if (err != -ENOENT) 287 if (err != -ENOENT && err != -ERANGE)
286 pr_err("Error %d reading sysrq code in control/sysrq\n", 288 pr_err("Error %d reading sysrq code in control/sysrq\n",
287 err); 289 err);
288 xenbus_transaction_end(xbt, 1); 290 xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
14 14
15#include <xen/interface/memory.h> 15#include <xen/interface/memory.h>
16#include <xen/mem-reservation.h> 16#include <xen/mem-reservation.h>
17#include <linux/moduleparam.h>
18
19bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
20core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
17 21
18/* 22/*
19 * Use one extent per PAGE_SIZE to avoid to break down the page into 23 * Use one extent per PAGE_SIZE to avoid to break down the page into
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
44#include <xen/xenbus.h> 44#include <xen/xenbus.h>
45#include <xen/features.h> 45#include <xen/features.h>
46#include <xen/page.h> 46#include <xen/page.h>
47#include <xen/mem-reservation.h>
47 48
48#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) 49#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
49 50
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
137static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); 138static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
138static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); 139static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
139static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); 140static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
141static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
140 142
141static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, 143static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
142 char *buf) 144 char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
203 &dev_attr_max_schedule_delay.attr.attr, 205 &dev_attr_max_schedule_delay.attr.attr,
204 &dev_attr_retry_count.attr.attr, 206 &dev_attr_retry_count.attr.attr,
205 &dev_attr_max_retry_count.attr.attr, 207 &dev_attr_max_retry_count.attr.attr,
208 &dev_attr_scrub_pages.attr.attr,
206 NULL 209 NULL
207}; 210};
208 211
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index f2088838f690..5b471889d723 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
402} 402}
403static DEVICE_ATTR_RO(modalias); 403static DEVICE_ATTR_RO(modalias);
404 404
405static ssize_t state_show(struct device *dev,
406 struct device_attribute *attr, char *buf)
407{
408 return sprintf(buf, "%s\n",
409 xenbus_strstate(to_xenbus_device(dev)->state));
410}
411static DEVICE_ATTR_RO(state);
412
405static struct attribute *xenbus_dev_attrs[] = { 413static struct attribute *xenbus_dev_attrs[] = {
406 &dev_attr_nodename.attr, 414 &dev_attr_nodename.attr,
407 &dev_attr_devtype.attr, 415 &dev_attr_devtype.attr,
408 &dev_attr_modalias.attr, 416 &dev_attr_modalias.attr,
417 &dev_attr_state.attr,
409 NULL, 418 NULL,
410}; 419};
411 420
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 0c3285c8db95..476dcbb79713 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
98 goto inval; 98 goto inval;
99 99
100 args = strchr(name, ' '); 100 args = strchr(name, ' ');
101 if (!args) 101 if (args) {
102 goto inval; 102 do {
103 do { 103 *args++ = 0;
104 *args++ = 0; 104 } while(*args == ' ');
105 } while(*args == ' '); 105 if (!*args)
106 if (!*args) 106 goto inval;
107 goto inval; 107 }
108 108
109 /* determine command to perform */ 109 /* determine command to perform */
110 _debug("cmd=%s name=%s args=%s", buf, name, args); 110 _debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
120 120
121 if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags)) 121 if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
122 afs_put_cell(net, cell); 122 afs_put_cell(net, cell);
123 printk("kAFS: Added new cell '%s'\n", name);
124 } else { 123 } else {
125 goto inval; 124 goto inval;
126 } 125 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 53af9f5253f4..2cddfe7806a4 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1280,6 +1280,7 @@ struct btrfs_root {
1280 int send_in_progress; 1280 int send_in_progress;
1281 struct btrfs_subvolume_writers *subv_writers; 1281 struct btrfs_subvolume_writers *subv_writers;
1282 atomic_t will_be_snapshotted; 1282 atomic_t will_be_snapshotted;
1283 atomic_t snapshot_force_cow;
1283 1284
1284 /* For qgroup metadata reserved space */ 1285 /* For qgroup metadata reserved space */
1285 spinlock_t qgroup_meta_rsv_lock; 1286 spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do { \
3390#define btrfs_debug(fs_info, fmt, args...) \ 3391#define btrfs_debug(fs_info, fmt, args...) \
3391 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3392 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3392#define btrfs_debug_in_rcu(fs_info, fmt, args...) \ 3393#define btrfs_debug_in_rcu(fs_info, fmt, args...) \
3393 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3394 btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
3394#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ 3395#define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
3395 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3396 btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
3396#define btrfs_debug_rl(fs_info, fmt, args...) \ 3397#define btrfs_debug_rl(fs_info, fmt, args...) \
3397 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) 3398 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
3398#endif 3399#endif
@@ -3404,6 +3405,13 @@ do { \
3404 rcu_read_unlock(); \ 3405 rcu_read_unlock(); \
3405} while (0) 3406} while (0)
3406 3407
3408#define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \
3409do { \
3410 rcu_read_lock(); \
3411 btrfs_no_printk(fs_info, fmt, ##args); \
3412 rcu_read_unlock(); \
3413} while (0)
3414
3407#define btrfs_printk_ratelimited(fs_info, fmt, args...) \ 3415#define btrfs_printk_ratelimited(fs_info, fmt, args...) \
3408do { \ 3416do { \
3409 static DEFINE_RATELIMIT_STATE(_rs, \ 3417 static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5124c15705ce..05dc3c17cb62 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
1187 atomic_set(&root->log_batch, 0); 1187 atomic_set(&root->log_batch, 0);
1188 refcount_set(&root->refs, 1); 1188 refcount_set(&root->refs, 1);
1189 atomic_set(&root->will_be_snapshotted, 0); 1189 atomic_set(&root->will_be_snapshotted, 0);
1190 atomic_set(&root->snapshot_force_cow, 0);
1190 root->log_transid = 0; 1191 root->log_transid = 0;
1191 root->log_transid_committed = -1; 1192 root->log_transid_committed = -1;
1192 root->last_log_commit = 0; 1193 root->last_log_commit = 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index de6f75f5547b..2d9074295d7f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5800 * root: the root of the parent directory 5800 * root: the root of the parent directory
5801 * rsv: block reservation 5801 * rsv: block reservation
5802 * items: the number of items that we need do reservation 5802 * items: the number of items that we need do reservation
5803 * qgroup_reserved: used to return the reserved size in qgroup 5803 * use_global_rsv: allow fallback to the global block reservation
5804 * 5804 *
5805 * This function is used to reserve the space for snapshot/subvolume 5805 * This function is used to reserve the space for snapshot/subvolume
5806 * creation and deletion. Those operations are different with the 5806 * creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5810 * the space reservation mechanism in start_transaction(). 5810 * the space reservation mechanism in start_transaction().
5811 */ 5811 */
5812int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, 5812int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5813 struct btrfs_block_rsv *rsv, 5813 struct btrfs_block_rsv *rsv, int items,
5814 int items,
5815 bool use_global_rsv) 5814 bool use_global_rsv)
5816{ 5815{
5816 u64 qgroup_num_bytes = 0;
5817 u64 num_bytes; 5817 u64 num_bytes;
5818 int ret; 5818 int ret;
5819 struct btrfs_fs_info *fs_info = root->fs_info; 5819 struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5821 5821
5822 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { 5822 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
5823 /* One for parent inode, two for dir entries */ 5823 /* One for parent inode, two for dir entries */
5824 num_bytes = 3 * fs_info->nodesize; 5824 qgroup_num_bytes = 3 * fs_info->nodesize;
5825 ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); 5825 ret = btrfs_qgroup_reserve_meta_prealloc(root,
5826 qgroup_num_bytes, true);
5826 if (ret) 5827 if (ret)
5827 return ret; 5828 return ret;
5828 } else {
5829 num_bytes = 0;
5830 } 5829 }
5831 5830
5832 num_bytes = btrfs_calc_trans_metadata_size(fs_info, items); 5831 num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5838 if (ret == -ENOSPC && use_global_rsv) 5837 if (ret == -ENOSPC && use_global_rsv)
5839 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1); 5838 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
5840 5839
5841 if (ret && num_bytes) 5840 if (ret && qgroup_num_bytes)
5842 btrfs_qgroup_free_meta_prealloc(root, num_bytes); 5841 btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
5843 5842
5844 return ret; 5843 return ret;
5845} 5844}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9357a19d2bff..3ea5339603cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1271 u64 disk_num_bytes; 1271 u64 disk_num_bytes;
1272 u64 ram_bytes; 1272 u64 ram_bytes;
1273 int extent_type; 1273 int extent_type;
1274 int ret, err; 1274 int ret;
1275 int type; 1275 int type;
1276 int nocow; 1276 int nocow;
1277 int check_prev = 1; 1277 int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
1403 * if there are pending snapshots for this root, 1403 * if there are pending snapshots for this root,
1404 * we fall into common COW way. 1404 * we fall into common COW way.
1405 */ 1405 */
1406 if (!nolock) { 1406 if (!nolock && atomic_read(&root->snapshot_force_cow))
1407 err = btrfs_start_write_no_snapshotting(root); 1407 goto out_check;
1408 if (!err)
1409 goto out_check;
1410 }
1411 /* 1408 /*
1412 * force cow if csum exists in the range. 1409 * force cow if csum exists in the range.
1413 * this ensure that csum for a given extent are 1410 * this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
1416 ret = csum_exist_in_range(fs_info, disk_bytenr, 1413 ret = csum_exist_in_range(fs_info, disk_bytenr,
1417 num_bytes); 1414 num_bytes);
1418 if (ret) { 1415 if (ret) {
1419 if (!nolock)
1420 btrfs_end_write_no_snapshotting(root);
1421
1422 /* 1416 /*
1423 * ret could be -EIO if the above fails to read 1417 * ret could be -EIO if the above fails to read
1424 * metadata. 1418 * metadata.
@@ -1431,11 +1425,8 @@ next_slot:
1431 WARN_ON_ONCE(nolock); 1425 WARN_ON_ONCE(nolock);
1432 goto out_check; 1426 goto out_check;
1433 } 1427 }
1434 if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { 1428 if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
1435 if (!nolock)
1436 btrfs_end_write_no_snapshotting(root);
1437 goto out_check; 1429 goto out_check;
1438 }
1439 nocow = 1; 1430 nocow = 1;
1440 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1431 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1441 extent_end = found_key.offset + 1432 extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
1448out_check: 1439out_check:
1449 if (extent_end <= start) { 1440 if (extent_end <= start) {
1450 path->slots[0]++; 1441 path->slots[0]++;
1451 if (!nolock && nocow)
1452 btrfs_end_write_no_snapshotting(root);
1453 if (nocow) 1442 if (nocow)
1454 btrfs_dec_nocow_writers(fs_info, disk_bytenr); 1443 btrfs_dec_nocow_writers(fs_info, disk_bytenr);
1455 goto next_slot; 1444 goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
1471 end, page_started, nr_written, 1, 1460 end, page_started, nr_written, 1,
1472 NULL); 1461 NULL);
1473 if (ret) { 1462 if (ret) {
1474 if (!nolock && nocow)
1475 btrfs_end_write_no_snapshotting(root);
1476 if (nocow) 1463 if (nocow)
1477 btrfs_dec_nocow_writers(fs_info, 1464 btrfs_dec_nocow_writers(fs_info,
1478 disk_bytenr); 1465 disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
1492 ram_bytes, BTRFS_COMPRESS_NONE, 1479 ram_bytes, BTRFS_COMPRESS_NONE,
1493 BTRFS_ORDERED_PREALLOC); 1480 BTRFS_ORDERED_PREALLOC);
1494 if (IS_ERR(em)) { 1481 if (IS_ERR(em)) {
1495 if (!nolock && nocow)
1496 btrfs_end_write_no_snapshotting(root);
1497 if (nocow) 1482 if (nocow)
1498 btrfs_dec_nocow_writers(fs_info, 1483 btrfs_dec_nocow_writers(fs_info,
1499 disk_bytenr); 1484 disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
1532 EXTENT_CLEAR_DATA_RESV, 1517 EXTENT_CLEAR_DATA_RESV,
1533 PAGE_UNLOCK | PAGE_SET_PRIVATE2); 1518 PAGE_UNLOCK | PAGE_SET_PRIVATE2);
1534 1519
1535 if (!nolock && nocow)
1536 btrfs_end_write_no_snapshotting(root);
1537 cur_offset = extent_end; 1520 cur_offset = extent_end;
1538 1521
1539 /* 1522 /*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6639 drop_inode = 1; 6622 drop_inode = 1;
6640 } else { 6623 } else {
6641 struct dentry *parent = dentry->d_parent; 6624 struct dentry *parent = dentry->d_parent;
6625 int ret;
6626
6642 err = btrfs_update_inode(trans, root, inode); 6627 err = btrfs_update_inode(trans, root, inode);
6643 if (err) 6628 if (err)
6644 goto fail; 6629 goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6652 goto fail; 6637 goto fail;
6653 } 6638 }
6654 d_instantiate(dentry, inode); 6639 d_instantiate(dentry, inode);
6655 btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent); 6640 ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
6641 true, NULL);
6642 if (ret == BTRFS_NEED_TRANS_COMMIT) {
6643 err = btrfs_commit_transaction(trans);
6644 trans = NULL;
6645 }
6656 } 6646 }
6657 6647
6658fail: 6648fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
9388 u64 new_idx = 0; 9378 u64 new_idx = 0;
9389 u64 root_objectid; 9379 u64 root_objectid;
9390 int ret; 9380 int ret;
9391 int ret2;
9392 bool root_log_pinned = false; 9381 bool root_log_pinned = false;
9393 bool dest_log_pinned = false; 9382 bool dest_log_pinned = false;
9383 struct btrfs_log_ctx ctx_root;
9384 struct btrfs_log_ctx ctx_dest;
9385 bool sync_log_root = false;
9386 bool sync_log_dest = false;
9387 bool commit_transaction = false;
9394 9388
9395 /* we only allow rename subvolume link between subvolumes */ 9389 /* we only allow rename subvolume link between subvolumes */
9396 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) 9390 if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9397 return -EXDEV; 9391 return -EXDEV;
9398 9392
9393 btrfs_init_log_ctx(&ctx_root, old_inode);
9394 btrfs_init_log_ctx(&ctx_dest, new_inode);
9395
9399 /* close the race window with snapshot create/destroy ioctl */ 9396 /* close the race window with snapshot create/destroy ioctl */
9400 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9397 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9401 down_read(&fs_info->subvol_sem); 9398 down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
9542 9539
9543 if (root_log_pinned) { 9540 if (root_log_pinned) {
9544 parent = new_dentry->d_parent; 9541 parent = new_dentry->d_parent;
9545 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9542 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9546 parent); 9543 BTRFS_I(old_dir), parent,
9544 false, &ctx_root);
9545 if (ret == BTRFS_NEED_LOG_SYNC)
9546 sync_log_root = true;
9547 else if (ret == BTRFS_NEED_TRANS_COMMIT)
9548 commit_transaction = true;
9549 ret = 0;
9547 btrfs_end_log_trans(root); 9550 btrfs_end_log_trans(root);
9548 root_log_pinned = false; 9551 root_log_pinned = false;
9549 } 9552 }
9550 if (dest_log_pinned) { 9553 if (dest_log_pinned) {
9551 parent = old_dentry->d_parent; 9554 if (!commit_transaction) {
9552 btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir), 9555 parent = old_dentry->d_parent;
9553 parent); 9556 ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
9557 BTRFS_I(new_dir), parent,
9558 false, &ctx_dest);
9559 if (ret == BTRFS_NEED_LOG_SYNC)
9560 sync_log_dest = true;
9561 else if (ret == BTRFS_NEED_TRANS_COMMIT)
9562 commit_transaction = true;
9563 ret = 0;
9564 }
9554 btrfs_end_log_trans(dest); 9565 btrfs_end_log_trans(dest);
9555 dest_log_pinned = false; 9566 dest_log_pinned = false;
9556 } 9567 }
@@ -9583,8 +9594,26 @@ out_fail:
9583 dest_log_pinned = false; 9594 dest_log_pinned = false;
9584 } 9595 }
9585 } 9596 }
9586 ret2 = btrfs_end_transaction(trans); 9597 if (!ret && sync_log_root && !commit_transaction) {
9587 ret = ret ? ret : ret2; 9598 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
9599 &ctx_root);
9600 if (ret)
9601 commit_transaction = true;
9602 }
9603 if (!ret && sync_log_dest && !commit_transaction) {
9604 ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
9605 &ctx_dest);
9606 if (ret)
9607 commit_transaction = true;
9608 }
9609 if (commit_transaction) {
9610 ret = btrfs_commit_transaction(trans);
9611 } else {
9612 int ret2;
9613
9614 ret2 = btrfs_end_transaction(trans);
9615 ret = ret ? ret : ret2;
9616 }
9588out_notrans: 9617out_notrans:
9589 if (new_ino == BTRFS_FIRST_FREE_OBJECTID) 9618 if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
9590 up_read(&fs_info->subvol_sem); 9619 up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9661 int ret; 9690 int ret;
9662 u64 old_ino = btrfs_ino(BTRFS_I(old_inode)); 9691 u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
9663 bool log_pinned = false; 9692 bool log_pinned = false;
9693 struct btrfs_log_ctx ctx;
9694 bool sync_log = false;
9695 bool commit_transaction = false;
9664 9696
9665 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) 9697 if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9666 return -EPERM; 9698 return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9818 if (log_pinned) { 9850 if (log_pinned) {
9819 struct dentry *parent = new_dentry->d_parent; 9851 struct dentry *parent = new_dentry->d_parent;
9820 9852
9821 btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir), 9853 btrfs_init_log_ctx(&ctx, old_inode);
9822 parent); 9854 ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
9855 BTRFS_I(old_dir), parent,
9856 false, &ctx);
9857 if (ret == BTRFS_NEED_LOG_SYNC)
9858 sync_log = true;
9859 else if (ret == BTRFS_NEED_TRANS_COMMIT)
9860 commit_transaction = true;
9861 ret = 0;
9823 btrfs_end_log_trans(root); 9862 btrfs_end_log_trans(root);
9824 log_pinned = false; 9863 log_pinned = false;
9825 } 9864 }
@@ -9856,7 +9895,19 @@ out_fail:
9856 btrfs_end_log_trans(root); 9895 btrfs_end_log_trans(root);
9857 log_pinned = false; 9896 log_pinned = false;
9858 } 9897 }
9859 btrfs_end_transaction(trans); 9898 if (!ret && sync_log) {
9899 ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
9900 if (ret)
9901 commit_transaction = true;
9902 }
9903 if (commit_transaction) {
9904 ret = btrfs_commit_transaction(trans);
9905 } else {
9906 int ret2;
9907
9908 ret2 = btrfs_end_transaction(trans);
9909 ret = ret ? ret : ret2;
9910 }
9860out_notrans: 9911out_notrans:
9861 if (old_ino == BTRFS_FIRST_FREE_OBJECTID) 9912 if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9862 up_read(&fs_info->subvol_sem); 9913 up_read(&fs_info->subvol_sem);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 63600dc2ac4c..d60b6caf09e8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
747 struct btrfs_pending_snapshot *pending_snapshot; 747 struct btrfs_pending_snapshot *pending_snapshot;
748 struct btrfs_trans_handle *trans; 748 struct btrfs_trans_handle *trans;
749 int ret; 749 int ret;
750 bool snapshot_force_cow = false;
750 751
751 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) 752 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
752 return -EINVAL; 753 return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
763 goto free_pending; 764 goto free_pending;
764 } 765 }
765 766
767 /*
768 * Force new buffered writes to reserve space even when NOCOW is
769 * possible. This is to avoid later writeback (running dealloc) to
770 * fallback to COW mode and unexpectedly fail with ENOSPC.
771 */
766 atomic_inc(&root->will_be_snapshotted); 772 atomic_inc(&root->will_be_snapshotted);
767 smp_mb__after_atomic(); 773 smp_mb__after_atomic();
768 /* wait for no snapshot writes */ 774 /* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
773 if (ret) 779 if (ret)
774 goto dec_and_free; 780 goto dec_and_free;
775 781
782 /*
783 * All previous writes have started writeback in NOCOW mode, so now
784 * we force future writes to fallback to COW mode during snapshot
785 * creation.
786 */
787 atomic_inc(&root->snapshot_force_cow);
788 snapshot_force_cow = true;
789
776 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); 790 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
777 791
778 btrfs_init_block_rsv(&pending_snapshot->block_rsv, 792 btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
837fail: 851fail:
838 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); 852 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
839dec_and_free: 853dec_and_free:
854 if (snapshot_force_cow)
855 atomic_dec(&root->snapshot_force_cow);
840 if (atomic_dec_and_test(&root->will_be_snapshotted)) 856 if (atomic_dec_and_test(&root->will_be_snapshotted))
841 wake_up_var(&root->will_be_snapshotted); 857 wake_up_var(&root->will_be_snapshotted);
842free_pending: 858free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3453 3469
3454 same_lock_start = min_t(u64, loff, dst_loff); 3470 same_lock_start = min_t(u64, loff, dst_loff);
3455 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start; 3471 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3472 } else {
3473 /*
3474 * If the source and destination inodes are different, the
3475 * source's range end offset matches the source's i_size, that
3476 * i_size is not a multiple of the sector size, and the
3477 * destination range does not go past the destination's i_size,
3478 * we must round down the length to the nearest sector size
3479 * multiple. If we don't do this adjustment we end replacing
3480 * with zeroes the bytes in the range that starts at the
3481 * deduplication range's end offset and ends at the next sector
3482 * size multiple.
3483 */
3484 if (loff + olen == i_size_read(src) &&
3485 dst_loff + len < i_size_read(dst)) {
3486 const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3487
3488 len = round_down(i_size_read(src), sz) - loff;
3489 olen = len;
3490 }
3456 } 3491 }
3457 3492
3458again: 3493again:
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 4353bb69bb86..d4917c0cddf5 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1019,10 +1019,9 @@ out_add_root:
1019 spin_unlock(&fs_info->qgroup_lock); 1019 spin_unlock(&fs_info->qgroup_lock);
1020 1020
1021 ret = btrfs_commit_transaction(trans); 1021 ret = btrfs_commit_transaction(trans);
1022 if (ret) { 1022 trans = NULL;
1023 trans = NULL; 1023 if (ret)
1024 goto out_free_path; 1024 goto out_free_path;
1025 }
1026 1025
1027 ret = qgroup_rescan_init(fs_info, 0, 1); 1026 ret = qgroup_rescan_init(fs_info, 0, 1);
1028 if (!ret) { 1027 if (!ret) {
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1650dc44a5e3..3c2ae0e4f25a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6025 * Call this after adding a new name for a file and it will properly 6025 * Call this after adding a new name for a file and it will properly
6026 * update the log to reflect the new name. 6026 * update the log to reflect the new name.
6027 * 6027 *
6028 * It will return zero if all goes well, and it will return 1 if a 6028 * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
6029 * full transaction commit is required. 6029 * true (because it's not used).
6030 *
6031 * Return value depends on whether @sync_log is true or false.
6032 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6033 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6034 * otherwise.
6035 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6036 * to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6037 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6038 * committed (without attempting to sync the log).
6030 */ 6039 */
6031int btrfs_log_new_name(struct btrfs_trans_handle *trans, 6040int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6032 struct btrfs_inode *inode, struct btrfs_inode *old_dir, 6041 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6033 struct dentry *parent) 6042 struct dentry *parent,
6043 bool sync_log, struct btrfs_log_ctx *ctx)
6034{ 6044{
6035 struct btrfs_fs_info *fs_info = trans->fs_info; 6045 struct btrfs_fs_info *fs_info = trans->fs_info;
6046 int ret;
6036 6047
6037 /* 6048 /*
6038 * this will force the logging code to walk the dentry chain 6049 * this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6047 */ 6058 */
6048 if (inode->logged_trans <= fs_info->last_trans_committed && 6059 if (inode->logged_trans <= fs_info->last_trans_committed &&
6049 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) 6060 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6050 return 0; 6061 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6062 BTRFS_DONT_NEED_LOG_SYNC;
6063
6064 if (sync_log) {
6065 struct btrfs_log_ctx ctx2;
6066
6067 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6068 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6069 LOG_INODE_EXISTS, &ctx2);
6070 if (ret == BTRFS_NO_LOG_SYNC)
6071 return BTRFS_DONT_NEED_TRANS_COMMIT;
6072 else if (ret)
6073 return BTRFS_NEED_TRANS_COMMIT;
6074
6075 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6076 if (ret)
6077 return BTRFS_NEED_TRANS_COMMIT;
6078 return BTRFS_DONT_NEED_TRANS_COMMIT;
6079 }
6080
6081 ASSERT(ctx);
6082 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6083 LOG_INODE_EXISTS, ctx);
6084 if (ret == BTRFS_NO_LOG_SYNC)
6085 return BTRFS_DONT_NEED_LOG_SYNC;
6086 else if (ret)
6087 return BTRFS_NEED_TRANS_COMMIT;
6051 6088
6052 return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX, 6089 return BTRFS_NEED_LOG_SYNC;
6053 LOG_INODE_EXISTS, NULL);
6054} 6090}
6055 6091
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 122e68b89a5a..7ab9bb88a639 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
71 int for_rename); 71 int for_rename);
72void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans, 72void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
73 struct btrfs_inode *dir); 73 struct btrfs_inode *dir);
74/* Return values for btrfs_log_new_name() */
75enum {
76 BTRFS_DONT_NEED_TRANS_COMMIT,
77 BTRFS_NEED_TRANS_COMMIT,
78 BTRFS_DONT_NEED_LOG_SYNC,
79 BTRFS_NEED_LOG_SYNC,
80};
74int btrfs_log_new_name(struct btrfs_trans_handle *trans, 81int btrfs_log_new_name(struct btrfs_trans_handle *trans,
75 struct btrfs_inode *inode, struct btrfs_inode *old_dir, 82 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
76 struct dentry *parent); 83 struct dentry *parent,
84 bool sync_log, struct btrfs_log_ctx *ctx);
77 85
78#endif 86#endif
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da86706123ff..f4405e430da6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4491,7 +4491,12 @@ again:
4491 4491
4492 /* Now btrfs_update_device() will change the on-disk size. */ 4492 /* Now btrfs_update_device() will change the on-disk size. */
4493 ret = btrfs_update_device(trans, device); 4493 ret = btrfs_update_device(trans, device);
4494 btrfs_end_transaction(trans); 4494 if (ret < 0) {
4495 btrfs_abort_transaction(trans, ret);
4496 btrfs_end_transaction(trans);
4497 } else {
4498 ret = btrfs_commit_transaction(trans);
4499 }
4495done: 4500done:
4496 btrfs_free_path(path); 4501 btrfs_free_path(path);
4497 if (ret) { 4502 if (ret) {
diff --git a/fs/buffer.c b/fs/buffer.c
index 4cc679d5bf58..6f1ae3ac9789 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -39,7 +39,6 @@
39#include <linux/buffer_head.h> 39#include <linux/buffer_head.h>
40#include <linux/task_io_accounting_ops.h> 40#include <linux/task_io_accounting_ops.h>
41#include <linux/bio.h> 41#include <linux/bio.h>
42#include <linux/notifier.h>
43#include <linux/cpu.h> 42#include <linux/cpu.h>
44#include <linux/bitops.h> 43#include <linux/bitops.h>
45#include <linux/mpage.h> 44#include <linux/mpage.h>
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 43ca3b763875..eab1359d0553 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
602 602
603/* 603/*
604 * create a new fs client 604 * create a new fs client
605 *
606 * Success or not, this function consumes @fsopt and @opt.
605 */ 607 */
606static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, 608static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
607 struct ceph_options *opt) 609 struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
609 struct ceph_fs_client *fsc; 611 struct ceph_fs_client *fsc;
610 int page_count; 612 int page_count;
611 size_t size; 613 size_t size;
612 int err = -ENOMEM; 614 int err;
613 615
614 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); 616 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
615 if (!fsc) 617 if (!fsc) {
616 return ERR_PTR(-ENOMEM); 618 err = -ENOMEM;
619 goto fail;
620 }
617 621
618 fsc->client = ceph_create_client(opt, fsc); 622 fsc->client = ceph_create_client(opt, fsc);
619 if (IS_ERR(fsc->client)) { 623 if (IS_ERR(fsc->client)) {
620 err = PTR_ERR(fsc->client); 624 err = PTR_ERR(fsc->client);
621 goto fail; 625 goto fail;
622 } 626 }
627 opt = NULL; /* fsc->client now owns this */
623 628
624 fsc->client->extra_mon_dispatch = extra_mon_dispatch; 629 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
625 fsc->client->osdc.abort_on_full = true; 630 fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
677 ceph_destroy_client(fsc->client); 682 ceph_destroy_client(fsc->client);
678fail: 683fail:
679 kfree(fsc); 684 kfree(fsc);
685 if (opt)
686 ceph_destroy_options(opt);
687 destroy_mount_options(fsopt);
680 return ERR_PTR(err); 688 return ERR_PTR(err);
681} 689}
682 690
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
1042 fsc = create_fs_client(fsopt, opt); 1050 fsc = create_fs_client(fsopt, opt);
1043 if (IS_ERR(fsc)) { 1051 if (IS_ERR(fsc)) {
1044 res = ERR_CAST(fsc); 1052 res = ERR_CAST(fsc);
1045 destroy_mount_options(fsopt);
1046 ceph_destroy_options(opt);
1047 goto out_final; 1053 goto out_final;
1048 } 1054 }
1049 1055
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 35c83fe7dba0..abcd78e332fe 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -6,6 +6,7 @@ config CIFS
6 select CRYPTO_MD4 6 select CRYPTO_MD4
7 select CRYPTO_MD5 7 select CRYPTO_MD5
8 select CRYPTO_SHA256 8 select CRYPTO_SHA256
9 select CRYPTO_SHA512
9 select CRYPTO_CMAC 10 select CRYPTO_CMAC
10 select CRYPTO_HMAC 11 select CRYPTO_HMAC
11 select CRYPTO_ARC4 12 select CRYPTO_ARC4
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index b380e0871372..a2b2355e7f01 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
105 case SFM_LESSTHAN: 105 case SFM_LESSTHAN:
106 *target = '<'; 106 *target = '<';
107 break; 107 break;
108 case SFM_SLASH:
109 *target = '\\';
110 break;
111 case SFM_SPACE: 108 case SFM_SPACE:
112 *target = ' '; 109 *target = ' ';
113 break; 110 break;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index dc2f4cf08fe9..5657b79dbc99 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
601 } 601 }
602 602
603 count = 0; 603 count = 0;
604 /*
605 * We know that all the name entries in the protocols array
606 * are short (< 16 bytes anyway) and are NUL terminated.
607 */
604 for (i = 0; i < CIFS_NUM_PROT; i++) { 608 for (i = 0; i < CIFS_NUM_PROT; i++) {
605 strncpy(pSMB->DialectsArray+count, protocols[i].name, 16); 609 size_t len = strlen(protocols[i].name) + 1;
606 count += strlen(protocols[i].name) + 1; 610
607 /* null at end of source and target buffers anyway */ 611 memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
612 count += len;
608 } 613 }
609 inc_rfc1001_len(pSMB, count); 614 inc_rfc1001_len(pSMB, count);
610 pSMB->ByteCount = cpu_to_le16(count); 615 pSMB->ByteCount = cpu_to_le16(count);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index c832a8a1970a..7aa08dba4719 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2547,7 +2547,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
2547 if (tcon == NULL) 2547 if (tcon == NULL)
2548 return -ENOMEM; 2548 return -ENOMEM;
2549 2549
2550 snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName); 2550 snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
2551 2551
2552 /* cannot fail */ 2552 /* cannot fail */
2553 nls_codepage = load_nls_default(); 2553 nls_codepage = load_nls_default();
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d32eaa4b2437..6e8765f44508 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
467 oparms.cifs_sb = cifs_sb; 467 oparms.cifs_sb = cifs_sb;
468 oparms.desired_access = GENERIC_READ; 468 oparms.desired_access = GENERIC_READ;
469 oparms.create_options = CREATE_NOT_DIR; 469 oparms.create_options = CREATE_NOT_DIR;
470 if (backup_cred(cifs_sb))
471 oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
470 oparms.disposition = FILE_OPEN; 472 oparms.disposition = FILE_OPEN;
471 oparms.path = path; 473 oparms.path = path;
472 oparms.fid = &fid; 474 oparms.fid = &fid;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index dacb2c05674c..6926685e513c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
402 (struct smb_com_transaction_change_notify_rsp *)buf; 402 (struct smb_com_transaction_change_notify_rsp *)buf;
403 struct file_notify_information *pnotify; 403 struct file_notify_information *pnotify;
404 __u32 data_offset = 0; 404 __u32 data_offset = 0;
405 size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
406
405 if (get_bcc(buf) > sizeof(struct file_notify_information)) { 407 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
406 data_offset = le32_to_cpu(pSMBr->DataOffset); 408 data_offset = le32_to_cpu(pSMBr->DataOffset);
407 409
410 if (data_offset >
411 len - sizeof(struct file_notify_information)) {
412 cifs_dbg(FYI, "invalid data_offset %u\n",
413 data_offset);
414 return true;
415 }
408 pnotify = (struct file_notify_information *) 416 pnotify = (struct file_notify_information *)
409 ((char *)&pSMBr->hdr.Protocol + data_offset); 417 ((char *)&pSMBr->hdr.Protocol + data_offset);
410 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n", 418 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index eeab81c9452f..e169e1a5fd35 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
376 376
377 new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + 377 new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
378 pfData->FileNameLength; 378 pfData->FileNameLength;
379 } else 379 } else {
380 new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); 380 u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
381
382 if (old_entry + next_offset < old_entry) {
383 cifs_dbg(VFS, "invalid offset %u\n", next_offset);
384 return NULL;
385 }
386 new_entry = old_entry + next_offset;
387 }
381 cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); 388 cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
382 /* validate that new_entry is not past end of SMB */ 389 /* validate that new_entry is not past end of SMB */
383 if (new_entry >= end_of_smb) { 390 if (new_entry >= end_of_smb) {
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index db0453660ff6..6a9c47541c53 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
248 * MacOS server pads after SMB2.1 write response with 3 bytes 248 * MacOS server pads after SMB2.1 write response with 3 bytes
249 * of junk. Other servers match RFC1001 len to actual 249 * of junk. Other servers match RFC1001 len to actual
250 * SMB2/SMB3 frame length (header + smb2 response specific data) 250 * SMB2/SMB3 frame length (header + smb2 response specific data)
251 * Some windows servers do too when compounding is used. 251 * Some windows servers also pad up to 8 bytes when compounding.
252 * Log the server error (once), but allow it and continue 252 * If pad is longer than eight bytes, log the server behavior
253 * (once), since may indicate a problem but allow it and continue
253 * since the frame is parseable. 254 * since the frame is parseable.
254 */ 255 */
255 if (clc_len < len) { 256 if (clc_len < len) {
256 printk_once(KERN_WARNING 257 pr_warn_once(
257 "SMB2 server sent bad RFC1001 len %d not %d\n", 258 "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
258 len, clc_len); 259 len, clc_len, command, mid);
259 return 0; 260 return 0;
260 } 261 }
262 pr_warn_once(
263 "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
264 len, clc_len, command, mid);
261 265
262 return 1; 266 return 1;
263 } 267 }
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 247a98e6c856..d954ce36b473 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
630 oparms.tcon = tcon; 630 oparms.tcon = tcon;
631 oparms.desired_access = FILE_READ_ATTRIBUTES; 631 oparms.desired_access = FILE_READ_ATTRIBUTES;
632 oparms.disposition = FILE_OPEN; 632 oparms.disposition = FILE_OPEN;
633 oparms.create_options = 0; 633 if (backup_cred(cifs_sb))
634 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
635 else
636 oparms.create_options = 0;
634 oparms.fid = &fid; 637 oparms.fid = &fid;
635 oparms.reconnect = false; 638 oparms.reconnect = false;
636 639
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
779 oparms.tcon = tcon; 782 oparms.tcon = tcon;
780 oparms.desired_access = FILE_READ_EA; 783 oparms.desired_access = FILE_READ_EA;
781 oparms.disposition = FILE_OPEN; 784 oparms.disposition = FILE_OPEN;
782 oparms.create_options = 0; 785 if (backup_cred(cifs_sb))
786 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
787 else
788 oparms.create_options = 0;
783 oparms.fid = &fid; 789 oparms.fid = &fid;
784 oparms.reconnect = false; 790 oparms.reconnect = false;
785 791
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
858 oparms.tcon = tcon; 864 oparms.tcon = tcon;
859 oparms.desired_access = FILE_WRITE_EA; 865 oparms.desired_access = FILE_WRITE_EA;
860 oparms.disposition = FILE_OPEN; 866 oparms.disposition = FILE_OPEN;
861 oparms.create_options = 0; 867 if (backup_cred(cifs_sb))
868 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
869 else
870 oparms.create_options = 0;
862 oparms.fid = &fid; 871 oparms.fid = &fid;
863 oparms.reconnect = false; 872 oparms.reconnect = false;
864 873
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
1453 oparms.tcon = tcon; 1462 oparms.tcon = tcon;
1454 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA; 1463 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
1455 oparms.disposition = FILE_OPEN; 1464 oparms.disposition = FILE_OPEN;
1456 oparms.create_options = 0; 1465 if (backup_cred(cifs_sb))
1466 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1467 else
1468 oparms.create_options = 0;
1457 oparms.fid = fid; 1469 oparms.fid = fid;
1458 oparms.reconnect = false; 1470 oparms.reconnect = false;
1459 1471
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
1857 oparms.tcon = tcon; 1869 oparms.tcon = tcon;
1858 oparms.desired_access = FILE_READ_ATTRIBUTES; 1870 oparms.desired_access = FILE_READ_ATTRIBUTES;
1859 oparms.disposition = FILE_OPEN; 1871 oparms.disposition = FILE_OPEN;
1860 oparms.create_options = 0; 1872 if (backup_cred(cifs_sb))
1873 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
1874 else
1875 oparms.create_options = 0;
1861 oparms.fid = &fid; 1876 oparms.fid = &fid;
1862 oparms.reconnect = false; 1877 oparms.reconnect = false;
1863 1878
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
3639struct smb_version_values smb3any_values = { 3654struct smb_version_values smb3any_values = {
3640 .version_string = SMB3ANY_VERSION_STRING, 3655 .version_string = SMB3ANY_VERSION_STRING,
3641 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ 3656 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3642 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3657 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3643 .large_lock_type = 0, 3658 .large_lock_type = 0,
3644 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3659 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3645 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, 3660 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
3660struct smb_version_values smbdefault_values = { 3675struct smb_version_values smbdefault_values = {
3661 .version_string = SMBDEFAULT_VERSION_STRING, 3676 .version_string = SMBDEFAULT_VERSION_STRING,
3662 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */ 3677 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3663 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3678 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3664 .large_lock_type = 0, 3679 .large_lock_type = 0,
3665 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3680 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3666 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, 3681 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
3681struct smb_version_values smb30_values = { 3696struct smb_version_values smb30_values = {
3682 .version_string = SMB30_VERSION_STRING, 3697 .version_string = SMB30_VERSION_STRING,
3683 .protocol_id = SMB30_PROT_ID, 3698 .protocol_id = SMB30_PROT_ID,
3684 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3699 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3685 .large_lock_type = 0, 3700 .large_lock_type = 0,
3686 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3701 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3687 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, 3702 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
3702struct smb_version_values smb302_values = { 3717struct smb_version_values smb302_values = {
3703 .version_string = SMB302_VERSION_STRING, 3718 .version_string = SMB302_VERSION_STRING,
3704 .protocol_id = SMB302_PROT_ID, 3719 .protocol_id = SMB302_PROT_ID,
3705 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3720 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3706 .large_lock_type = 0, 3721 .large_lock_type = 0,
3707 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3722 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3708 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, 3723 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
3723struct smb_version_values smb311_values = { 3738struct smb_version_values smb311_values = {
3724 .version_string = SMB311_VERSION_STRING, 3739 .version_string = SMB311_VERSION_STRING,
3725 .protocol_id = SMB311_PROT_ID, 3740 .protocol_id = SMB311_PROT_ID,
3726 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION, 3741 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3727 .large_lock_type = 0, 3742 .large_lock_type = 0,
3728 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK, 3743 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3729 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK, 3744 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 5740aa809be6..f54d07bda067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
2178 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) || 2178 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
2179 *oplock == SMB2_OPLOCK_LEVEL_NONE) 2179 *oplock == SMB2_OPLOCK_LEVEL_NONE)
2180 req->RequestedOplockLevel = *oplock; 2180 req->RequestedOplockLevel = *oplock;
2181 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2182 (oparms->create_options & CREATE_NOT_FILE))
2183 req->RequestedOplockLevel = *oplock; /* no srv lease support */
2181 else { 2184 else {
2182 rc = add_lease_context(server, iov, &n_iov, 2185 rc = add_lease_context(server, iov, &n_iov,
2183 oparms->fid->lease_key, oplock); 2186 oparms->fid->lease_key, oplock);
@@ -2456,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2456 /* We check for obvious errors in the output buffer length and offset */ 2459 /* We check for obvious errors in the output buffer length and offset */
2457 if (*plen == 0) 2460 if (*plen == 0)
2458 goto ioctl_exit; /* server returned no data */ 2461 goto ioctl_exit; /* server returned no data */
2459 else if (*plen > 0xFF00) { 2462 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
2460 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen); 2463 cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
2461 *plen = 0; 2464 *plen = 0;
2462 rc = -EIO; 2465 rc = -EIO;
2463 goto ioctl_exit; 2466 goto ioctl_exit;
2464 } 2467 }
2465 2468
2466 if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) { 2469 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
2467 cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen, 2470 cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
2468 le32_to_cpu(rsp->OutputOffset)); 2471 le32_to_cpu(rsp->OutputOffset));
2469 *plen = 0; 2472 *plen = 0;
@@ -3574,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
3574 int len; 3577 int len;
3575 unsigned int entrycount = 0; 3578 unsigned int entrycount = 0;
3576 unsigned int next_offset = 0; 3579 unsigned int next_offset = 0;
3577 FILE_DIRECTORY_INFO *entryptr; 3580 char *entryptr;
3581 FILE_DIRECTORY_INFO *dir_info;
3578 3582
3579 if (bufstart == NULL) 3583 if (bufstart == NULL)
3580 return 0; 3584 return 0;
3581 3585
3582 entryptr = (FILE_DIRECTORY_INFO *)bufstart; 3586 entryptr = bufstart;
3583 3587
3584 while (1) { 3588 while (1) {
3585 entryptr = (FILE_DIRECTORY_INFO *) 3589 if (entryptr + next_offset < entryptr ||
3586 ((char *)entryptr + next_offset); 3590 entryptr + next_offset > end_of_buf ||
3587 3591 entryptr + next_offset + size > end_of_buf) {
3588 if ((char *)entryptr + size > end_of_buf) {
3589 cifs_dbg(VFS, "malformed search entry would overflow\n"); 3592 cifs_dbg(VFS, "malformed search entry would overflow\n");
3590 break; 3593 break;
3591 } 3594 }
3592 3595
3593 len = le32_to_cpu(entryptr->FileNameLength); 3596 entryptr = entryptr + next_offset;
3594 if ((char *)entryptr + len + size > end_of_buf) { 3597 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
3598
3599 len = le32_to_cpu(dir_info->FileNameLength);
3600 if (entryptr + len < entryptr ||
3601 entryptr + len > end_of_buf ||
3602 entryptr + len + size > end_of_buf) {
3595 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", 3603 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
3596 end_of_buf); 3604 end_of_buf);
3597 break; 3605 break;
3598 } 3606 }
3599 3607
3600 *lastentry = (char *)entryptr; 3608 *lastentry = entryptr;
3601 entrycount++; 3609 entrycount++;
3602 3610
3603 next_offset = le32_to_cpu(entryptr->NextEntryOffset); 3611 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
3604 if (!next_offset) 3612 if (!next_offset)
3605 break; 3613 break;
3606 } 3614 }
diff --git a/fs/dax.c b/fs/dax.c
index f32d7125ad0f..4becbf168b7f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
447 xa_unlock_irq(&mapping->i_pages); 447 xa_unlock_irq(&mapping->i_pages);
448 break; 448 break;
449 } else if (IS_ERR(entry)) { 449 } else if (IS_ERR(entry)) {
450 xa_unlock_irq(&mapping->i_pages);
450 WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN); 451 WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
451 continue; 452 continue;
452 } 453 }
@@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
1120{ 1121{
1121 struct inode *inode = mapping->host; 1122 struct inode *inode = mapping->host;
1122 unsigned long vaddr = vmf->address; 1123 unsigned long vaddr = vmf->address;
1123 vm_fault_t ret = VM_FAULT_NOPAGE; 1124 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1124 struct page *zero_page; 1125 vm_fault_t ret;
1125 pfn_t pfn;
1126
1127 zero_page = ZERO_PAGE(0);
1128 if (unlikely(!zero_page)) {
1129 ret = VM_FAULT_OOM;
1130 goto out;
1131 }
1132 1126
1133 pfn = page_to_pfn_t(zero_page);
1134 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, 1127 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
1135 false); 1128 false);
1136 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); 1129 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1137out:
1138 trace_dax_load_hole(inode, vmf, ret); 1130 trace_dax_load_hole(inode, vmf, ret);
1139 return ret; 1131 return ret;
1140} 1132}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7f7ee18fe179..e4bb9386c045 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1448 } 1448 }
1449 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 1449 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
1450 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 1450 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1451 ext2_set_inode_flags(inode);
1451 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); 1452 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
1452 ei->i_frag_no = raw_inode->i_frag; 1453 ei->i_frag_no = raw_inode->i_frag;
1453 ei->i_frag_size = raw_inode->i_fsize; 1454 ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
1517 new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); 1518 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1518 } 1519 }
1519 brelse (bh); 1520 brelse (bh);
1520 ext2_set_inode_flags(inode);
1521 unlock_new_inode(inode); 1521 unlock_new_inode(inode);
1522 return inode; 1522 return inode;
1523 1523
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e2902d394f1b..f93f9881ec18 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
76 else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) 76 else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
77 error_msg = "rec_len is too small for name_len"; 77 error_msg = "rec_len is too small for name_len";
78 else if (unlikely(((char *) de - buf) + rlen > size)) 78 else if (unlikely(((char *) de - buf) + rlen > size))
79 error_msg = "directory entry across range"; 79 error_msg = "directory entry overrun";
80 else if (unlikely(le32_to_cpu(de->inode) > 80 else if (unlikely(le32_to_cpu(de->inode) >
81 le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) 81 le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
82 error_msg = "inode out of bounds"; 82 error_msg = "inode out of bounds";
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
85 85
86 if (filp) 86 if (filp)
87 ext4_error_file(filp, function, line, bh->b_blocknr, 87 ext4_error_file(filp, function, line, bh->b_blocknr,
88 "bad entry in directory: %s - offset=%u(%u), " 88 "bad entry in directory: %s - offset=%u, "
89 "inode=%u, rec_len=%d, name_len=%d", 89 "inode=%u, rec_len=%d, name_len=%d, size=%d",
90 error_msg, (unsigned) (offset % size), 90 error_msg, offset, le32_to_cpu(de->inode),
91 offset, le32_to_cpu(de->inode), 91 rlen, de->name_len, size);
92 rlen, de->name_len);
93 else 92 else
94 ext4_error_inode(dir, function, line, bh->b_blocknr, 93 ext4_error_inode(dir, function, line, bh->b_blocknr,
95 "bad entry in directory: %s - offset=%u(%u), " 94 "bad entry in directory: %s - offset=%u, "
96 "inode=%u, rec_len=%d, name_len=%d", 95 "inode=%u, rec_len=%d, name_len=%d, size=%d",
97 error_msg, (unsigned) (offset % size), 96 error_msg, offset, le32_to_cpu(de->inode),
98 offset, le32_to_cpu(de->inode), 97 rlen, de->name_len, size);
99 rlen, de->name_len);
100 98
101 return 1; 99 return 1;
102} 100}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0f0edd1cd0cd..caff935fbeb8 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -43,6 +43,17 @@
43#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION) 43#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
44#include <linux/fscrypt.h> 44#include <linux/fscrypt.h>
45 45
46#include <linux/compiler.h>
47
48/* Until this gets included into linux/compiler-gcc.h */
49#ifndef __nonstring
50#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
51#define __nonstring __attribute__((nonstring))
52#else
53#define __nonstring
54#endif
55#endif
56
46/* 57/*
47 * The fourth extended filesystem constants/structures 58 * The fourth extended filesystem constants/structures
48 */ 59 */
@@ -675,6 +686,9 @@ enum {
675/* Max physical block we can address w/o extents */ 686/* Max physical block we can address w/o extents */
676#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF 687#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
677 688
689/* Max logical block we can support */
690#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF
691
678/* 692/*
679 * Structure of an inode on the disk 693 * Structure of an inode on the disk
680 */ 694 */
@@ -1226,7 +1240,7 @@ struct ext4_super_block {
1226 __le32 s_feature_ro_compat; /* readonly-compatible feature set */ 1240 __le32 s_feature_ro_compat; /* readonly-compatible feature set */
1227/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ 1241/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
1228/*78*/ char s_volume_name[16]; /* volume name */ 1242/*78*/ char s_volume_name[16]; /* volume name */
1229/*88*/ char s_last_mounted[64]; /* directory where last mounted */ 1243/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
1230/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */ 1244/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
1231 /* 1245 /*
1232 * Performance hints. Directory preallocation should only 1246 * Performance hints. Directory preallocation should only
@@ -1277,13 +1291,13 @@ struct ext4_super_block {
1277 __le32 s_first_error_time; /* first time an error happened */ 1291 __le32 s_first_error_time; /* first time an error happened */
1278 __le32 s_first_error_ino; /* inode involved in first error */ 1292 __le32 s_first_error_ino; /* inode involved in first error */
1279 __le64 s_first_error_block; /* block involved of first error */ 1293 __le64 s_first_error_block; /* block involved of first error */
1280 __u8 s_first_error_func[32]; /* function where the error happened */ 1294 __u8 s_first_error_func[32] __nonstring; /* function where the error happened */
1281 __le32 s_first_error_line; /* line number where error happened */ 1295 __le32 s_first_error_line; /* line number where error happened */
1282 __le32 s_last_error_time; /* most recent time of an error */ 1296 __le32 s_last_error_time; /* most recent time of an error */
1283 __le32 s_last_error_ino; /* inode involved in last error */ 1297 __le32 s_last_error_ino; /* inode involved in last error */
1284 __le32 s_last_error_line; /* line number where error happened */ 1298 __le32 s_last_error_line; /* line number where error happened */
1285 __le64 s_last_error_block; /* block involved of last error */ 1299 __le64 s_last_error_block; /* block involved of last error */
1286 __u8 s_last_error_func[32]; /* function where the error happened */ 1300 __u8 s_last_error_func[32] __nonstring; /* function where the error happened */
1287#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts) 1301#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
1288 __u8 s_mount_opts[64]; 1302 __u8 s_mount_opts[64];
1289 __le32 s_usr_quota_inum; /* inode for tracking user quota */ 1303 __le32 s_usr_quota_inum; /* inode for tracking user quota */
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3543fe80a3c4..7b4736022761 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
1753{ 1753{
1754 int err, inline_size; 1754 int err, inline_size;
1755 struct ext4_iloc iloc; 1755 struct ext4_iloc iloc;
1756 size_t inline_len;
1756 void *inline_pos; 1757 void *inline_pos;
1757 unsigned int offset; 1758 unsigned int offset;
1758 struct ext4_dir_entry_2 *de; 1759 struct ext4_dir_entry_2 *de;
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
1780 goto out; 1781 goto out;
1781 } 1782 }
1782 1783
1784 inline_len = ext4_get_inline_size(dir);
1783 offset = EXT4_INLINE_DOTDOT_SIZE; 1785 offset = EXT4_INLINE_DOTDOT_SIZE;
1784 while (offset < dir->i_size) { 1786 while (offset < inline_len) {
1785 de = ext4_get_inline_entry(dir, &iloc, offset, 1787 de = ext4_get_inline_entry(dir, &iloc, offset,
1786 &inline_pos, &inline_size); 1788 &inline_pos, &inline_size);
1787 if (ext4_check_dir_entry(dir, NULL, de, 1789 if (ext4_check_dir_entry(dir, NULL, de,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d0dd585add6a..d767e993591d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3413{ 3413{
3414 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3414 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3415 unsigned int blkbits = inode->i_blkbits; 3415 unsigned int blkbits = inode->i_blkbits;
3416 unsigned long first_block = offset >> blkbits; 3416 unsigned long first_block, last_block;
3417 unsigned long last_block = (offset + length - 1) >> blkbits;
3418 struct ext4_map_blocks map; 3417 struct ext4_map_blocks map;
3419 bool delalloc = false; 3418 bool delalloc = false;
3420 int ret; 3419 int ret;
3421 3420
3421 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3422 return -EINVAL;
3423 first_block = offset >> blkbits;
3424 last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
3425 EXT4_MAX_LOGICAL_BLOCK);
3422 3426
3423 if (flags & IOMAP_REPORT) { 3427 if (flags & IOMAP_REPORT) {
3424 if (ext4_has_inline_data(inode)) { 3428 if (ext4_has_inline_data(inode)) {
@@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = {
3948 .writepages = ext4_dax_writepages, 3952 .writepages = ext4_dax_writepages,
3949 .direct_IO = noop_direct_IO, 3953 .direct_IO = noop_direct_IO,
3950 .set_page_dirty = noop_set_page_dirty, 3954 .set_page_dirty = noop_set_page_dirty,
3955 .bmap = ext4_bmap,
3951 .invalidatepage = noop_invalidatepage, 3956 .invalidatepage = noop_invalidatepage,
3952}; 3957};
3953 3958
@@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4192 return 0; 4197 return 0;
4193} 4198}
4194 4199
4195static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock) 4200static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4196{ 4201{
4197 *did_unlock = true;
4198 up_write(&ei->i_mmap_sem); 4202 up_write(&ei->i_mmap_sem);
4199 schedule(); 4203 schedule();
4200 down_write(&ei->i_mmap_sem); 4204 down_write(&ei->i_mmap_sem);
@@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode)
4204{ 4208{
4205 struct ext4_inode_info *ei = EXT4_I(inode); 4209 struct ext4_inode_info *ei = EXT4_I(inode);
4206 struct page *page; 4210 struct page *page;
4207 bool retry;
4208 int error; 4211 int error;
4209 4212
4210 if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem))) 4213 if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4211 return -EINVAL; 4214 return -EINVAL;
4212 4215
4213 do { 4216 do {
4214 retry = false;
4215 page = dax_layout_busy_page(inode->i_mapping); 4217 page = dax_layout_busy_page(inode->i_mapping);
4216 if (!page) 4218 if (!page)
4217 return 0; 4219 return 0;
@@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode)
4219 error = ___wait_var_event(&page->_refcount, 4221 error = ___wait_var_event(&page->_refcount,
4220 atomic_read(&page->_refcount) == 1, 4222 atomic_read(&page->_refcount) == 1,
4221 TASK_INTERRUPTIBLE, 0, 0, 4223 TASK_INTERRUPTIBLE, 0, 0,
4222 ext4_wait_dax_page(ei, &retry)); 4224 ext4_wait_dax_page(ei));
4223 } while (error == 0 && retry); 4225 } while (error == 0);
4224 4226
4225 return error; 4227 return error;
4226} 4228}
@@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4895 * not initialized on a new filesystem. */ 4897 * not initialized on a new filesystem. */
4896 } 4898 }
4897 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 4899 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4900 ext4_set_inode_flags(inode);
4898 inode->i_blocks = ext4_inode_blocks(raw_inode, ei); 4901 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4899 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo); 4902 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4900 if (ext4_has_feature_64bit(sb)) 4903 if (ext4_has_feature_64bit(sb))
@@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
5041 goto bad_inode; 5044 goto bad_inode;
5042 } 5045 }
5043 brelse(iloc.bh); 5046 brelse(iloc.bh);
5044 ext4_set_inode_flags(inode);
5045 5047
5046 unlock_new_inode(inode); 5048 unlock_new_inode(inode);
5047 return inode; 5049 return inode;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 39b07c2d3384..2305b4374fd3 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
49 */ 49 */
50 sb_start_write(sb); 50 sb_start_write(sb);
51 ext4_mmp_csum_set(sb, mmp); 51 ext4_mmp_csum_set(sb, mmp);
52 mark_buffer_dirty(bh);
53 lock_buffer(bh); 52 lock_buffer(bh);
54 bh->b_end_io = end_buffer_write_sync; 53 bh->b_end_io = end_buffer_write_sync;
55 get_bh(bh); 54 get_bh(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 116ff68c5bd4..377d516c475f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3478 int credits; 3478 int credits;
3479 u8 old_file_type; 3479 u8 old_file_type;
3480 3480
3481 if (new.inode && new.inode->i_nlink == 0) {
3482 EXT4_ERROR_INODE(new.inode,
3483 "target of rename is already freed");
3484 return -EFSCORRUPTED;
3485 }
3486
3481 if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) && 3487 if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
3482 (!projid_eq(EXT4_I(new_dir)->i_projid, 3488 (!projid_eq(EXT4_I(new_dir)->i_projid,
3483 EXT4_I(old_dentry->d_inode)->i_projid))) 3489 EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e5fb38451a73..ebbc663d0798 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -19,6 +19,7 @@
19 19
20int ext4_resize_begin(struct super_block *sb) 20int ext4_resize_begin(struct super_block *sb)
21{ 21{
22 struct ext4_sb_info *sbi = EXT4_SB(sb);
22 int ret = 0; 23 int ret = 0;
23 24
24 if (!capable(CAP_SYS_RESOURCE)) 25 if (!capable(CAP_SYS_RESOURCE))
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
29 * because the user tools have no way of handling this. Probably a 30 * because the user tools have no way of handling this. Probably a
30 * bad time to do it anyways. 31 * bad time to do it anyways.
31 */ 32 */
32 if (EXT4_SB(sb)->s_sbh->b_blocknr != 33 if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
33 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { 34 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
34 ext4_warning(sb, "won't resize using backup superblock at %llu", 35 ext4_warning(sb, "won't resize using backup superblock at %llu",
35 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); 36 (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1986,6 +1987,26 @@ retry:
1986 } 1987 }
1987 } 1988 }
1988 1989
1990 /*
1991 * Make sure the last group has enough space so that it's
1992 * guaranteed to have enough space for all metadata blocks
1993 * that it might need to hold. (We might not need to store
1994 * the inode table blocks in the last block group, but there
1995 * will be cases where this might be needed.)
1996 */
1997 if ((ext4_group_first_block_no(sb, n_group) +
1998 ext4_group_overhead_blocks(sb, n_group) + 2 +
1999 sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
2000 n_blocks_count = ext4_group_first_block_no(sb, n_group);
2001 n_group--;
2002 n_blocks_count_retry = 0;
2003 if (resize_inode) {
2004 iput(resize_inode);
2005 resize_inode = NULL;
2006 }
2007 goto retry;
2008 }
2009
1989 /* extend the last group */ 2010 /* extend the last group */
1990 if (n_group == o_group) 2011 if (n_group == o_group)
1991 add = n_blocks_count - o_blocks_count; 2012 add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5863fd22e90b..1145109968ef 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
2145 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb); 2145 SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
2146 if (test_opt(sb, DATA_ERR_ABORT)) 2146 if (test_opt(sb, DATA_ERR_ABORT))
2147 SEQ_OPTS_PUTS("data_err=abort"); 2147 SEQ_OPTS_PUTS("data_err=abort");
2148 if (DUMMY_ENCRYPTION_ENABLED(sbi))
2149 SEQ_OPTS_PUTS("test_dummy_encryption");
2148 2150
2149 ext4_show_quota_options(seq, sb); 2151 ext4_show_quota_options(seq, sb);
2150 return 0; 2152 return 0;
@@ -4378,11 +4380,13 @@ no_journal:
4378 block = ext4_count_free_clusters(sb); 4380 block = ext4_count_free_clusters(sb);
4379 ext4_free_blocks_count_set(sbi->s_es, 4381 ext4_free_blocks_count_set(sbi->s_es,
4380 EXT4_C2B(sbi, block)); 4382 EXT4_C2B(sbi, block));
4383 ext4_superblock_csum_set(sb);
4381 err = percpu_counter_init(&sbi->s_freeclusters_counter, block, 4384 err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
4382 GFP_KERNEL); 4385 GFP_KERNEL);
4383 if (!err) { 4386 if (!err) {
4384 unsigned long freei = ext4_count_free_inodes(sb); 4387 unsigned long freei = ext4_count_free_inodes(sb);
4385 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei); 4388 sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
4389 ext4_superblock_csum_set(sb);
4386 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei, 4390 err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
4387 GFP_KERNEL); 4391 GFP_KERNEL);
4388 } 4392 }
diff --git a/fs/iomap.c b/fs/iomap.c
index 74762b1ec233..ec15cf2ec696 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
1051 } else { 1051 } else {
1052 WARN_ON_ONCE(!PageUptodate(page)); 1052 WARN_ON_ONCE(!PageUptodate(page));
1053 iomap_page_create(inode, page); 1053 iomap_page_create(inode, page);
1054 set_page_dirty(page);
1054 } 1055 }
1055 1056
1056 return length; 1057 return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1090 length -= ret; 1091 length -= ret;
1091 } 1092 }
1092 1093
1093 set_page_dirty(page);
1094 wait_for_stable_page(page); 1094 wait_for_stable_page(page);
1095 return VM_FAULT_LOCKED; 1095 return VM_FAULT_LOCKED;
1096out_unlock: 1096out_unlock:
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ec3fba7d492f..488a9e7f8f66 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -24,6 +24,7 @@
24#include <linux/mpage.h> 24#include <linux/mpage.h>
25#include <linux/user_namespace.h> 25#include <linux/user_namespace.h>
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/blkdev.h>
27 28
28#include "isofs.h" 29#include "isofs.h"
29#include "zisofs.h" 30#include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
653 /* 654 /*
654 * What if bugger tells us to go beyond page size? 655 * What if bugger tells us to go beyond page size?
655 */ 656 */
657 if (bdev_logical_block_size(s->s_bdev) > 2048) {
658 printk(KERN_WARNING
659 "ISOFS: unsupported/invalid hardware sector size %d\n",
660 bdev_logical_block_size(s->s_bdev));
661 goto out_freesbi;
662 }
656 opt.blocksize = sb_min_blocksize(s, opt.blocksize); 663 opt.blocksize = sb_min_blocksize(s, opt.blocksize);
657 664
658 sbi->s_high_sierra = 0; /* default is iso9660 */ 665 sbi->s_high_sierra = 0; /* default is iso9660 */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 34830f6457ea..8220a168282e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
1637 write_sequnlock(&state->seqlock); 1637 write_sequnlock(&state->seqlock);
1638} 1638}
1639 1639
1640static void nfs_state_clear_delegation(struct nfs4_state *state)
1641{
1642 write_seqlock(&state->seqlock);
1643 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1644 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1645 write_sequnlock(&state->seqlock);
1646}
1647
1640static int update_open_stateid(struct nfs4_state *state, 1648static int update_open_stateid(struct nfs4_state *state,
1641 const nfs4_stateid *open_stateid, 1649 const nfs4_stateid *open_stateid,
1642 const nfs4_stateid *delegation, 1650 const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2145 if (IS_ERR(opendata)) 2153 if (IS_ERR(opendata))
2146 return PTR_ERR(opendata); 2154 return PTR_ERR(opendata);
2147 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2155 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2148 write_seqlock(&state->seqlock); 2156 nfs_state_clear_delegation(state);
2149 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2150 write_sequnlock(&state->seqlock);
2151 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2152 switch (type & (FMODE_READ|FMODE_WRITE)) { 2157 switch (type & (FMODE_READ|FMODE_WRITE)) {
2153 case FMODE_READ|FMODE_WRITE: 2158 case FMODE_READ|FMODE_WRITE:
2154 case FMODE_WRITE: 2159 case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2601 const nfs4_stateid *stateid) 2606 const nfs4_stateid *stateid)
2602{ 2607{
2603 nfs_remove_bad_delegation(state->inode, stateid); 2608 nfs_remove_bad_delegation(state->inode, stateid);
2604 write_seqlock(&state->seqlock); 2609 nfs_state_clear_delegation(state);
2605 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2606 write_sequnlock(&state->seqlock);
2607 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2608} 2610}
2609 2611
2610static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2612static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2672 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2674 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2673 if (delegation == NULL) { 2675 if (delegation == NULL) {
2674 rcu_read_unlock(); 2676 rcu_read_unlock();
2677 nfs_state_clear_delegation(state);
2675 return; 2678 return;
2676 } 2679 }
2677 2680
2678 nfs4_stateid_copy(&stateid, &delegation->stateid); 2681 nfs4_stateid_copy(&stateid, &delegation->stateid);
2679 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) || 2682 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
2680 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2683 rcu_read_unlock();
2681 &delegation->flags)) { 2684 nfs_state_clear_delegation(state);
2685 return;
2686 }
2687
2688 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2689 &delegation->flags)) {
2682 rcu_read_unlock(); 2690 rcu_read_unlock();
2683 nfs_finish_clear_delegation_stateid(state, &stateid);
2684 return; 2691 return;
2685 } 2692 }
2686 2693
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3df0eb52da1c..40a08cd483f0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
1390 1390
1391 if (!nfs4_state_mark_reclaim_nograce(clp, state)) 1391 if (!nfs4_state_mark_reclaim_nograce(clp, state))
1392 return -EBADF; 1392 return -EBADF;
1393 nfs_inode_find_delegation_state_and_recover(state->inode,
1394 &state->stateid);
1393 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1395 dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
1394 clp->cl_hostname); 1396 clp->cl_hostname);
1395 nfs4_schedule_state_manager(clp); 1397 nfs4_schedule_state_manager(clp);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index a275fba93170..b1483b303e0b 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
1137 TP_fast_assign( 1137 TP_fast_assign(
1138 __entry->error = error; 1138 __entry->error = error;
1139 __entry->fhandle = nfs_fhandle_hash(fhandle); 1139 __entry->fhandle = nfs_fhandle_hash(fhandle);
1140 if (inode != NULL) { 1140 if (!IS_ERR_OR_NULL(inode)) {
1141 __entry->fileid = NFS_FILEID(inode); 1141 __entry->fileid = NFS_FILEID(inode);
1142 __entry->dev = inode->i_sb->s_dev; 1142 __entry->dev = inode->i_sb->s_dev;
1143 } else { 1143 } else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
1194 TP_fast_assign( 1194 TP_fast_assign(
1195 __entry->error = error; 1195 __entry->error = error;
1196 __entry->fhandle = nfs_fhandle_hash(fhandle); 1196 __entry->fhandle = nfs_fhandle_hash(fhandle);
1197 if (inode != NULL) { 1197 if (!IS_ERR_OR_NULL(inode)) {
1198 __entry->fileid = NFS_FILEID(inode); 1198 __entry->fileid = NFS_FILEID(inode);
1199 __entry->dev = inode->i_sb->s_dev; 1199 __entry->dev = inode->i_sb->s_dev;
1200 } else { 1200 } else {
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index e8f232de484f..7d9a51e6b847 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
1740 return ret; 1740 return ret;
1741} 1741}
1742 1742
1743static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) 1743static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
1744{ 1744{
1745 /* 1745 /*
1746 * send layoutcommit as it can hold up layoutreturn due to lseg 1746 * send layoutcommit as it can hold up layoutreturn due to lseg
1747 * reference 1747 * reference
1748 */ 1748 */
1749 pnfs_layoutcommit_inode(lo->plh_inode, false); 1749 pnfs_layoutcommit_inode(lo->plh_inode, false);
1750 return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, 1750 return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
1751 nfs_wait_bit_killable, 1751 nfs_wait_bit_killable,
1752 TASK_UNINTERRUPTIBLE); 1752 TASK_KILLABLE);
1753} 1753}
1754 1754
1755static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo) 1755static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
1830 } 1830 }
1831 1831
1832lookup_again: 1832lookup_again:
1833 nfs4_client_recover_expired_lease(clp); 1833 lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
1834 if (IS_ERR(lseg))
1835 goto out;
1834 first = false; 1836 first = false;
1835 spin_lock(&ino->i_lock); 1837 spin_lock(&ino->i_lock);
1836 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); 1838 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ lookup_again:
1863 if (list_empty(&lo->plh_segs) && 1865 if (list_empty(&lo->plh_segs) &&
1864 atomic_read(&lo->plh_outstanding) != 0) { 1866 atomic_read(&lo->plh_outstanding) != 0) {
1865 spin_unlock(&ino->i_lock); 1867 spin_unlock(&ino->i_lock);
1866 if (wait_var_event_killable(&lo->plh_outstanding, 1868 lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
1867 atomic_read(&lo->plh_outstanding) == 0 1869 atomic_read(&lo->plh_outstanding)));
1868 || !list_empty(&lo->plh_segs))) 1870 if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
1869 goto out_put_layout_hdr; 1871 goto out_put_layout_hdr;
1870 pnfs_put_layout_hdr(lo); 1872 pnfs_put_layout_hdr(lo);
1871 goto lookup_again; 1873 goto lookup_again;
@@ -1898,8 +1900,11 @@ lookup_again:
1898 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, 1900 if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
1899 &lo->plh_flags)) { 1901 &lo->plh_flags)) {
1900 spin_unlock(&ino->i_lock); 1902 spin_unlock(&ino->i_lock);
1901 wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET, 1903 lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
1902 TASK_UNINTERRUPTIBLE); 1904 NFS_LAYOUT_FIRST_LAYOUTGET,
1905 TASK_KILLABLE));
1906 if (IS_ERR(lseg))
1907 goto out_put_layout_hdr;
1903 pnfs_put_layout_hdr(lo); 1908 pnfs_put_layout_hdr(lo);
1904 dprintk("%s retrying\n", __func__); 1909 dprintk("%s retrying\n", __func__);
1905 goto lookup_again; 1910 goto lookup_again;
@@ -1925,7 +1930,8 @@ lookup_again:
1925 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { 1930 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1926 spin_unlock(&ino->i_lock); 1931 spin_unlock(&ino->i_lock);
1927 dprintk("%s wait for layoutreturn\n", __func__); 1932 dprintk("%s wait for layoutreturn\n", __func__);
1928 if (pnfs_prepare_to_retry_layoutget(lo)) { 1933 lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
1934 if (!IS_ERR(lseg)) {
1929 if (first) 1935 if (first)
1930 pnfs_clear_first_layoutget(lo); 1936 pnfs_clear_first_layoutget(lo);
1931 pnfs_put_layout_hdr(lo); 1937 pnfs_put_layout_hdr(lo);
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 03b8ba933eb2..235b959fc2b3 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * alloc.c - NILFS dat/inode allocator 3 * alloc.c - NILFS dat/inode allocator
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Originally written by Koji Sato. 7 * Originally written by Koji Sato.
17 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. 8 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
18 */ 9 */
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 05149e606a78..0303c3968cee 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator 3 * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Originally written by Koji Sato. 7 * Originally written by Koji Sato.
17 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. 8 * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
18 */ 9 */
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 01fb1831ca25..fb5a9a8a13cf 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * bmap.c - NILFS block mapping. 3 * bmap.c - NILFS block mapping.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 2b6ffbe5997a..2c63858e81c9 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * bmap.h - NILFS block mapping. 3 * bmap.h - NILFS block mapping.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index dec98cab729d..ebb24a314f43 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * btnode.c - NILFS B-tree node cache 3 * btnode.c - NILFS B-tree node cache
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Originally written by Seiji Kihara. 7 * Originally written by Seiji Kihara.
17 * Fully revised by Ryusuke Konishi for stabilization and simplification. 8 * Fully revised by Ryusuke Konishi for stabilization and simplification.
18 * 9 *
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 4e8aaa1aeb65..0f88dbc9bcb3 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * btnode.h - NILFS B-tree node cache 3 * btnode.h - NILFS B-tree node cache
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Seiji Kihara. 7 * Written by Seiji Kihara.
17 * Revised by Ryusuke Konishi. 8 * Revised by Ryusuke Konishi.
18 */ 9 */
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 16a7a67a11c9..23e043eca237 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * btree.c - NILFS B-tree. 3 * btree.c - NILFS B-tree.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 2184e47fa4bf..d1421b646ce4 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * btree.h - NILFS B-tree. 3 * btree.h - NILFS B-tree.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index a15a1601e931..8d41311b5db4 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * cpfile.c - NILFS checkpoint file. 3 * cpfile.c - NILFS checkpoint file.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index 6eca972f9673..6336222df24a 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * cpfile.h - NILFS checkpoint file. 3 * cpfile.h - NILFS checkpoint file.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index dffedb2f8817..6f4066636be9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * dat.c - NILFS disk address translation. 3 * dat.c - NILFS disk address translation.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index 57dc6cf466d0..b17ee34580ae 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * dat.h - NILFS disk address translation. 3 * dat.h - NILFS disk address translation.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 582831ab3eb9..81394e22d0a0 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * dir.c - NILFS directory entry operations 3 * dir.c - NILFS directory entry operations
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Modified for NILFS by Amagai Yoshiji. 7 * Modified for NILFS by Amagai Yoshiji.
17 */ 8 */
18/* 9/*
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 96e3ed0d9652..533e24ea3a88 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * direct.c - NILFS direct block pointer. 3 * direct.c - NILFS direct block pointer.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index cfe85e848bba..ec9a23c77994 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * direct.h - NILFS direct block pointer. 3 * direct.h - NILFS direct block pointer.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 7da0fac71dc2..64bc81363c6c 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * file.c - NILFS regular file handling primitives including fsync(). 3 * file.c - NILFS regular file handling primitives including fsync().
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Amagai Yoshiji and Ryusuke Konishi. 7 * Written by Amagai Yoshiji and Ryusuke Konishi.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 853a831dcde0..aa3c328ee189 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * gcinode.c - dummy inodes to buffer blocks for garbage collection 3 * gcinode.c - dummy inodes to buffer blocks for garbage collection
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. 7 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
17 * Revised by Ryusuke Konishi. 8 * Revised by Ryusuke Konishi.
18 * 9 *
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index b8fa45c20c63..4140d232cadc 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * ifile.c - NILFS inode file 3 * ifile.c - NILFS inode file
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Amagai Yoshiji. 7 * Written by Amagai Yoshiji.
17 * Revised by Ryusuke Konishi. 8 * Revised by Ryusuke Konishi.
18 * 9 *
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 188b94fe0ec5..a1e1e5711a05 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * ifile.h - NILFS inode file 3 * ifile.h - NILFS inode file
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Amagai Yoshiji. 7 * Written by Amagai Yoshiji.
17 * Revised by Ryusuke Konishi. 8 * Revised by Ryusuke Konishi.
18 * 9 *
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 6a612d832e7d..671085512e0f 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * inode.c - NILFS inode operations. 3 * inode.c - NILFS inode operations.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 1d2c3d7711fe..9b96d79eea6c 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * ioctl.c - NILFS ioctl operations. 3 * ioctl.c - NILFS ioctl operations.
3 * 4 *
4 * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index c6bc1033e7d2..700870a92bc4 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * mdt.c - meta data file for NILFS 3 * mdt.c - meta data file for NILFS
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 3f67f3932097..e77aea4bb921 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * mdt.h - NILFS meta data file prototype and definitions 3 * mdt.h - NILFS meta data file prototype and definitions
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index dd52d3f82e8d..9fe6d4ab74f0 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * namei.c - NILFS pathname lookup operations. 3 * namei.c - NILFS pathname lookup operations.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. 7 * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
17 */ 8 */
18/* 9/*
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 33f8c8fc96e8..a2f247b6a209 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * nilfs.h - NILFS local header file. 3 * nilfs.h - NILFS local header file.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato and Ryusuke Konishi. 7 * Written by Koji Sato and Ryusuke Konishi.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 4cb850a6f1c2..329a056b73b1 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * page.c - buffer/page management specific to NILFS 3 * page.c - buffer/page management specific to NILFS
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi and Seiji Kihara. 7 * Written by Ryusuke Konishi and Seiji Kihara.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index f3687c958fa8..62b9bb469e92 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * page.h - buffer/page management specific to NILFS 3 * page.h - buffer/page management specific to NILFS
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi and Seiji Kihara. 7 * Written by Ryusuke Konishi and Seiji Kihara.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 5139efed1888..140b663e91c7 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * recovery.c - NILFS recovery logic 3 * recovery.c - NILFS recovery logic
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 68cb9e4740b4..20c479b5e41b 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * segbuf.c - NILFS segment buffer 3 * segbuf.c - NILFS segment buffer
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 10e16935fff6..9bea1bd59041 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * segbuf.h - NILFS Segment buffer prototypes and definitions 3 * segbuf.h - NILFS Segment buffer prototypes and definitions
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0953635e7d48..445eef41bfaf 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * segment.c - NILFS segment constructor. 3 * segment.c - NILFS segment constructor.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 04634e3e3d58..f5cf5308f3fc 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * segment.h - NILFS Segment constructor prototypes and definitions 3 * segment.h - NILFS Segment constructor prototypes and definitions
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c7fa139d50e8..bf3f8f05c89b 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * sufile.c - NILFS segment usage file. 3 * sufile.c - NILFS segment usage file.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 * Revised by Ryusuke Konishi. 8 * Revised by Ryusuke Konishi.
18 */ 9 */
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 673a891350f4..c4e2c7a7add1 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * sufile.h - NILFS segment usage file. 3 * sufile.h - NILFS segment usage file.
3 * 4 *
4 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Koji Sato. 7 * Written by Koji Sato.
17 */ 8 */
18 9
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1b9067cf4511..26290aa1023f 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * super.c - NILFS module and super block management. 3 * super.c - NILFS module and super block management.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 */ 8 */
18/* 9/*
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 4b25837e7724..e60be7bb55b0 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1,19 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * sysfs.c - sysfs support implementation. 3 * sysfs.c - sysfs support implementation.
3 * 4 *
4 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
5 * Copyright (C) 2014 HGST, Inc., a Western Digital Company. 6 * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> 8 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
18 */ 9 */
19 10
diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h
index 648cedf9c06e..d001eb862dae 100644
--- a/fs/nilfs2/sysfs.h
+++ b/fs/nilfs2/sysfs.h
@@ -1,19 +1,10 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * sysfs.h - sysfs support declarations. 3 * sysfs.h - sysfs support declarations.
3 * 4 *
4 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
5 * Copyright (C) 2014 HGST, Inc., a Western Digital Company. 6 * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com> 8 * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
18 */ 9 */
19 10
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 1a85317e83f0..484785cdf96e 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * the_nilfs.c - the_nilfs shared structure. 3 * the_nilfs.c - the_nilfs shared structure.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 36da1779f976..380a543c5b19 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -1,18 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
1/* 2/*
2 * the_nilfs.h - the_nilfs shared structure. 3 * the_nilfs.h - the_nilfs shared structure.
3 * 4 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. 5 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * Written by Ryusuke Konishi. 7 * Written by Ryusuke Konishi.
17 * 8 *
18 */ 9 */
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index f174397b63a0..ababdbfab537 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
351 351
352 iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); 352 iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
353 353
354 if ((mask & FS_MODIFY) || 354 iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
355 (test_mask & to_tell->i_fsnotify_mask)) { 355 fsnotify_first_mark(&to_tell->i_fsnotify_marks);
356 iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = 356 if (mnt) {
357 fsnotify_first_mark(&to_tell->i_fsnotify_marks);
358 }
359
360 if (mnt && ((mask & FS_MODIFY) ||
361 (test_mask & mnt->mnt_fsnotify_mask))) {
362 iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
363 fsnotify_first_mark(&to_tell->i_fsnotify_marks);
364 iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = 357 iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
365 fsnotify_first_mark(&mnt->mnt_fsnotify_marks); 358 fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
366 } 359 }
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 05506d60131c..59cdb27826de 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
132 struct fsnotify_mark *mark; 132 struct fsnotify_mark *mark;
133 133
134 assert_spin_locked(&conn->lock); 134 assert_spin_locked(&conn->lock);
135 /* We can get detached connector here when inode is getting unlinked. */
136 if (!fsnotify_valid_obj_type(conn->type))
137 return;
135 hlist_for_each_entry(mark, &conn->list, obj_list) { 138 hlist_for_each_entry(mark, &conn->list, obj_list) {
136 if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) 139 if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
137 new_mask |= mark->mask; 140 new_mask |= mark->mask;
138 } 141 }
139 if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
140 return;
141
142 *fsnotify_conn_mask_p(conn) = new_mask; 142 *fsnotify_conn_mask_p(conn) = new_mask;
143} 143}
144 144
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d9ebe11c8990..1d098c3c00e0 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
342 * for this bh as it's not marked locally 342 * for this bh as it's not marked locally
343 * uptodate. */ 343 * uptodate. */
344 status = -EIO; 344 status = -EIO;
345 clear_buffer_needs_validate(bh);
345 put_bh(bh); 346 put_bh(bh);
346 bhs[i] = NULL; 347 bhs[i] = NULL;
347 continue; 348 continue;
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 32e9282893c9..aeaefd2a551b 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file)
131 if (IS_ERR(realfile)) 131 if (IS_ERR(realfile))
132 return PTR_ERR(realfile); 132 return PTR_ERR(realfile);
133 133
134 /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
135 file->f_mapping = realfile->f_mapping;
136
137 file->private_data = realfile; 134 file->private_data = realfile;
138 135
139 return 0; 136 return 0;
@@ -334,6 +331,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
334 return ret; 331 return ret;
335} 332}
336 333
334static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
335{
336 struct fd real;
337 const struct cred *old_cred;
338 int ret;
339
340 ret = ovl_real_fdget(file, &real);
341 if (ret)
342 return ret;
343
344 old_cred = ovl_override_creds(file_inode(file)->i_sb);
345 ret = vfs_fadvise(real.file, offset, len, advice);
346 revert_creds(old_cred);
347
348 fdput(real);
349
350 return ret;
351}
352
337static long ovl_real_ioctl(struct file *file, unsigned int cmd, 353static long ovl_real_ioctl(struct file *file, unsigned int cmd,
338 unsigned long arg) 354 unsigned long arg)
339{ 355{
@@ -502,6 +518,7 @@ const struct file_operations ovl_file_operations = {
502 .fsync = ovl_fsync, 518 .fsync = ovl_fsync,
503 .mmap = ovl_mmap, 519 .mmap = ovl_mmap,
504 .fallocate = ovl_fallocate, 520 .fallocate = ovl_fallocate,
521 .fadvise = ovl_fadvise,
505 .unlocked_ioctl = ovl_ioctl, 522 .unlocked_ioctl = ovl_ioctl,
506 .compat_ioctl = ovl_compat_ioctl, 523 .compat_ioctl = ovl_compat_ioctl,
507 524
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index e0bb217c01e2..b6ac545b5a32 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
467 return -EOPNOTSUPP; 467 return -EOPNOTSUPP;
468 468
469 old_cred = ovl_override_creds(inode->i_sb); 469 old_cred = ovl_override_creds(inode->i_sb);
470
471 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
472 filemap_write_and_wait(realinode->i_mapping);
473
470 err = realinode->i_op->fiemap(realinode, fieinfo, start, len); 474 err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
471 revert_creds(old_cred); 475 revert_creds(old_cred);
472 476
@@ -500,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = {
500 .update_time = ovl_update_time, 504 .update_time = ovl_update_time,
501}; 505};
502 506
507const struct address_space_operations ovl_aops = {
508 /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
509 .direct_IO = noop_direct_IO,
510};
511
503/* 512/*
504 * It is possible to stack overlayfs instance on top of another 513 * It is possible to stack overlayfs instance on top of another
505 * overlayfs instance as lower layer. We need to annonate the 514 * overlayfs instance as lower layer. We need to annonate the
@@ -571,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
571 case S_IFREG: 580 case S_IFREG:
572 inode->i_op = &ovl_file_inode_operations; 581 inode->i_op = &ovl_file_inode_operations;
573 inode->i_fop = &ovl_file_operations; 582 inode->i_fop = &ovl_file_operations;
583 inode->i_mapping->a_ops = &ovl_aops;
574 break; 584 break;
575 585
576 case S_IFDIR: 586 case S_IFDIR:
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 2e0fc93c2c06..30adc9d408a0 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
982 if (err) 982 if (err)
983 goto out; 983 goto out;
984 984
985 err = -EBUSY;
986 if (ovl_inuse_trylock(upperpath->dentry)) {
987 ofs->upperdir_locked = true;
988 } else if (ofs->config.index) {
989 pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
990 goto out;
991 } else {
992 pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
993 }
994
995 upper_mnt = clone_private_mount(upperpath); 985 upper_mnt = clone_private_mount(upperpath);
996 err = PTR_ERR(upper_mnt); 986 err = PTR_ERR(upper_mnt);
997 if (IS_ERR(upper_mnt)) { 987 if (IS_ERR(upper_mnt)) {
@@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
1002 /* Don't inherit atime flags */ 992 /* Don't inherit atime flags */
1003 upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); 993 upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
1004 ofs->upper_mnt = upper_mnt; 994 ofs->upper_mnt = upper_mnt;
995
996 err = -EBUSY;
997 if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
998 ofs->upperdir_locked = true;
999 } else if (ofs->config.index) {
1000 pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
1001 goto out;
1002 } else {
1003 pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
1004 }
1005
1005 err = 0; 1006 err = 0;
1006out: 1007out:
1007 return err; 1008 return err;
@@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
1101 goto out; 1102 goto out;
1102 } 1103 }
1103 1104
1105 ofs->workbasedir = dget(workpath.dentry);
1106
1104 err = -EBUSY; 1107 err = -EBUSY;
1105 if (ovl_inuse_trylock(workpath.dentry)) { 1108 if (ovl_inuse_trylock(ofs->workbasedir)) {
1106 ofs->workdir_locked = true; 1109 ofs->workdir_locked = true;
1107 } else if (ofs->config.index) { 1110 } else if (ofs->config.index) {
1108 pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); 1111 pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
@@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
1111 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); 1114 pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
1112 } 1115 }
1113 1116
1114 ofs->workbasedir = dget(workpath.dentry);
1115 err = ovl_make_workdir(ofs, &workpath); 1117 err = ovl_make_workdir(ofs, &workpath);
1116 if (err) 1118 if (err)
1117 goto out; 1119 goto out;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index ad72261ee3fe..d297fe4472a9 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
464 ret = -EFAULT; 464 ret = -EFAULT;
465 goto out; 465 goto out;
466 } 466 }
467 m = NULL; /* skip the list anchor */
467 } else if (m->type == KCORE_VMALLOC) { 468 } else if (m->type == KCORE_VMALLOC) {
468 vread(buf, (char *)start, tsz); 469 vread(buf, (char *)start, tsz);
469 /* we have to zero-fill user buffer even if no read */ 470 /* we have to zero-fill user buffer even if no read */
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index bbd1e357c23d..f4fd2e72add4 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
898 }, 898 },
899}; 899};
900 900
901static void ramoops_register_dummy(void) 901static inline void ramoops_unregister_dummy(void)
902{ 902{
903 platform_device_unregister(dummy);
904 dummy = NULL;
905
906 kfree(dummy_data);
907 dummy_data = NULL;
908}
909
910static void __init ramoops_register_dummy(void)
911{
912 /*
913 * Prepare a dummy platform data structure to carry the module
914 * parameters. If mem_size isn't set, then there are no module
915 * parameters, and we can skip this.
916 */
903 if (!mem_size) 917 if (!mem_size)
904 return; 918 return;
905 919
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
932 if (IS_ERR(dummy)) { 946 if (IS_ERR(dummy)) {
933 pr_info("could not create platform device: %ld\n", 947 pr_info("could not create platform device: %ld\n",
934 PTR_ERR(dummy)); 948 PTR_ERR(dummy));
949 dummy = NULL;
950 ramoops_unregister_dummy();
935 } 951 }
936} 952}
937 953
938static int __init ramoops_init(void) 954static int __init ramoops_init(void)
939{ 955{
956 int ret;
957
940 ramoops_register_dummy(); 958 ramoops_register_dummy();
941 return platform_driver_register(&ramoops_driver); 959 ret = platform_driver_register(&ramoops_driver);
960 if (ret != 0)
961 ramoops_unregister_dummy();
962
963 return ret;
942} 964}
943late_initcall(ramoops_init); 965late_initcall(ramoops_init);
944 966
945static void __exit ramoops_exit(void) 967static void __exit ramoops_exit(void)
946{ 968{
947 platform_driver_unregister(&ramoops_driver); 969 platform_driver_unregister(&ramoops_driver);
948 platform_device_unregister(dummy); 970 ramoops_unregister_dummy();
949 kfree(dummy_data);
950} 971}
951module_exit(ramoops_exit); 972module_exit(ramoops_exit);
952 973
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 951a14edcf51..0792595ebcfb 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
429 vaddr = vmap(pages, page_count, VM_MAP, prot); 429 vaddr = vmap(pages, page_count, VM_MAP, prot);
430 kfree(pages); 430 kfree(pages);
431 431
432 return vaddr; 432 /*
433 * Since vmap() uses page granularity, we must add the offset
434 * into the page here, to get the byte granularity address
435 * into the mapping to represent the actual "start" location.
436 */
437 return vaddr + offset_in_page(start);
433} 438}
434 439
435static void *persistent_ram_iomap(phys_addr_t start, size_t size, 440static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
448 else 453 else
449 va = ioremap_wc(start, size); 454 va = ioremap_wc(start, size);
450 455
456 /*
457 * Since request_mem_region() and ioremap() are byte-granularity
458 * there is no need handle anything special like we do when the
459 * vmap() case in persistent_ram_vmap() above.
460 */
451 return va; 461 return va;
452} 462}
453 463
@@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
468 return -ENOMEM; 478 return -ENOMEM;
469 } 479 }
470 480
471 prz->buffer = prz->vaddr + offset_in_page(start); 481 prz->buffer = prz->vaddr;
472 prz->buffer_size = size - sizeof(struct persistent_ram_buffer); 482 prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
473 483
474 return 0; 484 return 0;
@@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
515 525
516 if (prz->vaddr) { 526 if (prz->vaddr) {
517 if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { 527 if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
518 vunmap(prz->vaddr); 528 /* We must vunmap() at page-granularity. */
529 vunmap(prz->vaddr - offset_in_page(prz->paddr));
519 } else { 530 } else {
520 iounmap(prz->vaddr); 531 iounmap(prz->vaddr);
521 release_mem_region(prz->paddr, prz->size); 532 release_mem_region(prz->paddr, prz->size);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 860bfbe7a07a..f0cbf58ad4da 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,7 @@
18#include <linux/quotaops.h> 18#include <linux/quotaops.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/writeback.h> 20#include <linux/writeback.h>
21#include <linux/nospec.h>
21 22
22static int check_quotactl_permission(struct super_block *sb, int type, int cmd, 23static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
23 qid_t id) 24 qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
120 struct if_dqinfo uinfo; 121 struct if_dqinfo uinfo;
121 int ret; 122 int ret;
122 123
123 /* This checks whether qc_state has enough entries... */
124 BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
125 if (!sb->s_qcop->get_state) 124 if (!sb->s_qcop->get_state)
126 return -ENOSYS; 125 return -ENOSYS;
127 ret = sb->s_qcop->get_state(sb, &state); 126 ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
354 * GETXSTATE quotactl has space for just one set of time limits so 353 * GETXSTATE quotactl has space for just one set of time limits so
355 * report them for the first enabled quota type 354 * report them for the first enabled quota type
356 */ 355 */
357 for (type = 0; type < XQM_MAXQUOTAS; type++) 356 for (type = 0; type < MAXQUOTAS; type++)
358 if (state.s_state[type].flags & QCI_ACCT_ENABLED) 357 if (state.s_state[type].flags & QCI_ACCT_ENABLED)
359 break; 358 break;
360 BUG_ON(type == XQM_MAXQUOTAS); 359 BUG_ON(type == MAXQUOTAS);
361 fqs->qs_btimelimit = state.s_state[type].spc_timelimit; 360 fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
362 fqs->qs_itimelimit = state.s_state[type].ino_timelimit; 361 fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
363 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; 362 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
427 * GETXSTATV quotactl has space for just one set of time limits so 426 * GETXSTATV quotactl has space for just one set of time limits so
428 * report them for the first enabled quota type 427 * report them for the first enabled quota type
429 */ 428 */
430 for (type = 0; type < XQM_MAXQUOTAS; type++) 429 for (type = 0; type < MAXQUOTAS; type++)
431 if (state.s_state[type].flags & QCI_ACCT_ENABLED) 430 if (state.s_state[type].flags & QCI_ACCT_ENABLED)
432 break; 431 break;
433 BUG_ON(type == XQM_MAXQUOTAS); 432 BUG_ON(type == MAXQUOTAS);
434 fqs->qs_btimelimit = state.s_state[type].spc_timelimit; 433 fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
435 fqs->qs_itimelimit = state.s_state[type].ino_timelimit; 434 fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
436 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit; 435 fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
701{ 700{
702 int ret; 701 int ret;
703 702
704 if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) 703 if (type >= MAXQUOTAS)
705 return -EINVAL; 704 return -EINVAL;
705 type = array_index_nospec(type, MAXQUOTAS);
706 /* 706 /*
707 * Quota not supported on this fs? Check this before s_quota_types 707 * Quota not supported on this fs? Check this before s_quota_types
708 * since they needn't be set if quota is not supported at all. 708 * since they needn't be set if quota is not supported at all.
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 23e7042666a7..bf000c8aeffb 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
1912 mutex_unlock(&c->bu_mutex); 1912 mutex_unlock(&c->bu_mutex);
1913 } 1913 }
1914 1914
1915 ubifs_assert(c, c->lst.taken_empty_lebs > 0); 1915 if (!c->need_recovery)
1916 ubifs_assert(c, c->lst.taken_empty_lebs > 0);
1917
1916 return 0; 1918 return 0;
1917} 1919}
1918 1920
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1954 int dev, vol; 1956 int dev, vol;
1955 char *endptr; 1957 char *endptr;
1956 1958
1959 if (!name || !*name)
1960 return ERR_PTR(-EINVAL);
1961
1957 /* First, try to open using the device node path method */ 1962 /* First, try to open using the device node path method */
1958 ubi = ubi_open_volume_path(name, mode); 1963 ubi = ubi_open_volume_path(name, mode);
1959 if (!IS_ERR(ubi)) 1964 if (!IS_ERR(ubi))
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 61afdfee4b28..f5ad1ede7990 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
152 ui->data_len = size; 152 ui->data_len = size;
153 153
154 mutex_lock(&host_ui->ui_mutex); 154 mutex_lock(&host_ui->ui_mutex);
155
156 if (!host->i_nlink) {
157 err = -ENOENT;
158 goto out_noent;
159 }
160
161 host->i_ctime = current_time(host); 155 host->i_ctime = current_time(host);
162 host_ui->xattr_cnt += 1; 156 host_ui->xattr_cnt += 1;
163 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); 157 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -190,7 +184,6 @@ out_cancel:
190 host_ui->xattr_size -= CALC_XATTR_BYTES(size); 184 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
191 host_ui->xattr_names -= fname_len(nm); 185 host_ui->xattr_names -= fname_len(nm);
192 host_ui->flags &= ~UBIFS_CRYPT_FL; 186 host_ui->flags &= ~UBIFS_CRYPT_FL;
193out_noent:
194 mutex_unlock(&host_ui->ui_mutex); 187 mutex_unlock(&host_ui->ui_mutex);
195out_free: 188out_free:
196 make_bad_inode(inode); 189 make_bad_inode(inode);
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
242 mutex_unlock(&ui->ui_mutex); 235 mutex_unlock(&ui->ui_mutex);
243 236
244 mutex_lock(&host_ui->ui_mutex); 237 mutex_lock(&host_ui->ui_mutex);
245
246 if (!host->i_nlink) {
247 err = -ENOENT;
248 goto out_noent;
249 }
250
251 host->i_ctime = current_time(host); 238 host->i_ctime = current_time(host);
252 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); 239 host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
253 host_ui->xattr_size += CALC_XATTR_BYTES(size); 240 host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
269out_cancel: 256out_cancel:
270 host_ui->xattr_size -= CALC_XATTR_BYTES(size); 257 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
271 host_ui->xattr_size += CALC_XATTR_BYTES(old_size); 258 host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
272out_noent:
273 mutex_unlock(&host_ui->ui_mutex); 259 mutex_unlock(&host_ui->ui_mutex);
274 make_bad_inode(inode); 260 make_bad_inode(inode);
275out_free: 261out_free:
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
496 return err; 482 return err;
497 483
498 mutex_lock(&host_ui->ui_mutex); 484 mutex_lock(&host_ui->ui_mutex);
499
500 if (!host->i_nlink) {
501 err = -ENOENT;
502 goto out_noent;
503 }
504
505 host->i_ctime = current_time(host); 485 host->i_ctime = current_time(host);
506 host_ui->xattr_cnt -= 1; 486 host_ui->xattr_cnt -= 1;
507 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); 487 host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -521,7 +501,6 @@ out_cancel:
521 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); 501 host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
522 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); 502 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
523 host_ui->xattr_names += fname_len(nm); 503 host_ui->xattr_names += fname_len(nm);
524out_noent:
525 mutex_unlock(&host_ui->ui_mutex); 504 mutex_unlock(&host_ui->ui_mutex);
526 ubifs_release_budget(c, &req); 505 ubifs_release_budget(c, &req);
527 make_bad_inode(inode); 506 make_bad_inode(inode);
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
561 540
562 ubifs_assert(c, inode_is_locked(host)); 541 ubifs_assert(c, inode_is_locked(host));
563 542
564 if (!host->i_nlink)
565 return -ENOENT;
566
567 if (fname_len(&nm) > UBIFS_MAX_NLEN) 543 if (fname_len(&nm) > UBIFS_MAX_NLEN)
568 return -ENAMETOOLONG; 544 return -ENAMETOOLONG;
569 545
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 3040dc2a32f6..6f515651a2c2 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
764 struct kernel_lb_addr *root) 764 struct kernel_lb_addr *root)
765{ 765{
766 struct buffer_head *bh = NULL; 766 struct buffer_head *bh = NULL;
767 long lastblock;
768 uint16_t ident; 767 uint16_t ident;
769 struct udf_sb_info *sbi;
770 768
771 if (fileset->logicalBlockNum != 0xFFFFFFFF || 769 if (fileset->logicalBlockNum != 0xFFFFFFFF ||
772 fileset->partitionReferenceNum != 0xFFFF) { 770 fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
779 return 1; 777 return 1;
780 } 778 }
781 779
782 }
783
784 sbi = UDF_SB(sb);
785 if (!bh) {
786 /* Search backwards through the partitions */
787 struct kernel_lb_addr newfileset;
788
789/* --> cvg: FIXME - is it reasonable? */
790 return 1;
791
792 for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
793 (newfileset.partitionReferenceNum != 0xFFFF &&
794 fileset->logicalBlockNum == 0xFFFFFFFF &&
795 fileset->partitionReferenceNum == 0xFFFF);
796 newfileset.partitionReferenceNum--) {
797 lastblock = sbi->s_partmaps
798 [newfileset.partitionReferenceNum]
799 .s_partition_len;
800 newfileset.logicalBlockNum = 0;
801
802 do {
803 bh = udf_read_ptagged(sb, &newfileset, 0,
804 &ident);
805 if (!bh) {
806 newfileset.logicalBlockNum++;
807 continue;
808 }
809
810 switch (ident) {
811 case TAG_IDENT_SBD:
812 {
813 struct spaceBitmapDesc *sp;
814 sp = (struct spaceBitmapDesc *)
815 bh->b_data;
816 newfileset.logicalBlockNum += 1 +
817 ((le32_to_cpu(sp->numOfBytes) +
818 sizeof(struct spaceBitmapDesc)
819 - 1) >> sb->s_blocksize_bits);
820 brelse(bh);
821 break;
822 }
823 case TAG_IDENT_FSD:
824 *fileset = newfileset;
825 break;
826 default:
827 newfileset.logicalBlockNum++;
828 brelse(bh);
829 bh = NULL;
830 break;
831 }
832 } while (newfileset.logicalBlockNum < lastblock &&
833 fileset->logicalBlockNum == 0xFFFFFFFF &&
834 fileset->partitionReferenceNum == 0xFFFF);
835 }
836 }
837
838 if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
839 fileset->partitionReferenceNum != 0xFFFF) && bh) {
840 udf_debug("Fileset at block=%u, partition=%u\n", 780 udf_debug("Fileset at block=%u, partition=%u\n",
841 fileset->logicalBlockNum, 781 fileset->logicalBlockNum,
842 fileset->partitionReferenceNum); 782 fileset->partitionReferenceNum);
843 783
844 sbi->s_partition = fileset->partitionReferenceNum; 784 UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
845 udf_load_fileset(sb, bh, root); 785 udf_load_fileset(sb, bh, root);
846 brelse(bh); 786 brelse(bh);
847 return 0; 787 return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
1570 */ 1510 */
1571#define PART_DESC_ALLOC_STEP 32 1511#define PART_DESC_ALLOC_STEP 32
1572 1512
1513struct part_desc_seq_scan_data {
1514 struct udf_vds_record rec;
1515 u32 partnum;
1516};
1517
1573struct desc_seq_scan_data { 1518struct desc_seq_scan_data {
1574 struct udf_vds_record vds[VDS_POS_LENGTH]; 1519 struct udf_vds_record vds[VDS_POS_LENGTH];
1575 unsigned int size_part_descs; 1520 unsigned int size_part_descs;
1576 struct udf_vds_record *part_descs_loc; 1521 unsigned int num_part_descs;
1522 struct part_desc_seq_scan_data *part_descs_loc;
1577}; 1523};
1578 1524
1579static struct udf_vds_record *handle_partition_descriptor( 1525static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
1582{ 1528{
1583 struct partitionDesc *desc = (struct partitionDesc *)bh->b_data; 1529 struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1584 int partnum; 1530 int partnum;
1531 int i;
1585 1532
1586 partnum = le16_to_cpu(desc->partitionNumber); 1533 partnum = le16_to_cpu(desc->partitionNumber);
1587 if (partnum >= data->size_part_descs) { 1534 for (i = 0; i < data->num_part_descs; i++)
1588 struct udf_vds_record *new_loc; 1535 if (partnum == data->part_descs_loc[i].partnum)
1536 return &(data->part_descs_loc[i].rec);
1537 if (data->num_part_descs >= data->size_part_descs) {
1538 struct part_desc_seq_scan_data *new_loc;
1589 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP); 1539 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1590 1540
1591 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL); 1541 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
1597 data->part_descs_loc = new_loc; 1547 data->part_descs_loc = new_loc;
1598 data->size_part_descs = new_size; 1548 data->size_part_descs = new_size;
1599 } 1549 }
1600 return &(data->part_descs_loc[partnum]); 1550 return &(data->part_descs_loc[data->num_part_descs++].rec);
1601} 1551}
1602 1552
1603 1553
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
1647 1597
1648 memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH); 1598 memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1649 data.size_part_descs = PART_DESC_ALLOC_STEP; 1599 data.size_part_descs = PART_DESC_ALLOC_STEP;
1600 data.num_part_descs = 0;
1650 data.part_descs_loc = kcalloc(data.size_part_descs, 1601 data.part_descs_loc = kcalloc(data.size_part_descs,
1651 sizeof(*data.part_descs_loc), 1602 sizeof(*data.part_descs_loc),
1652 GFP_KERNEL); 1603 GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
1658 * are in it. 1609 * are in it.
1659 */ 1610 */
1660 for (; (!done && block <= lastblock); block++) { 1611 for (; (!done && block <= lastblock); block++) {
1661
1662 bh = udf_read_tagged(sb, block, block, &ident); 1612 bh = udf_read_tagged(sb, block, block, &ident);
1663 if (!bh) 1613 if (!bh)
1664 break; 1614 break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
1730 } 1680 }
1731 1681
1732 /* Now handle prevailing Partition Descriptors */ 1682 /* Now handle prevailing Partition Descriptors */
1733 for (i = 0; i < data.size_part_descs; i++) { 1683 for (i = 0; i < data.num_part_descs; i++) {
1734 if (data.part_descs_loc[i].block) { 1684 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1735 ret = udf_load_partdesc(sb, 1685 if (ret < 0)
1736 data.part_descs_loc[i].block); 1686 return ret;
1737 if (ret < 0)
1738 return ret;
1739 }
1740 } 1687 }
1741 1688
1742 return 0; 1689 return 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index daa732550088..0d6a6a4af861 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
948 int err = 0; 948 int err = 0;
949 949
950#ifdef CONFIG_FS_POSIX_ACL 950#ifdef CONFIG_FS_POSIX_ACL
951 if (inode->i_acl) { 951 if (IS_POSIXACL(inode)) {
952 err = xattr_list_one(&buffer, &remaining_size, 952 if (inode->i_acl) {
953 XATTR_NAME_POSIX_ACL_ACCESS); 953 err = xattr_list_one(&buffer, &remaining_size,
954 if (err) 954 XATTR_NAME_POSIX_ACL_ACCESS);
955 return err; 955 if (err)
956 } 956 return err;
957 if (inode->i_default_acl) { 957 }
958 err = xattr_list_one(&buffer, &remaining_size, 958 if (inode->i_default_acl) {
959 XATTR_NAME_POSIX_ACL_DEFAULT); 959 err = xattr_list_one(&buffer, &remaining_size,
960 if (err) 960 XATTR_NAME_POSIX_ACL_DEFAULT);
961 return err; 961 if (err)
962 return err;
963 }
962 } 964 }
963#endif 965#endif
964 966
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 1e671d4eb6fa..c6299f82a6e4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
587 */ 587 */
588 error = xfs_attr3_leaf_to_node(args); 588 error = xfs_attr3_leaf_to_node(args);
589 if (error) 589 if (error)
590 goto out_defer_cancel; 590 return error;
591 error = xfs_defer_finish(&args->trans); 591 error = xfs_defer_finish(&args->trans);
592 if (error) 592 if (error)
593 return error; 593 return error;
@@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
675 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 675 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
676 /* bp is gone due to xfs_da_shrink_inode */ 676 /* bp is gone due to xfs_da_shrink_inode */
677 if (error) 677 if (error)
678 goto out_defer_cancel; 678 return error;
679 error = xfs_defer_finish(&args->trans); 679 error = xfs_defer_finish(&args->trans);
680 if (error) 680 if (error)
681 return error; 681 return error;
@@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
693 error = xfs_attr3_leaf_clearflag(args); 693 error = xfs_attr3_leaf_clearflag(args);
694 } 694 }
695 return error; 695 return error;
696out_defer_cancel:
697 xfs_defer_cancel(args->trans);
698 return error;
699} 696}
700 697
701/* 698/*
@@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
738 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 735 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
739 /* bp is gone due to xfs_da_shrink_inode */ 736 /* bp is gone due to xfs_da_shrink_inode */
740 if (error) 737 if (error)
741 goto out_defer_cancel; 738 return error;
742 error = xfs_defer_finish(&args->trans); 739 error = xfs_defer_finish(&args->trans);
743 if (error) 740 if (error)
744 return error; 741 return error;
745 } 742 }
746 return 0; 743 return 0;
747out_defer_cancel:
748 xfs_defer_cancel(args->trans);
749 return error;
750} 744}
751 745
752/* 746/*
@@ -864,7 +858,7 @@ restart:
864 state = NULL; 858 state = NULL;
865 error = xfs_attr3_leaf_to_node(args); 859 error = xfs_attr3_leaf_to_node(args);
866 if (error) 860 if (error)
867 goto out_defer_cancel; 861 goto out;
868 error = xfs_defer_finish(&args->trans); 862 error = xfs_defer_finish(&args->trans);
869 if (error) 863 if (error)
870 goto out; 864 goto out;
@@ -888,7 +882,7 @@ restart:
888 */ 882 */
889 error = xfs_da3_split(state); 883 error = xfs_da3_split(state);
890 if (error) 884 if (error)
891 goto out_defer_cancel; 885 goto out;
892 error = xfs_defer_finish(&args->trans); 886 error = xfs_defer_finish(&args->trans);
893 if (error) 887 if (error)
894 goto out; 888 goto out;
@@ -984,7 +978,7 @@ restart:
984 if (retval && (state->path.active > 1)) { 978 if (retval && (state->path.active > 1)) {
985 error = xfs_da3_join(state); 979 error = xfs_da3_join(state);
986 if (error) 980 if (error)
987 goto out_defer_cancel; 981 goto out;
988 error = xfs_defer_finish(&args->trans); 982 error = xfs_defer_finish(&args->trans);
989 if (error) 983 if (error)
990 goto out; 984 goto out;
@@ -1013,9 +1007,6 @@ out:
1013 if (error) 1007 if (error)
1014 return error; 1008 return error;
1015 return retval; 1009 return retval;
1016out_defer_cancel:
1017 xfs_defer_cancel(args->trans);
1018 goto out;
1019} 1010}
1020 1011
1021/* 1012/*
@@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
1107 if (retval && (state->path.active > 1)) { 1098 if (retval && (state->path.active > 1)) {
1108 error = xfs_da3_join(state); 1099 error = xfs_da3_join(state);
1109 if (error) 1100 if (error)
1110 goto out_defer_cancel; 1101 goto out;
1111 error = xfs_defer_finish(&args->trans); 1102 error = xfs_defer_finish(&args->trans);
1112 if (error) 1103 if (error)
1113 goto out; 1104 goto out;
@@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
1138 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); 1129 error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
1139 /* bp is gone due to xfs_da_shrink_inode */ 1130 /* bp is gone due to xfs_da_shrink_inode */
1140 if (error) 1131 if (error)
1141 goto out_defer_cancel; 1132 goto out;
1142 error = xfs_defer_finish(&args->trans); 1133 error = xfs_defer_finish(&args->trans);
1143 if (error) 1134 if (error)
1144 goto out; 1135 goto out;
@@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
1150out: 1141out:
1151 xfs_da_state_free(state); 1142 xfs_da_state_free(state);
1152 return error; 1143 return error;
1153out_defer_cancel:
1154 xfs_defer_cancel(args->trans);
1155 goto out;
1156} 1144}
1157 1145
1158/* 1146/*
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index af094063e402..d89363c6b523 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
485 blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map, 485 blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
486 &nmap); 486 &nmap);
487 if (error) 487 if (error)
488 goto out_defer_cancel; 488 return error;
489 error = xfs_defer_finish(&args->trans); 489 error = xfs_defer_finish(&args->trans);
490 if (error) 490 if (error)
491 return error; 491 return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
553 } 553 }
554 ASSERT(valuelen == 0); 554 ASSERT(valuelen == 0);
555 return 0; 555 return 0;
556out_defer_cancel:
557 xfs_defer_cancel(args->trans);
558 return error;
559} 556}
560 557
561/* 558/*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
625 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, 622 error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
626 XFS_BMAPI_ATTRFORK, 1, &done); 623 XFS_BMAPI_ATTRFORK, 1, &done);
627 if (error) 624 if (error)
628 goto out_defer_cancel; 625 return error;
629 error = xfs_defer_finish(&args->trans); 626 error = xfs_defer_finish(&args->trans);
630 if (error) 627 if (error)
631 return error; 628 return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
638 return error; 635 return error;
639 } 636 }
640 return 0; 637 return 0;
641out_defer_cancel:
642 xfs_defer_cancel(args->trans);
643 return error;
644} 638}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760314fdf7f..a47670332326 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS); 673 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
674 674
675 /* 675 /*
676 * Make space in the inode incore. 676 * Make space in the inode incore. This needs to be undone if we fail
677 * to expand the root.
677 */ 678 */
678 xfs_iroot_realloc(ip, 1, whichfork); 679 xfs_iroot_realloc(ip, 1, whichfork);
679 ifp->if_flags |= XFS_IFBROOT; 680 ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
711 args.minlen = args.maxlen = args.prod = 1; 712 args.minlen = args.maxlen = args.prod = 1;
712 args.wasdel = wasdel; 713 args.wasdel = wasdel;
713 *logflagsp = 0; 714 *logflagsp = 0;
714 if ((error = xfs_alloc_vextent(&args))) { 715 error = xfs_alloc_vextent(&args);
715 ASSERT(ifp->if_broot == NULL); 716 if (error)
716 goto err1; 717 goto out_root_realloc;
717 }
718 718
719 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 719 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
720 ASSERT(ifp->if_broot == NULL);
721 error = -ENOSPC; 720 error = -ENOSPC;
722 goto err1; 721 goto out_root_realloc;
723 } 722 }
723
724 /* 724 /*
725 * Allocation can't fail, the space was reserved. 725 * Allocation can't fail, the space was reserved.
726 */ 726 */
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
732 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L); 732 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
733 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0); 733 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
734 if (!abp) { 734 if (!abp) {
735 error = -ENOSPC; 735 error = -EFSCORRUPTED;
736 goto err2; 736 goto out_unreserve_dquot;
737 } 737 }
738
738 /* 739 /*
739 * Fill in the child block. 740 * Fill in the child block.
740 */ 741 */
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
775 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork); 776 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
776 return 0; 777 return 0;
777 778
778err2: 779out_unreserve_dquot:
779 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); 780 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
780err1: 781out_root_realloc:
781 xfs_iroot_realloc(ip, -1, whichfork); 782 xfs_iroot_realloc(ip, -1, whichfork);
782 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 783 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
784 ASSERT(ifp->if_broot == NULL);
783 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 785 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
784 786
785 return error; 787 return error;
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 059bc44c27e8..afbe336600e1 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
1016#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ 1016#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
1017#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ 1017#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
1018#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ 1018#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
1019/* Do not use bit 15, di_flags is legacy and unchanging now */
1020
1019#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 1021#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
1020#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 1022#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
1021#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) 1023#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 30d1d60f1d46..09d9c8cfa4a0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
415 return NULL; 415 return NULL;
416} 416}
417 417
418static xfs_failaddr_t
419xfs_dinode_verify_forkoff(
420 struct xfs_dinode *dip,
421 struct xfs_mount *mp)
422{
423 if (!XFS_DFORK_Q(dip))
424 return NULL;
425
426 switch (dip->di_format) {
427 case XFS_DINODE_FMT_DEV:
428 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
429 return __this_address;
430 break;
431 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
432 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
433 case XFS_DINODE_FMT_BTREE:
434 if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
435 return __this_address;
436 break;
437 default:
438 return __this_address;
439 }
440 return NULL;
441}
442
418xfs_failaddr_t 443xfs_failaddr_t
419xfs_dinode_verify( 444xfs_dinode_verify(
420 struct xfs_mount *mp, 445 struct xfs_mount *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
470 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) 495 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
471 return __this_address; 496 return __this_address;
472 497
498 /* check for illegal values of forkoff */
499 fa = xfs_dinode_verify_forkoff(dip, mp);
500 if (fa)
501 return fa;
502
473 /* Do we have appropriate data fork formats for the mode? */ 503 /* Do we have appropriate data fork formats for the mode? */
474 switch (mode & S_IFMT) { 504 switch (mode & S_IFMT) {
475 case S_IFIFO: 505 case S_IFIFO:
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 036b5c7021eb..376bcb585ae6 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -17,7 +17,6 @@
17#include "xfs_sb.h" 17#include "xfs_sb.h"
18#include "xfs_alloc.h" 18#include "xfs_alloc.h"
19#include "xfs_rmap.h" 19#include "xfs_rmap.h"
20#include "xfs_alloc.h"
21#include "scrub/xfs_scrub.h" 20#include "scrub/xfs_scrub.h"
22#include "scrub/scrub.h" 21#include "scrub/scrub.h"
23#include "scrub/common.h" 22#include "scrub/common.h"
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 5b3b177c0fc9..e386c9b0b4ab 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -126,6 +126,7 @@ xchk_inode_flags(
126{ 126{
127 struct xfs_mount *mp = sc->mp; 127 struct xfs_mount *mp = sc->mp;
128 128
129 /* di_flags are all taken, last bit cannot be used */
129 if (flags & ~XFS_DIFLAG_ANY) 130 if (flags & ~XFS_DIFLAG_ANY)
130 goto bad; 131 goto bad;
131 132
@@ -172,8 +173,9 @@ xchk_inode_flags2(
172{ 173{
173 struct xfs_mount *mp = sc->mp; 174 struct xfs_mount *mp = sc->mp;
174 175
176 /* Unknown di_flags2 could be from a future kernel */
175 if (flags2 & ~XFS_DIFLAG2_ANY) 177 if (flags2 & ~XFS_DIFLAG2_ANY)
176 goto bad; 178 xchk_ino_set_warning(sc, ino);
177 179
178 /* reflink flag requires reflink feature */ 180 /* reflink flag requires reflink feature */
179 if ((flags2 & XFS_DIFLAG2_REFLINK) && 181 if ((flags2 & XFS_DIFLAG2_REFLINK) &&
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index addbd74ecd8e..6de8d90041ff 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
702 struct xfs_iext_cursor icur; 702 struct xfs_iext_cursor icur;
703 int error = 0; 703 int error = 0;
704 704
705 xfs_ilock(ip, XFS_ILOCK_EXCL); 705 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
706 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
707 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
708 if (error)
709 goto out_unlock;
710 }
711 706
707 xfs_ilock(ip, XFS_ILOCK_EXCL);
712 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got)) 708 if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
713 goto out_unlock; 709 goto out_unlock;
714 710
@@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
1584 tirec.br_blockcount, &irec, 1580 tirec.br_blockcount, &irec,
1585 &nimaps, 0); 1581 &nimaps, 0);
1586 if (error) 1582 if (error)
1587 goto out_defer; 1583 goto out;
1588 ASSERT(nimaps == 1); 1584 ASSERT(nimaps == 1);
1589 ASSERT(tirec.br_startoff == irec.br_startoff); 1585 ASSERT(tirec.br_startoff == irec.br_startoff);
1590 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec); 1586 trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
1599 /* Remove the mapping from the donor file. */ 1595 /* Remove the mapping from the donor file. */
1600 error = xfs_bmap_unmap_extent(tp, tip, &uirec); 1596 error = xfs_bmap_unmap_extent(tp, tip, &uirec);
1601 if (error) 1597 if (error)
1602 goto out_defer; 1598 goto out;
1603 1599
1604 /* Remove the mapping from the source file. */ 1600 /* Remove the mapping from the source file. */
1605 error = xfs_bmap_unmap_extent(tp, ip, &irec); 1601 error = xfs_bmap_unmap_extent(tp, ip, &irec);
1606 if (error) 1602 if (error)
1607 goto out_defer; 1603 goto out;
1608 1604
1609 /* Map the donor file's blocks into the source file. */ 1605 /* Map the donor file's blocks into the source file. */
1610 error = xfs_bmap_map_extent(tp, ip, &uirec); 1606 error = xfs_bmap_map_extent(tp, ip, &uirec);
1611 if (error) 1607 if (error)
1612 goto out_defer; 1608 goto out;
1613 1609
1614 /* Map the source file's blocks into the donor file. */ 1610 /* Map the source file's blocks into the donor file. */
1615 error = xfs_bmap_map_extent(tp, tip, &irec); 1611 error = xfs_bmap_map_extent(tp, tip, &irec);
1616 if (error) 1612 if (error)
1617 goto out_defer; 1613 goto out;
1618 1614
1619 error = xfs_defer_finish(tpp); 1615 error = xfs_defer_finish(tpp);
1620 tp = *tpp; 1616 tp = *tpp;
@@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
1636 tip->i_d.di_flags2 = tip_flags2; 1632 tip->i_d.di_flags2 = tip_flags2;
1637 return 0; 1633 return 0;
1638 1634
1639out_defer:
1640 xfs_defer_cancel(tp);
1641out: 1635out:
1642 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_); 1636 trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1643 tip->i_d.di_flags2 = tip_flags2; 1637 tip->i_d.di_flags2 = tip_flags2;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1c9d1398980b..12d8455bfbb2 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -532,6 +532,49 @@ xfs_buf_item_push(
532} 532}
533 533
534/* 534/*
535 * Drop the buffer log item refcount and take appropriate action. This helper
536 * determines whether the bli must be freed or not, since a decrement to zero
537 * does not necessarily mean the bli is unused.
538 *
539 * Return true if the bli is freed, false otherwise.
540 */
541bool
542xfs_buf_item_put(
543 struct xfs_buf_log_item *bip)
544{
545 struct xfs_log_item *lip = &bip->bli_item;
546 bool aborted;
547 bool dirty;
548
549 /* drop the bli ref and return if it wasn't the last one */
550 if (!atomic_dec_and_test(&bip->bli_refcount))
551 return false;
552
553 /*
554 * We dropped the last ref and must free the item if clean or aborted.
555 * If the bli is dirty and non-aborted, the buffer was clean in the
556 * transaction but still awaiting writeback from previous changes. In
557 * that case, the bli is freed on buffer writeback completion.
558 */
559 aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
560 XFS_FORCED_SHUTDOWN(lip->li_mountp);
561 dirty = bip->bli_flags & XFS_BLI_DIRTY;
562 if (dirty && !aborted)
563 return false;
564
565 /*
566 * The bli is aborted or clean. An aborted item may be in the AIL
567 * regardless of dirty state. For example, consider an aborted
568 * transaction that invalidated a dirty bli and cleared the dirty
569 * state.
570 */
571 if (aborted)
572 xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
573 xfs_buf_item_relse(bip->bli_buf);
574 return true;
575}
576
577/*
535 * Release the buffer associated with the buf log item. If there is no dirty 578 * Release the buffer associated with the buf log item. If there is no dirty
536 * logged data associated with the buffer recorded in the buf log item, then 579 * logged data associated with the buffer recorded in the buf log item, then
537 * free the buf log item and remove the reference to it in the buffer. 580 * free the buf log item and remove the reference to it in the buffer.
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
556{ 599{
557 struct xfs_buf_log_item *bip = BUF_ITEM(lip); 600 struct xfs_buf_log_item *bip = BUF_ITEM(lip);
558 struct xfs_buf *bp = bip->bli_buf; 601 struct xfs_buf *bp = bip->bli_buf;
559 bool aborted; 602 bool released;
560 bool hold = !!(bip->bli_flags & XFS_BLI_HOLD); 603 bool hold = bip->bli_flags & XFS_BLI_HOLD;
561 bool dirty = !!(bip->bli_flags & XFS_BLI_DIRTY); 604 bool stale = bip->bli_flags & XFS_BLI_STALE;
562#if defined(DEBUG) || defined(XFS_WARN) 605#if defined(DEBUG) || defined(XFS_WARN)
563 bool ordered = !!(bip->bli_flags & XFS_BLI_ORDERED); 606 bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
607 bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
564#endif 608#endif
565 609
566 aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
567
568 /* Clear the buffer's association with this transaction. */
569 bp->b_transp = NULL;
570
571 /*
572 * The per-transaction state has been copied above so clear it from the
573 * bli.
574 */
575 bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
576
577 /*
578 * If the buf item is marked stale, then don't do anything. We'll
579 * unlock the buffer and free the buf item when the buffer is unpinned
580 * for the last time.
581 */
582 if (bip->bli_flags & XFS_BLI_STALE) {
583 trace_xfs_buf_item_unlock_stale(bip);
584 ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
585 if (!aborted) {
586 atomic_dec(&bip->bli_refcount);
587 return;
588 }
589 }
590
591 trace_xfs_buf_item_unlock(bip); 610 trace_xfs_buf_item_unlock(bip);
592 611
593 /* 612 /*
594 * If the buf item isn't tracking any data, free it, otherwise drop the
595 * reference we hold to it. If we are aborting the transaction, this may
596 * be the only reference to the buf item, so we free it anyway
597 * regardless of whether it is dirty or not. A dirty abort implies a
598 * shutdown, anyway.
599 *
600 * The bli dirty state should match whether the blf has logged segments 613 * The bli dirty state should match whether the blf has logged segments
601 * except for ordered buffers, where only the bli should be dirty. 614 * except for ordered buffers, where only the bli should be dirty.
602 */ 615 */
603 ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) || 616 ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
604 (ordered && dirty && !xfs_buf_item_dirty_format(bip))); 617 (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
618 ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
605 619
606 /* 620 /*
607 * Clean buffers, by definition, cannot be in the AIL. However, aborted 621 * Clear the buffer's association with this transaction and
608 * buffers may be in the AIL regardless of dirty state. An aborted 622 * per-transaction state from the bli, which has been copied above.
609 * transaction that invalidates a buffer already in the AIL may have
610 * marked it stale and cleared the dirty state, for example.
611 *
612 * Therefore if we are aborting a buffer and we've just taken the last
613 * reference away, we have to check if it is in the AIL before freeing
614 * it. We need to free it in this case, because an aborted transaction
615 * has already shut the filesystem down and this is the last chance we
616 * will have to do so.
617 */ 623 */
618 if (atomic_dec_and_test(&bip->bli_refcount)) { 624 bp->b_transp = NULL;
619 if (aborted) { 625 bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
620 ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
621 xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
622 xfs_buf_item_relse(bp);
623 } else if (!dirty)
624 xfs_buf_item_relse(bp);
625 }
626 626
627 if (!hold) 627 /*
628 xfs_buf_relse(bp); 628 * Unref the item and unlock the buffer unless held or stale. Stale
629 * buffers remain locked until final unpin unless the bli is freed by
630 * the unref call. The latter implies shutdown because buffer
631 * invalidation dirties the bli and transaction.
632 */
633 released = xfs_buf_item_put(bip);
634 if (hold || (stale && !released))
635 return;
636 ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
637 xfs_buf_relse(bp);
629} 638}
630 639
631/* 640/*
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 3f7d7b72e7e6..90f65f891fab 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
51 51
52int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); 52int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
53void xfs_buf_item_relse(struct xfs_buf *); 53void xfs_buf_item_relse(struct xfs_buf *);
54bool xfs_buf_item_put(struct xfs_buf_log_item *);
54void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint); 55void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
55bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *); 56bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
56void xfs_buf_attach_iodone(struct xfs_buf *, 57void xfs_buf_attach_iodone(struct xfs_buf *,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d957a46dc1cb..05db9540e459 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
1563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags, 1563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
1564 XFS_ITRUNC_MAX_EXTENTS, &done); 1564 XFS_ITRUNC_MAX_EXTENTS, &done);
1565 if (error) 1565 if (error)
1566 goto out_bmap_cancel; 1566 goto out;
1567 1567
1568 /* 1568 /*
1569 * Duplicate the transaction that has the permanent 1569 * Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
1599out: 1599out:
1600 *tpp = tp; 1600 *tpp = tp;
1601 return error; 1601 return error;
1602out_bmap_cancel:
1603 /*
1604 * If the bunmapi call encounters an error, return to the caller where
1605 * the transaction can be properly aborted. We just need to make sure
1606 * we're not holding any resources that we were not when we came in.
1607 */
1608 xfs_defer_cancel(tp);
1609 goto out;
1610} 1602}
1611 1603
1612int 1604int
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c3e74f9128e8..f48ffd7a8d3e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
471 struct inode *inode, 471 struct inode *inode,
472 struct delayed_call *done) 472 struct delayed_call *done)
473{ 473{
474 char *link;
475
474 ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE); 476 ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
475 return XFS_I(inode)->i_df.if_u1.if_data; 477
478 /*
479 * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
480 * if_data is junk.
481 */
482 link = XFS_I(inode)->i_df.if_u1.if_data;
483 if (!link)
484 return ERR_PTR(-EFSCORRUPTED);
485 return link;
476} 486}
477 487
478STATIC int 488STATIC int
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a21dc61ec09e..1fc9e9042e0e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
1570 if (last_cycle != 0) { /* log completely written to */ 1570 if (last_cycle != 0) { /* log completely written to */
1571 xlog_put_bp(bp); 1571 xlog_put_bp(bp);
1572 return 0; 1572 return 0;
1573 } else if (first_cycle != 1) {
1574 /*
1575 * If the cycle of the last block is zero, the cycle of
1576 * the first block must be 1. If it's not, maybe we're
1577 * not looking at a log... Bail out.
1578 */
1579 xfs_warn(log->l_mp,
1580 "Log inconsistent or not a log (last==0, first!=1)");
1581 error = -EINVAL;
1582 goto bp_err;
1583 } 1573 }
1584 1574
1585 /* we have a partially zeroed log */ 1575 /* we have a partially zeroed log */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 38f405415b88..5289e22cb081 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
352 return error; 352 return error;
353} 353}
354 354
355/*
356 * Find the extent that maps the given range in the COW fork. Even if the extent
357 * is not shared we might have a preallocation for it in the COW fork. If so we
358 * use it that rather than trigger a new allocation.
359 */
360static int
361xfs_find_trim_cow_extent(
362 struct xfs_inode *ip,
363 struct xfs_bmbt_irec *imap,
364 bool *shared,
365 bool *found)
366{
367 xfs_fileoff_t offset_fsb = imap->br_startoff;
368 xfs_filblks_t count_fsb = imap->br_blockcount;
369 struct xfs_iext_cursor icur;
370 struct xfs_bmbt_irec got;
371 bool trimmed;
372
373 *found = false;
374
375 /*
376 * If we don't find an overlapping extent, trim the range we need to
377 * allocate to fit the hole we found.
378 */
379 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
380 got.br_startoff > offset_fsb)
381 return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
382
383 *shared = true;
384 if (isnullstartblock(got.br_startblock)) {
385 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
386 return 0;
387 }
388
389 /* real extent found - no need to allocate */
390 xfs_trim_extent(&got, offset_fsb, count_fsb);
391 *imap = got;
392 *found = true;
393 return 0;
394}
395
355/* Allocate all CoW reservations covering a range of blocks in a file. */ 396/* Allocate all CoW reservations covering a range of blocks in a file. */
356int 397int
357xfs_reflink_allocate_cow( 398xfs_reflink_allocate_cow(
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
363 struct xfs_mount *mp = ip->i_mount; 404 struct xfs_mount *mp = ip->i_mount;
364 xfs_fileoff_t offset_fsb = imap->br_startoff; 405 xfs_fileoff_t offset_fsb = imap->br_startoff;
365 xfs_filblks_t count_fsb = imap->br_blockcount; 406 xfs_filblks_t count_fsb = imap->br_blockcount;
366 struct xfs_bmbt_irec got; 407 struct xfs_trans *tp;
367 struct xfs_trans *tp = NULL;
368 int nimaps, error = 0; 408 int nimaps, error = 0;
369 bool trimmed; 409 bool found;
370 xfs_filblks_t resaligned; 410 xfs_filblks_t resaligned;
371 xfs_extlen_t resblks = 0; 411 xfs_extlen_t resblks = 0;
372 struct xfs_iext_cursor icur;
373 412
374retry:
375 ASSERT(xfs_is_reflink_inode(ip));
376 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 413 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
414 ASSERT(xfs_is_reflink_inode(ip));
377 415
378 /* 416 error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
379 * Even if the extent is not shared we might have a preallocation for 417 if (error || !*shared)
380 * it in the COW fork. If so use it. 418 return error;
381 */ 419 if (found)
382 if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) && 420 goto convert;
383 got.br_startoff <= offset_fsb) {
384 *shared = true;
385
386 /* If we have a real allocation in the COW fork we're done. */
387 if (!isnullstartblock(got.br_startblock)) {
388 xfs_trim_extent(&got, offset_fsb, count_fsb);
389 *imap = got;
390 goto convert;
391 }
392 421
393 xfs_trim_extent(imap, got.br_startoff, got.br_blockcount); 422 resaligned = xfs_aligned_fsb_count(imap->br_startoff,
394 } else { 423 imap->br_blockcount, xfs_get_cowextsz_hint(ip));
395 error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed); 424 resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
396 if (error || !*shared)
397 goto out;
398 }
399 425
400 if (!tp) { 426 xfs_iunlock(ip, *lockmode);
401 resaligned = xfs_aligned_fsb_count(imap->br_startoff, 427 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
402 imap->br_blockcount, xfs_get_cowextsz_hint(ip)); 428 *lockmode = XFS_ILOCK_EXCL;
403 resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 429 xfs_ilock(ip, *lockmode);
404 430
405 xfs_iunlock(ip, *lockmode); 431 if (error)
406 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); 432 return error;
407 *lockmode = XFS_ILOCK_EXCL;
408 xfs_ilock(ip, *lockmode);
409 433
410 if (error) 434 error = xfs_qm_dqattach_locked(ip, false);
411 return error; 435 if (error)
436 goto out_trans_cancel;
412 437
413 error = xfs_qm_dqattach_locked(ip, false); 438 /*
414 if (error) 439 * Check for an overlapping extent again now that we dropped the ilock.
415 goto out; 440 */
416 goto retry; 441 error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
442 if (error || !*shared)
443 goto out_trans_cancel;
444 if (found) {
445 xfs_trans_cancel(tp);
446 goto convert;
417 } 447 }
418 448
419 error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, 449 error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
420 XFS_QMOPT_RES_REGBLKS); 450 XFS_QMOPT_RES_REGBLKS);
421 if (error) 451 if (error)
422 goto out; 452 goto out_trans_cancel;
423 453
424 xfs_trans_ijoin(tp, ip, 0); 454 xfs_trans_ijoin(tp, ip, 0);
425 455
426 nimaps = 1;
427
428 /* Allocate the entire reservation as unwritten blocks. */ 456 /* Allocate the entire reservation as unwritten blocks. */
457 nimaps = 1;
429 error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount, 458 error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
430 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 459 XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
431 resblks, imap, &nimaps); 460 resblks, imap, &nimaps);
432 if (error) 461 if (error)
433 goto out_trans_cancel; 462 goto out_unreserve;
434 463
435 xfs_inode_set_cowblocks_tag(ip); 464 xfs_inode_set_cowblocks_tag(ip);
436
437 /* Finish up. */
438 error = xfs_trans_commit(tp); 465 error = xfs_trans_commit(tp);
439 if (error) 466 if (error)
440 return error; 467 return error;
@@ -447,12 +474,12 @@ retry:
447 return -ENOSPC; 474 return -ENOSPC;
448convert: 475convert:
449 return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb); 476 return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
450out_trans_cancel: 477
478out_unreserve:
451 xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0, 479 xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
452 XFS_QMOPT_RES_REGBLKS); 480 XFS_QMOPT_RES_REGBLKS);
453out: 481out_trans_cancel:
454 if (tp) 482 xfs_trans_cancel(tp);
455 xfs_trans_cancel(tp);
456 return error; 483 return error;
457} 484}
458 485
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
666 if (!del.br_blockcount) 693 if (!del.br_blockcount)
667 goto prev_extent; 694 goto prev_extent;
668 695
669 ASSERT(!isnullstartblock(got.br_startblock));
670
671 /* 696 /*
672 * Don't remap unwritten extents; these are 697 * Only remap real extent that contain data. With AIO
673 * speculatively preallocated CoW extents that have been 698 * speculatively preallocations can leak into the range we
674 * allocated but have not yet been involved in a write. 699 * are called upon, and we need to skip them.
675 */ 700 */
676 if (got.br_state == XFS_EXT_UNWRITTEN) 701 if (!xfs_bmap_is_real_extent(&got))
677 goto prev_extent; 702 goto prev_extent;
678 703
679 /* Unmap the old blocks in the data fork. */ 704 /* Unmap the old blocks in the data fork. */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad315e83bc02..3043e5ed6495 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
473DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); 473DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
474DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); 474DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
475DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); 475DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
476DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
477DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); 476DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
478DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); 477DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
479DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); 478DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index bedc5a5133a5..912b42f5fe4a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -259,6 +259,14 @@ xfs_trans_alloc(
259 struct xfs_trans *tp; 259 struct xfs_trans *tp;
260 int error; 260 int error;
261 261
262 /*
263 * Allocate the handle before we do our freeze accounting and setting up
264 * GFP_NOFS allocation context so that we avoid lockdep false positives
265 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
266 */
267 tp = kmem_zone_zalloc(xfs_trans_zone,
268 (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
269
262 if (!(flags & XFS_TRANS_NO_WRITECOUNT)) 270 if (!(flags & XFS_TRANS_NO_WRITECOUNT))
263 sb_start_intwrite(mp->m_super); 271 sb_start_intwrite(mp->m_super);
264 272
@@ -270,8 +278,6 @@ xfs_trans_alloc(
270 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); 278 mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
271 atomic_inc(&mp->m_active_trans); 279 atomic_inc(&mp->m_active_trans);
272 280
273 tp = kmem_zone_zalloc(xfs_trans_zone,
274 (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
275 tp->t_magic = XFS_TRANS_HEADER_MAGIC; 281 tp->t_magic = XFS_TRANS_HEADER_MAGIC;
276 tp->t_flags = flags; 282 tp->t_flags = flags;
277 tp->t_mountp = mp; 283 tp->t_mountp = mp;
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 15919f67a88f..286a287ac57a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
322} 322}
323 323
324/* 324/*
325 * Release the buffer bp which was previously acquired with one of the 325 * Release a buffer previously joined to the transaction. If the buffer is
326 * xfs_trans_... buffer allocation routines if the buffer has not 326 * modified within this transaction, decrement the recursion count but do not
327 * been modified within this transaction. If the buffer is modified 327 * release the buffer even if the count goes to 0. If the buffer is not modified
328 * within this transaction, do decrement the recursion count but do 328 * within the transaction, decrement the recursion count and release the buffer
329 * not release the buffer even if the count goes to 0. If the buffer is not 329 * if the recursion count goes to 0.
330 * modified within the transaction, decrement the recursion count and
331 * release the buffer if the recursion count goes to 0.
332 * 330 *
333 * If the buffer is to be released and it was not modified before 331 * If the buffer is to be released and it was not already dirty before this
334 * this transaction began, then free the buf_log_item associated with it. 332 * transaction began, then also free the buf_log_item associated with it.
335 * 333 *
336 * If the transaction pointer is NULL, make this just a normal 334 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
337 * brelse() call.
338 */ 335 */
339void 336void
340xfs_trans_brelse( 337xfs_trans_brelse(
341 xfs_trans_t *tp, 338 struct xfs_trans *tp,
342 xfs_buf_t *bp) 339 struct xfs_buf *bp)
343{ 340{
344 struct xfs_buf_log_item *bip; 341 struct xfs_buf_log_item *bip = bp->b_log_item;
345 int freed;
346 342
347 /* 343 ASSERT(bp->b_transp == tp);
348 * Default to a normal brelse() call if the tp is NULL. 344
349 */ 345 if (!tp) {
350 if (tp == NULL) {
351 ASSERT(bp->b_transp == NULL);
352 xfs_buf_relse(bp); 346 xfs_buf_relse(bp);
353 return; 347 return;
354 } 348 }
355 349
356 ASSERT(bp->b_transp == tp); 350 trace_xfs_trans_brelse(bip);
357 bip = bp->b_log_item;
358 ASSERT(bip->bli_item.li_type == XFS_LI_BUF); 351 ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
359 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
360 ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
361 ASSERT(atomic_read(&bip->bli_refcount) > 0); 352 ASSERT(atomic_read(&bip->bli_refcount) > 0);
362 353
363 trace_xfs_trans_brelse(bip);
364
365 /* 354 /*
366 * If the release is just for a recursive lock, 355 * If the release is for a recursive lookup, then decrement the count
367 * then decrement the count and return. 356 * and return.
368 */ 357 */
369 if (bip->bli_recur > 0) { 358 if (bip->bli_recur > 0) {
370 bip->bli_recur--; 359 bip->bli_recur--;
@@ -372,64 +361,24 @@ xfs_trans_brelse(
372 } 361 }
373 362
374 /* 363 /*
375 * If the buffer is dirty within this transaction, we can't 364 * If the buffer is invalidated or dirty in this transaction, we can't
376 * release it until we commit. 365 * release it until we commit.
377 */ 366 */
378 if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags)) 367 if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
379 return; 368 return;
380
381 /*
382 * If the buffer has been invalidated, then we can't release
383 * it until the transaction commits to disk unless it is re-dirtied
384 * as part of this transaction. This prevents us from pulling
385 * the item from the AIL before we should.
386 */
387 if (bip->bli_flags & XFS_BLI_STALE) 369 if (bip->bli_flags & XFS_BLI_STALE)
388 return; 370 return;
389 371
390 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
391
392 /* 372 /*
393 * Free up the log item descriptor tracking the released item. 373 * Unlink the log item from the transaction and clear the hold flag, if
374 * set. We wouldn't want the next user of the buffer to get confused.
394 */ 375 */
376 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
395 xfs_trans_del_item(&bip->bli_item); 377 xfs_trans_del_item(&bip->bli_item);
378 bip->bli_flags &= ~XFS_BLI_HOLD;
396 379
397 /* 380 /* drop the reference to the bli */
398 * Clear the hold flag in the buf log item if it is set. 381 xfs_buf_item_put(bip);
399 * We wouldn't want the next user of the buffer to
400 * get confused.
401 */
402 if (bip->bli_flags & XFS_BLI_HOLD) {
403 bip->bli_flags &= ~XFS_BLI_HOLD;
404 }
405
406 /*
407 * Drop our reference to the buf log item.
408 */
409 freed = atomic_dec_and_test(&bip->bli_refcount);
410
411 /*
412 * If the buf item is not tracking data in the log, then we must free it
413 * before releasing the buffer back to the free pool.
414 *
415 * If the fs has shutdown and we dropped the last reference, it may fall
416 * on us to release a (possibly dirty) bli if it never made it to the
417 * AIL (e.g., the aborted unpin already happened and didn't release it
418 * due to our reference). Since we're already shutdown and need
419 * ail_lock, just force remove from the AIL and release the bli here.
420 */
421 if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
422 xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
423 xfs_buf_item_relse(bp);
424 } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
425/***
426 ASSERT(bp->b_pincount == 0);
427***/
428 ASSERT(atomic_read(&bip->bli_refcount) == 0);
429 ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
430 ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
431 xfs_buf_item_relse(bp);
432 }
433 382
434 bp->b_transp = NULL; 383 bp->b_transp = NULL;
435 xfs_buf_relse(bp); 384 xfs_buf_relse(bp);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 66d1d45fa2e1..d356f802945a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
1026#define ioport_map ioport_map 1026#define ioport_map ioport_map
1027static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 1027static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
1028{ 1028{
1029 return PCI_IOBASE + (port & MMIO_UPPER_LIMIT); 1029 port &= IO_SPACE_LIMIT;
1030 return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
1030} 1031}
1031#endif 1032#endif
1032 1033
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..152b3055e9e1 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
675static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) 675static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
676{ 676{
677 return drm_core_check_feature(dev, DRIVER_ATOMIC) || 677 return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
678 dev->mode_config.funcs->atomic_commit != NULL; 678 (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
679} 679}
680 680
681 681
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 582a0ec0aa70..777814755fa6 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,7 +89,6 @@ struct drm_panel {
89 struct drm_device *drm; 89 struct drm_device *drm;
90 struct drm_connector *connector; 90 struct drm_connector *connector;
91 struct device *dev; 91 struct device *dev;
92 struct device_link *link;
93 92
94 const struct drm_panel_funcs *funcs; 93 const struct drm_panel_funcs *funcs;
95 94
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index ca1d2cc2cdfa..18863d56273c 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
199 199
200#define __declare_arg_0(a0, res) \ 200#define __declare_arg_0(a0, res) \
201 struct arm_smccc_res *___res = res; \ 201 struct arm_smccc_res *___res = res; \
202 register u32 r0 asm("r0") = a0; \ 202 register unsigned long r0 asm("r0") = (u32)a0; \
203 register unsigned long r1 asm("r1"); \ 203 register unsigned long r1 asm("r1"); \
204 register unsigned long r2 asm("r2"); \ 204 register unsigned long r2 asm("r2"); \
205 register unsigned long r3 asm("r3") 205 register unsigned long r3 asm("r3")
206 206
207#define __declare_arg_1(a0, a1, res) \ 207#define __declare_arg_1(a0, a1, res) \
208 typeof(a1) __a1 = a1; \
208 struct arm_smccc_res *___res = res; \ 209 struct arm_smccc_res *___res = res; \
209 register u32 r0 asm("r0") = a0; \ 210 register unsigned long r0 asm("r0") = (u32)a0; \
210 register typeof(a1) r1 asm("r1") = a1; \ 211 register unsigned long r1 asm("r1") = __a1; \
211 register unsigned long r2 asm("r2"); \ 212 register unsigned long r2 asm("r2"); \
212 register unsigned long r3 asm("r3") 213 register unsigned long r3 asm("r3")
213 214
214#define __declare_arg_2(a0, a1, a2, res) \ 215#define __declare_arg_2(a0, a1, a2, res) \
216 typeof(a1) __a1 = a1; \
217 typeof(a2) __a2 = a2; \
215 struct arm_smccc_res *___res = res; \ 218 struct arm_smccc_res *___res = res; \
216 register u32 r0 asm("r0") = a0; \ 219 register unsigned long r0 asm("r0") = (u32)a0; \
217 register typeof(a1) r1 asm("r1") = a1; \ 220 register unsigned long r1 asm("r1") = __a1; \
218 register typeof(a2) r2 asm("r2") = a2; \ 221 register unsigned long r2 asm("r2") = __a2; \
219 register unsigned long r3 asm("r3") 222 register unsigned long r3 asm("r3")
220 223
221#define __declare_arg_3(a0, a1, a2, a3, res) \ 224#define __declare_arg_3(a0, a1, a2, a3, res) \
225 typeof(a1) __a1 = a1; \
226 typeof(a2) __a2 = a2; \
227 typeof(a3) __a3 = a3; \
222 struct arm_smccc_res *___res = res; \ 228 struct arm_smccc_res *___res = res; \
223 register u32 r0 asm("r0") = a0; \ 229 register unsigned long r0 asm("r0") = (u32)a0; \
224 register typeof(a1) r1 asm("r1") = a1; \ 230 register unsigned long r1 asm("r1") = __a1; \
225 register typeof(a2) r2 asm("r2") = a2; \ 231 register unsigned long r2 asm("r2") = __a2; \
226 register typeof(a3) r3 asm("r3") = a3 232 register unsigned long r3 asm("r3") = __a3
227 233
228#define __declare_arg_4(a0, a1, a2, a3, a4, res) \ 234#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
235 typeof(a4) __a4 = a4; \
229 __declare_arg_3(a0, a1, a2, a3, res); \ 236 __declare_arg_3(a0, a1, a2, a3, res); \
230 register typeof(a4) r4 asm("r4") = a4 237 register unsigned long r4 asm("r4") = __a4
231 238
232#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ 239#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
240 typeof(a5) __a5 = a5; \
233 __declare_arg_4(a0, a1, a2, a3, a4, res); \ 241 __declare_arg_4(a0, a1, a2, a3, a4, res); \
234 register typeof(a5) r5 asm("r5") = a5 242 register unsigned long r5 asm("r5") = __a5
235 243
236#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ 244#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
245 typeof(a6) __a6 = a6; \
237 __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ 246 __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
238 register typeof(a6) r6 asm("r6") = a6 247 register unsigned long r6 asm("r6") = __a6
239 248
240#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ 249#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
250 typeof(a7) __a7 = a7; \
241 __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ 251 __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
242 register typeof(a7) r7 asm("r7") = a7 252 register unsigned long r7 asm("r7") = __a7
243 253
244#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) 254#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
245#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) 255#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 34aec30e06c7..6d766a19f2bb 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -56,6 +56,7 @@ struct blkcg {
56 struct list_head all_blkcgs_node; 56 struct list_head all_blkcgs_node;
57#ifdef CONFIG_CGROUP_WRITEBACK 57#ifdef CONFIG_CGROUP_WRITEBACK
58 struct list_head cgwb_list; 58 struct list_head cgwb_list;
59 refcount_t cgwb_refcnt;
59#endif 60#endif
60}; 61};
61 62
@@ -89,7 +90,6 @@ struct blkg_policy_data {
89 /* the blkg and policy id this per-policy data belongs to */ 90 /* the blkg and policy id this per-policy data belongs to */
90 struct blkcg_gq *blkg; 91 struct blkcg_gq *blkg;
91 int plid; 92 int plid;
92 bool offline;
93}; 93};
94 94
95/* 95/*
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
387 return cpd ? cpd->blkcg : NULL; 387 return cpd ? cpd->blkcg : NULL;
388} 388}
389 389
390extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
391
392#ifdef CONFIG_CGROUP_WRITEBACK
393
394/**
395 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
396 * @blkcg: blkcg of interest
397 *
398 * This is used to track the number of active wb's related to a blkcg.
399 */
400static inline void blkcg_cgwb_get(struct blkcg *blkcg)
401{
402 refcount_inc(&blkcg->cgwb_refcnt);
403}
404
405/**
406 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
407 * @blkcg: blkcg of interest
408 *
409 * This is used to track the number of active wb's related to a blkcg.
410 * When this count goes to zero, all active wb has finished so the
411 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
412 * This work may occur in cgwb_release_workfn() on the cgwb_release
413 * workqueue.
414 */
415static inline void blkcg_cgwb_put(struct blkcg *blkcg)
416{
417 if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
418 blkcg_destroy_blkgs(blkcg);
419}
420
421#else
422
423static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
424
425static inline void blkcg_cgwb_put(struct blkcg *blkcg)
426{
427 /* wb isn't being accounted, so trigger destruction right away */
428 blkcg_destroy_blkgs(blkcg);
429}
430
431#endif
432
390/** 433/**
391 * blkg_path - format cgroup path of blkg 434 * blkg_path - format cgroup path of blkg
392 * @blkg: blkg of interest 435 * @blkg: blkg of interest
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d6869e0e2b64..6980014357d4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,7 +54,7 @@ struct blk_stat_callback;
54 * Maximum number of blkcg policies allowed to be registered concurrently. 54 * Maximum number of blkcg policies allowed to be registered concurrently.
55 * Defined here to simplify include dependency. 55 * Defined here to simplify include dependency.
56 */ 56 */
57#define BLKCG_MAX_POLS 3 57#define BLKCG_MAX_POLS 5
58 58
59typedef void (rq_end_io_fn)(struct request *, blk_status_t); 59typedef void (rq_end_io_fn)(struct request *, blk_status_t);
60 60
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 763bbad1e258..4d36b27214fd 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,20 +79,6 @@
79#define __noretpoline __attribute__((indirect_branch("keep"))) 79#define __noretpoline __attribute__((indirect_branch("keep")))
80#endif 80#endif
81 81
82/*
83 * it doesn't make sense on ARM (currently the only user of __naked)
84 * to trace naked functions because then mcount is called without
85 * stack and frame pointer being set up and there is no chance to
86 * restore the lr register to the value before mcount was called.
87 *
88 * The asm() bodies of naked functions often depend on standard calling
89 * conventions, therefore they must be noinline and noclone.
90 *
91 * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
92 * See GCC PR44290.
93 */
94#define __naked __attribute__((naked)) noinline __noclone notrace
95
96#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 82#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
97 83
98#define __optimize(level) __attribute__((__optimize__(level))) 84#define __optimize(level) __attribute__((__optimize__(level)))
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3525c179698c..db192becfec4 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -226,6 +226,14 @@ struct ftrace_likely_data {
226#define notrace __attribute__((no_instrument_function)) 226#define notrace __attribute__((no_instrument_function))
227#endif 227#endif
228 228
229/*
230 * it doesn't make sense on ARM (currently the only user of __naked)
231 * to trace naked functions because then mcount is called without
232 * stack and frame pointer being set up and there is no chance to
233 * restore the lr register to the value before mcount was called.
234 */
235#define __naked __attribute__((naked)) notrace
236
229#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) 237#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
230 238
231/* 239/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 33322702c910..6c0b4a1c22ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1763,6 +1763,7 @@ struct file_operations {
1763 u64); 1763 u64);
1764 int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, 1764 int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
1765 u64); 1765 u64);
1766 int (*fadvise)(struct file *, loff_t, loff_t, int);
1766} __randomize_layout; 1767} __randomize_layout;
1767 1768
1768struct inode_operations { 1769struct inode_operations {
@@ -3459,4 +3460,8 @@ static inline bool dir_relax_shared(struct inode *inode)
3459extern bool path_noexec(const struct path *path); 3460extern bool path_noexec(const struct path *path);
3460extern void inode_nohighmem(struct inode *inode); 3461extern void inode_nohighmem(struct inode *inode);
3461 3462
3463/* mm/fadvise.c */
3464extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
3465 int advice);
3466
3462#endif /* _LINUX_FS_H */ 3467#endif /* _LINUX_FS_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..25c08c6c7f99 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -83,10 +83,10 @@ struct partition {
83} __attribute__((packed)); 83} __attribute__((packed));
84 84
85struct disk_stats { 85struct disk_stats {
86 u64 nsecs[NR_STAT_GROUPS];
86 unsigned long sectors[NR_STAT_GROUPS]; 87 unsigned long sectors[NR_STAT_GROUPS];
87 unsigned long ios[NR_STAT_GROUPS]; 88 unsigned long ios[NR_STAT_GROUPS];
88 unsigned long merges[NR_STAT_GROUPS]; 89 unsigned long merges[NR_STAT_GROUPS];
89 unsigned long ticks[NR_STAT_GROUPS];
90 unsigned long io_ticks; 90 unsigned long io_ticks;
91 unsigned long time_in_queue; 91 unsigned long time_in_queue;
92}; 92};
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
354 354
355#endif /* CONFIG_SMP */ 355#endif /* CONFIG_SMP */
356 356
357#define part_stat_read_msecs(part, which) \
358 div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
359
357#define part_stat_read_accum(part, field) \ 360#define part_stat_read_accum(part, field) \
358 (part_stat_read(part, field[STAT_READ]) + \ 361 (part_stat_read(part, field[STAT_READ]) + \
359 part_stat_read(part, field[STAT_WRITE]) + \ 362 part_stat_read(part, field[STAT_WRITE]) + \
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 834e6461a690..d44a78362942 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -526,6 +526,7 @@ struct hid_input {
526 const char *name; 526 const char *name;
527 bool registered; 527 bool registered;
528 struct list_head reports; /* the list of reports */ 528 struct list_head reports; /* the list of reports */
529 unsigned int application; /* application usage for this input */
529}; 530};
530 531
531enum hid_type { 532enum hid_type {
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b79387fd57da..65b4eaed1d96 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
855} 855}
856 856
857u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); 857u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
858void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); 858void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
859 859
860int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); 860int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
861/** 861/**
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0205aee44ded..c926698040e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
733void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 733void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
734int kvm_vcpu_yield_to(struct kvm_vcpu *target); 734int kvm_vcpu_yield_to(struct kvm_vcpu *target);
735void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); 735void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
736void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
737void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
738 736
739void kvm_flush_remote_tlbs(struct kvm *kvm); 737void kvm_flush_remote_tlbs(struct kvm *kvm);
740void kvm_reload_remote_mmus(struct kvm *kvm); 738void kvm_reload_remote_mmus(struct kvm *kvm);
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
21/* 21/*
22 * Regulator configuration 22 * Regulator configuration
23 */ 23 */
24/* DA9063 regulator IDs */ 24/* DA9063 and DA9063L regulator IDs */
25enum { 25enum {
26 /* BUCKs */ 26 /* BUCKs */
27 DA9063_ID_BCORE1, 27 DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
37 DA9063_ID_BMEM_BIO_MERGED, 37 DA9063_ID_BMEM_BIO_MERGED,
38 /* When two BUCKs are merged, they cannot be reused separately */ 38 /* When two BUCKs are merged, they cannot be reused separately */
39 39
40 /* LDOs */ 40 /* LDOs on both DA9063 and DA9063L */
41 DA9063_ID_LDO3,
42 DA9063_ID_LDO7,
43 DA9063_ID_LDO8,
44 DA9063_ID_LDO9,
45 DA9063_ID_LDO11,
46
47 /* DA9063-only LDOs */
41 DA9063_ID_LDO1, 48 DA9063_ID_LDO1,
42 DA9063_ID_LDO2, 49 DA9063_ID_LDO2,
43 DA9063_ID_LDO3,
44 DA9063_ID_LDO4, 50 DA9063_ID_LDO4,
45 DA9063_ID_LDO5, 51 DA9063_ID_LDO5,
46 DA9063_ID_LDO6, 52 DA9063_ID_LDO6,
47 DA9063_ID_LDO7,
48 DA9063_ID_LDO8,
49 DA9063_ID_LDO9,
50 DA9063_ID_LDO10, 53 DA9063_ID_LDO10,
51 DA9063_ID_LDO11,
52}; 54};
53 55
54/* Regulators platform data */ 56/* Regulators platform data */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index a528747f8aed..e8338e5dc10b 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -78,9 +78,9 @@ enum {
78 BD71837_REG_TRANS_COND0 = 0x1F, 78 BD71837_REG_TRANS_COND0 = 0x1F,
79 BD71837_REG_TRANS_COND1 = 0x20, 79 BD71837_REG_TRANS_COND1 = 0x20,
80 BD71837_REG_VRFAULTEN = 0x21, 80 BD71837_REG_VRFAULTEN = 0x21,
81 BD71837_REG_MVRFLTMASK0 = 0x22, 81 BD718XX_REG_MVRFLTMASK0 = 0x22,
82 BD71837_REG_MVRFLTMASK1 = 0x23, 82 BD718XX_REG_MVRFLTMASK1 = 0x23,
83 BD71837_REG_MVRFLTMASK2 = 0x24, 83 BD718XX_REG_MVRFLTMASK2 = 0x24,
84 BD71837_REG_RCVCFG = 0x25, 84 BD71837_REG_RCVCFG = 0x25,
85 BD71837_REG_RCVNUM = 0x26, 85 BD71837_REG_RCVNUM = 0x26,
86 BD71837_REG_PWRONCONFIG0 = 0x27, 86 BD71837_REG_PWRONCONFIG0 = 0x27,
@@ -159,6 +159,33 @@ enum {
159#define BUCK8_MASK 0x3F 159#define BUCK8_MASK 0x3F
160#define BUCK8_DEFAULT 0x1E 160#define BUCK8_DEFAULT 0x1E
161 161
162/* BD718XX Voltage monitoring masks */
163#define BD718XX_BUCK1_VRMON80 0x1
164#define BD718XX_BUCK1_VRMON130 0x2
165#define BD718XX_BUCK2_VRMON80 0x4
166#define BD718XX_BUCK2_VRMON130 0x8
167#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
168#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
169#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
170#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
171#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
172#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
173#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
174#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
175#define BD718XX_LDO1_VRMON80 0x1
176#define BD718XX_LDO2_VRMON80 0x2
177#define BD718XX_LDO3_VRMON80 0x4
178#define BD718XX_LDO4_VRMON80 0x8
179#define BD718XX_LDO5_VRMON80 0x10
180#define BD718XX_LDO6_VRMON80 0x20
181
182/* BD71837 specific voltage monitoring masks */
183#define BD71837_BUCK3_VRMON80 0x10
184#define BD71837_BUCK3_VRMON130 0x20
185#define BD71837_BUCK4_VRMON80 0x40
186#define BD71837_BUCK4_VRMON130 0x80
187#define BD71837_LDO7_VRMON80 0x40
188
162/* BD71837_REG_IRQ bits */ 189/* BD71837_REG_IRQ bits */
163#define IRQ_SWRST 0x40 190#define IRQ_SWRST 0x40
164#define IRQ_PWRON_S 0x20 191#define IRQ_PWRON_S 0x20
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7a452716de4b..66d94b4557cf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
362struct mlx5_frag_buf_ctrl { 362struct mlx5_frag_buf_ctrl {
363 struct mlx5_frag_buf frag_buf; 363 struct mlx5_frag_buf frag_buf;
364 u32 sz_m1; 364 u32 sz_m1;
365 u32 frag_sz_m1; 365 u16 frag_sz_m1;
366 u32 strides_offset; 366 u16 strides_offset;
367 u8 log_sz; 367 u8 log_sz;
368 u8 log_stride; 368 u8 log_stride;
369 u8 log_frag_strides; 369 u8 log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
995} 995}
996 996
997static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, 997static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
998 u32 strides_offset, 998 u16 strides_offset,
999 struct mlx5_frag_buf_ctrl *fbc) 999 struct mlx5_frag_buf_ctrl *fbc)
1000{ 1000{
1001 fbc->log_stride = log_stride; 1001 fbc->log_stride = log_stride;
@@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
1052void mlx5_health_cleanup(struct mlx5_core_dev *dev); 1052void mlx5_health_cleanup(struct mlx5_core_dev *dev);
1053int mlx5_health_init(struct mlx5_core_dev *dev); 1053int mlx5_health_init(struct mlx5_core_dev *dev);
1054void mlx5_start_health_poll(struct mlx5_core_dev *dev); 1054void mlx5_start_health_poll(struct mlx5_core_dev *dev);
1055void mlx5_stop_health_poll(struct mlx5_core_dev *dev); 1055void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
1056void mlx5_drain_health_wq(struct mlx5_core_dev *dev); 1056void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
1057void mlx5_trigger_health_work(struct mlx5_core_dev *dev); 1057void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
1058void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); 1058void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..7f5ca2cd3a32 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
90 90
91 u32 *rqn; 91 u32 *rqn;
92 u32 *sqn; 92 u32 *sqn;
93
94 bool peer_gone;
93}; 95};
94 96
95struct mlx5_hairpin * 97struct mlx5_hairpin *
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cd2bc939efd0..5ed8f6292a53 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct mm_struct {
341 struct { 341 struct {
342 struct vm_area_struct *mmap; /* list of VMAs */ 342 struct vm_area_struct *mmap; /* list of VMAs */
343 struct rb_root mm_rb; 343 struct rb_root mm_rb;
344 u32 vmacache_seqnum; /* per-thread vmacache */ 344 u64 vmacache_seqnum; /* per-thread vmacache */
345#ifdef CONFIG_MMU 345#ifdef CONFIG_MMU
346 unsigned long (*get_unmapped_area) (struct file *filp, 346 unsigned long (*get_unmapped_area) (struct file *filp,
347 unsigned long addr, unsigned long len, 347 unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
32#define VMACACHE_MASK (VMACACHE_SIZE - 1) 32#define VMACACHE_MASK (VMACACHE_SIZE - 1)
33 33
34struct vmacache { 34struct vmacache {
35 u32 seqnum; 35 u64 seqnum;
36 struct vm_area_struct *vmas[VMACACHE_SIZE]; 36 struct vm_area_struct *vmas[VMACACHE_SIZE];
37}; 37};
38 38
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1298a7daa57d..01797cb4587e 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -754,6 +754,7 @@ struct tb_service_id {
754 * struct typec_device_id - USB Type-C alternate mode identifiers 754 * struct typec_device_id - USB Type-C alternate mode identifiers
755 * @svid: Standard or Vendor ID 755 * @svid: Standard or Vendor ID
756 * @mode: Mode index 756 * @mode: Mode index
757 * @driver_data: Driver specific data
757 */ 758 */
758struct typec_device_id { 759struct typec_device_id {
759 __u16 svid; 760 __u16 svid;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..c7861e4b402c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
1730 * switch driver and used to set the phys state of the 1730 * switch driver and used to set the phys state of the
1731 * switch port. 1731 * switch port.
1732 * 1732 *
1733 * @wol_enabled: Wake-on-LAN is enabled
1734 *
1733 * FIXME: cleanup struct net_device such that network protocol info 1735 * FIXME: cleanup struct net_device such that network protocol info
1734 * moves out. 1736 * moves out.
1735 */ 1737 */
@@ -2014,6 +2016,7 @@ struct net_device {
2014 struct lock_class_key *qdisc_tx_busylock; 2016 struct lock_class_key *qdisc_tx_busylock;
2015 struct lock_class_key *qdisc_running_key; 2017 struct lock_class_key *qdisc_running_key;
2016 bool proto_down; 2018 bool proto_down;
2019 unsigned wol_enabled:1;
2017}; 2020};
2018#define to_net_dev(d) container_of(d, struct net_device, dev) 2021#define to_net_dev(d) container_of(d, struct net_device, dev)
2019 2022
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
215 break; 215 break;
216 case NFPROTO_ARP: 216 case NFPROTO_ARP:
217#ifdef CONFIG_NETFILTER_FAMILY_ARP 217#ifdef CONFIG_NETFILTER_FAMILY_ARP
218 if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
219 break;
218 hook_head = rcu_dereference(net->nf.hooks_arp[hook]); 220 hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
219#endif 221#endif
220 break; 222 break;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..3ef82d3a78db 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -49,8 +49,9 @@ struct netpoll_info {
49}; 49};
50 50
51#ifdef CONFIG_NETPOLL 51#ifdef CONFIG_NETPOLL
52extern void netpoll_poll_disable(struct net_device *dev); 52void netpoll_poll_dev(struct net_device *dev);
53extern void netpoll_poll_enable(struct net_device *dev); 53void netpoll_poll_disable(struct net_device *dev);
54void netpoll_poll_enable(struct net_device *dev);
54#else 55#else
55static inline void netpoll_poll_disable(struct net_device *dev) { return; } 56static inline void netpoll_poll_disable(struct net_device *dev) { return; }
56static inline void netpoll_poll_enable(struct net_device *dev) { return; } 57static inline void netpoll_poll_enable(struct net_device *dev) { return; }
diff --git a/include/linux/of.h b/include/linux/of.h
index 4d25e4f952d9..99b0ebf49632 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
256#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 256#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
257#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 257#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
258 258
259extern bool of_node_name_eq(const struct device_node *np, const char *name);
260extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
261
259static inline const char *of_node_full_name(const struct device_node *np) 262static inline const char *of_node_full_name(const struct device_node *np)
260{ 263{
261 return np ? np->full_name : "<no-node>"; 264 return np ? np->full_name : "<no-node>";
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
290extern struct device_node *of_get_next_available_child( 293extern struct device_node *of_get_next_available_child(
291 const struct device_node *node, struct device_node *prev); 294 const struct device_node *node, struct device_node *prev);
292 295
296extern struct device_node *of_get_compatible_child(const struct device_node *parent,
297 const char *compatible);
293extern struct device_node *of_get_child_by_name(const struct device_node *node, 298extern struct device_node *of_get_child_by_name(const struct device_node *node,
294 const char *name); 299 const char *name);
295 300
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
561 return NULL; 566 return NULL;
562} 567}
563 568
569static inline bool of_node_name_eq(const struct device_node *np, const char *name)
570{
571 return false;
572}
573
574static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
575{
576 return false;
577}
578
564static inline const char* of_node_full_name(const struct device_node *np) 579static inline const char* of_node_full_name(const struct device_node *np)
565{ 580{
566 return "<no-node>"; 581 return "<no-node>";
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void)
632 return false; 647 return false;
633} 648}
634 649
650static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
651 const char *compatible)
652{
653 return NULL;
654}
655
635static inline struct device_node *of_get_child_by_name( 656static inline struct device_node *of_get_child_by_name(
636 const struct device_node *node, 657 const struct device_node *node,
637 const char *name) 658 const char *name)
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node(
967 return of_find_matching_node_and_match(from, matches, NULL); 988 return of_find_matching_node_and_match(from, matches, NULL);
968} 989}
969 990
991static inline const char *of_node_get_device_type(const struct device_node *np)
992{
993 return of_get_property(np, "type", NULL);
994}
995
996static inline bool of_node_is_type(const struct device_node *np, const char *type)
997{
998 const char *match = of_node_get_device_type(np);
999
1000 return np && match && type && !strcmp(match, type);
1001}
1002
970/** 1003/**
971 * of_property_count_u8_elems - Count the number of u8 elements in a property 1004 * of_property_count_u8_elems - Count the number of u8 elements in a property
972 * 1005 *
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e72ca8dd6241..6925828f9f25 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
1235int devm_request_pci_bus_resources(struct device *dev, 1235int devm_request_pci_bus_resources(struct device *dev,
1236 struct list_head *resources); 1236 struct list_head *resources);
1237 1237
1238/* Temporary until new and working PCI SBR API in place */
1239int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1240
1238#define pci_bus_for_each_resource(bus, res, i) \ 1241#define pci_bus_for_each_resource(bus, res, i) \
1239 for (i = 0; \ 1242 for (i = 0; \
1240 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ 1243 (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 99d366cb0e9f..d157983b84cf 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -3084,4 +3084,6 @@
3084 3084
3085#define PCI_VENDOR_ID_OCZ 0x1b85 3085#define PCI_VENDOR_ID_OCZ 0x1b85
3086 3086
3087#define PCI_VENDOR_ID_NCUBE 0x10ff
3088
3087#endif /* _LINUX_PCI_IDS_H */ 3089#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b..9f0aa1b48c78 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Driver for Texas Instruments INA219, INA226 power monitor chips 2 * Driver for Texas Instruments INA219, INA226 power monitor chips
3 * 3 *
4 * Copyright (C) 2012 Lothar Felten <l-felten@ti.com> 4 * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/quota.h b/include/linux/quota.h
index ca9772c8e48b..f32dd270b8e3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -408,13 +408,7 @@ struct qc_type_state {
408 408
409struct qc_state { 409struct qc_state {
410 unsigned int s_incoredqs; /* Number of dquots in core */ 410 unsigned int s_incoredqs; /* Number of dquots in core */
411 /* 411 struct qc_type_state s_state[MAXQUOTAS]; /* Per quota type information */
412 * Per quota type information. The array should really have
413 * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
414 * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
415 * supports project quotas, this can be changed to MAXQUOTAS
416 */
417 struct qc_type_state s_state[XQM_MAXQUOTAS];
418}; 412};
419 413
420/* Structure for communicating via ->set_info */ 414/* Structure for communicating via ->set_info */
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3468703d663a..a459a5e973a7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -48,9 +48,9 @@ struct regulator;
48 * DISABLE_IN_SUSPEND - turn off regulator in suspend states 48 * DISABLE_IN_SUSPEND - turn off regulator in suspend states
49 * ENABLE_IN_SUSPEND - keep regulator on in suspend states 49 * ENABLE_IN_SUSPEND - keep regulator on in suspend states
50 */ 50 */
51#define DO_NOTHING_IN_SUSPEND (-1) 51#define DO_NOTHING_IN_SUSPEND 0
52#define DISABLE_IN_SUSPEND 0 52#define DISABLE_IN_SUSPEND 1
53#define ENABLE_IN_SUSPEND 1 53#define ENABLE_IN_SUSPEND 2
54 54
55/* Regulator active discharge flags */ 55/* Regulator active discharge flags */
56enum regulator_active_discharge { 56enum regulator_active_discharge {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index b2bd4b4127c4..69ee30456864 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
81 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes 81 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
82 * @data.buswidth: number of IO lanes used to send/receive the data 82 * @data.buswidth: number of IO lanes used to send/receive the data
83 * @data.dir: direction of the transfer 83 * @data.dir: direction of the transfer
84 * @data.buf.in: input buffer 84 * @data.nbytes: number of data bytes to send/receive. Can be zero if the
85 * @data.buf.out: output buffer 85 * operation does not involve transferring data
86 * @data.buf.in: input buffer (must be DMA-able)
87 * @data.buf.out: output buffer (must be DMA-able)
86 */ 88 */
87struct spi_mem_op { 89struct spi_mem_op {
88 struct { 90 struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
105 u8 buswidth; 107 u8 buswidth;
106 enum spi_mem_data_dir dir; 108 enum spi_mem_data_dir dir;
107 unsigned int nbytes; 109 unsigned int nbytes;
108 /* buf.{in,out} must be DMA-able. */
109 union { 110 union {
110 void *in; 111 void *in;
111 const void *out; 112 const void *out;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
30 30
31#define MTL_MAX_RX_QUEUES 8 31#define MTL_MAX_RX_QUEUES 8
32#define MTL_MAX_TX_QUEUES 8 32#define MTL_MAX_TX_QUEUES 8
33#define STMMAC_CH_MAX 8
33 34
34#define STMMAC_RX_COE_NONE 0 35#define STMMAC_RX_COE_NONE 0
35#define STMMAC_RX_COE_TYPE1 1 36#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 5d738804e3d6..a5a3cfc3c2fa 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
258extern int persistent_clock_is_local; 258extern int persistent_clock_is_local;
259 259
260extern void read_persistent_clock64(struct timespec64 *ts); 260extern void read_persistent_clock64(struct timespec64 *ts);
261void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock, 261void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
262 struct timespec64 *boot_offset); 262 struct timespec64 *boot_offset);
263extern int update_persistent_clock64(struct timespec64 now); 263extern int update_persistent_clock64(struct timespec64 now);
264 264
265/* 265/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 7f2e16e76ac4..041f7e56a289 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
158 * For rcuidle callers, use srcu since sched-rcu \ 158 * For rcuidle callers, use srcu since sched-rcu \
159 * doesn't work from the idle path. \ 159 * doesn't work from the idle path. \
160 */ \ 160 */ \
161 if (rcuidle) \ 161 if (rcuidle) { \
162 idx = srcu_read_lock_notrace(&tracepoint_srcu); \ 162 idx = srcu_read_lock_notrace(&tracepoint_srcu); \
163 rcu_irq_enter_irqson(); \
164 } \
163 \ 165 \
164 it_func_ptr = rcu_dereference_raw((tp)->funcs); \ 166 it_func_ptr = rcu_dereference_raw((tp)->funcs); \
165 \ 167 \
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
171 } while ((++it_func_ptr)->func); \ 173 } while ((++it_func_ptr)->func); \
172 } \ 174 } \
173 \ 175 \
174 if (rcuidle) \ 176 if (rcuidle) { \
177 rcu_irq_exit_irqson(); \
175 srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ 178 srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
179 } \
176 \ 180 \
177 preempt_enable_notrace(); \ 181 preempt_enable_notrace(); \
178 } while (0) 182 } while (0)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 409c845d4cd3..422b1c01ee0d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
172static __always_inline __must_check 172static __always_inline __must_check
173size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) 173size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
174{ 174{
175 if (unlikely(!check_copy_size(addr, bytes, false))) 175 if (unlikely(!check_copy_size(addr, bytes, true)))
176 return 0; 176 return 0;
177 else 177 else
178 return _copy_to_iter_mcsafe(addr, bytes, i); 178 return _copy_to_iter_mcsafe(addr, bytes, i);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
133 * @can_switch: check if the device is in a position to switch now. 133 * @can_switch: check if the device is in a position to switch now.
134 * Mandatory. The client should return false if a user space process 134 * Mandatory. The client should return false if a user space process
135 * has one of its device files open 135 * has one of its device files open
136 * @gpu_bound: notify the client id to audio client when the GPU is bound.
136 * 137 *
137 * Client callbacks. A client can be either a GPU or an audio device on a GPU. 138 * Client callbacks. A client can be either a GPU or an audio device on a GPU.
138 * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be 139 * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
139 * set to NULL. For audio clients, the @reprobe member is bogus. 140 * set to NULL. For audio clients, the @reprobe member is bogus.
141 * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
140 */ 142 */
141struct vga_switcheroo_client_ops { 143struct vga_switcheroo_client_ops {
142 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state); 144 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
143 void (*reprobe)(struct pci_dev *dev); 145 void (*reprobe)(struct pci_dev *dev);
144 bool (*can_switch)(struct pci_dev *dev); 146 bool (*can_switch)(struct pci_dev *dev);
147 void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
145}; 148};
146 149
147#if defined(CONFIG_VGA_SWITCHEROO) 150#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
105#ifdef CONFIG_DEBUG_VM_VMACACHE 105#ifdef CONFIG_DEBUG_VM_VMACACHE
106 VMACACHE_FIND_CALLS, 106 VMACACHE_FIND_CALLS,
107 VMACACHE_FIND_HITS, 107 VMACACHE_FIND_HITS,
108 VMACACHE_FULL_FLUSHES,
109#endif 108#endif
110#ifdef CONFIG_SWAP 109#ifdef CONFIG_SWAP
111 SWAP_RA, 110 SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index 3e9a963edd6a..6fce268a4588 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); 10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
11} 11}
12 12
13extern void vmacache_flush_all(struct mm_struct *mm);
14extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); 13extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
15extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, 14extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
16 unsigned long addr); 15 unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
24static inline void vmacache_invalidate(struct mm_struct *mm) 23static inline void vmacache_invalidate(struct mm_struct *mm)
25{ 24{
26 mm->vmacache_seqnum++; 25 mm->vmacache_seqnum++;
27
28 /* deal with overflows */
29 if (unlikely(mm->vmacache_seqnum == 0))
30 vmacache_flush_all(mm);
31} 26}
32 27
33#endif /* __LINUX_VMACACHE_H */ 28#endif /* __LINUX_VMACACHE_H */
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index ea73fef8bdc0..8586cfb49828 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
38 * @prio: priority of the file handler, as defined by &enum v4l2_priority 38 * @prio: priority of the file handler, as defined by &enum v4l2_priority
39 * 39 *
40 * @wait: event' s wait queue 40 * @wait: event' s wait queue
41 * @subscribe_lock: serialise changes to the subscribed list; guarantee that
42 * the add and del event callbacks are orderly called
41 * @subscribed: list of subscribed events 43 * @subscribed: list of subscribed events
42 * @available: list of events waiting to be dequeued 44 * @available: list of events waiting to be dequeued
43 * @navailable: number of available events at @available list 45 * @navailable: number of available events at @available list
44 * @sequence: event sequence number 46 * @sequence: event sequence number
47 *
45 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx 48 * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
46 */ 49 */
47struct v4l2_fh { 50struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
52 55
53 /* Events */ 56 /* Events */
54 wait_queue_head_t wait; 57 wait_queue_head_t wait;
58 struct mutex subscribe_lock;
55 struct list_head subscribed; 59 struct list_head subscribed;
56 struct list_head available; 60 struct list_head available;
57 unsigned int navailable; 61 unsigned int navailable;
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1ad5b19e83a9..970303448c90 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,13 +23,11 @@ struct tc_action {
23 const struct tc_action_ops *ops; 23 const struct tc_action_ops *ops;
24 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 24 __u32 type; /* for backward compat(TCA_OLD_COMPAT) */
25 __u32 order; 25 __u32 order;
26 struct list_head list;
27 struct tcf_idrinfo *idrinfo; 26 struct tcf_idrinfo *idrinfo;
28 27
29 u32 tcfa_index; 28 u32 tcfa_index;
30 refcount_t tcfa_refcnt; 29 refcount_t tcfa_refcnt;
31 atomic_t tcfa_bindcnt; 30 atomic_t tcfa_bindcnt;
32 u32 tcfa_capab;
33 int tcfa_action; 31 int tcfa_action;
34 struct tcf_t tcfa_tm; 32 struct tcf_t tcfa_tm;
35 struct gnet_stats_basic_packed tcfa_bstats; 33 struct gnet_stats_basic_packed tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
44#define tcf_index common.tcfa_index 42#define tcf_index common.tcfa_index
45#define tcf_refcnt common.tcfa_refcnt 43#define tcf_refcnt common.tcfa_refcnt
46#define tcf_bindcnt common.tcfa_bindcnt 44#define tcf_bindcnt common.tcfa_bindcnt
47#define tcf_capab common.tcfa_capab
48#define tcf_action common.tcfa_action 45#define tcf_action common.tcfa_action
49#define tcf_tm common.tcfa_tm 46#define tcf_tm common.tcfa_tm
50#define tcf_bstats common.tcfa_bstats 47#define tcf_bstats common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
102 size_t (*get_fill_size)(const struct tc_action *act); 99 size_t (*get_fill_size)(const struct tc_action *act);
103 struct net_device *(*get_dev)(const struct tc_action *a); 100 struct net_device *(*get_dev)(const struct tc_action *a);
104 void (*put_dev)(struct net_device *dev); 101 void (*put_dev)(struct net_device *dev);
105 int (*delete)(struct net *net, u32 index);
106}; 102};
107 103
108struct tc_action_net { 104struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
148 const struct tc_action_ops *ops, 144 const struct tc_action_ops *ops,
149 struct netlink_ext_ack *extack); 145 struct netlink_ext_ack *extack);
150int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index); 146int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
151bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
152 int bind);
153int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, 147int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
154 struct tc_action **a, const struct tc_action_ops *ops, 148 struct tc_action **a, const struct tc_action_ops *ops,
155 int bind, bool cpustats); 149 int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
158void tcf_idr_cleanup(struct tc_action_net *tn, u32 index); 152void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
159int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index, 153int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
160 struct tc_action **a, int bind); 154 struct tc_action **a, int bind);
161int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
162int __tcf_idr_release(struct tc_action *a, bool bind, bool strict); 155int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
163 156
164static inline int tcf_idr_release(struct tc_action *a, bool bind) 157static inline int tcf_idr_release(struct tc_action *a, bool bind)
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a2d058170ea3..b46d68acf701 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
139 int mode; 139 int mode;
140}; 140};
141 141
142struct netdev_notify_work {
143 struct delayed_work work;
144 struct net_device *dev;
145 struct netdev_bonding_info bonding_info;
146};
147
148struct slave { 142struct slave {
149 struct net_device *dev; /* first - useful for panic debug */ 143 struct net_device *dev; /* first - useful for panic debug */
150 struct bonding *bond; /* our master */ 144 struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
172#ifdef CONFIG_NET_POLL_CONTROLLER 166#ifdef CONFIG_NET_POLL_CONTROLLER
173 struct netpoll *np; 167 struct netpoll *np;
174#endif 168#endif
169 struct delayed_work notify_work;
175 struct kobject kobj; 170 struct kobject kobj;
176 struct rtnl_link_stats64 slave_stats; 171 struct rtnl_link_stats64 slave_stats;
177}; 172};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9a850973e09a..4de121e24ce5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
4852 * 4852 *
4853 * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried. 4853 * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
4854 * @freq: the freqency(in MHz) to be queried. 4854 * @freq: the freqency(in MHz) to be queried.
4855 * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
4856 * irrelevant). This can be used later for deduplication.
4857 * @rule: pointer to store the wmm rule from the regulatory db. 4855 * @rule: pointer to store the wmm rule from the regulatory db.
4858 * 4856 *
4859 * Self-managed wireless drivers can use this function to query 4857 * Self-managed wireless drivers can use this function to query
@@ -4865,8 +4863,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
4865 * 4863 *
4866 * Return: 0 on success. -ENODATA. 4864 * Return: 0 on success. -ENODATA.
4867 */ 4865 */
4868int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr, 4866int reg_query_regdb_wmm(char *alpha2, int freq,
4869 struct ieee80211_wmm_rule *rule); 4867 struct ieee80211_reg_rule *rule);
4870 4868
4871/* 4869/*
4872 * callbacks for asynchronous cfg80211 methods, notification 4870 * callbacks for asynchronous cfg80211 methods, notification
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e03b93360f33..a80fd0ac4563 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
130 return sk->sk_bound_dev_if; 130 return sk->sk_bound_dev_if;
131} 131}
132 132
133static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
134{
135 return rcu_dereference_check(ireq->ireq_opt,
136 refcount_read(&ireq->req.rsk_refcnt) > 0);
137}
138
139struct inet_cork { 133struct inet_cork {
140 unsigned int flags; 134 unsigned int flags;
141 __be32 addr; 135 __be32 addr;
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d5f62cc6c2ae..3394d75e1c80 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
30}; 30};
31 31
32static inline unsigned int * 32static inline unsigned int *
33nf_ct_timeout_data(struct nf_conn_timeout *t) 33nf_ct_timeout_data(const struct nf_conn_timeout *t)
34{ 34{
35 struct nf_ct_timeout *timeout; 35 struct nf_ct_timeout *timeout;
36 36
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0c154f98e987..39e1d875d507 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -153,7 +153,7 @@
153 * nla_find() find attribute in stream of attributes 153 * nla_find() find attribute in stream of attributes
154 * nla_find_nested() find attribute in nested attributes 154 * nla_find_nested() find attribute in nested attributes
155 * nla_parse() parse and validate stream of attrs 155 * nla_parse() parse and validate stream of attrs
156 * nla_parse_nested() parse nested attribuets 156 * nla_parse_nested() parse nested attributes
157 * nla_for_each_attr() loop over all attributes 157 * nla_for_each_attr() loop over all attributes
158 * nla_for_each_nested() loop over the nested attributes 158 * nla_for_each_nested() loop over the nested attributes
159 *========================================================================= 159 *=========================================================================
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
87 * According to specification 102 622 chapter 4.4 Pipes, 87 * According to specification 102 622 chapter 4.4 Pipes,
88 * the pipe identifier is 7 bits long. 88 * the pipe identifier is 7 bits long.
89 */ 89 */
90#define NFC_HCI_MAX_PIPES 127 90#define NFC_HCI_MAX_PIPES 128
91struct nfc_hci_init_data { 91struct nfc_hci_init_data {
92 u8 gate_count; 92 u8 gate_count;
93 struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; 93 struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ef727f71336e..75a3f3fdb359 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
298#endif 298#endif
299} 299}
300 300
301static inline void tcf_exts_to_list(const struct tcf_exts *exts,
302 struct list_head *actions)
303{
304#ifdef CONFIG_NET_CLS_ACT 301#ifdef CONFIG_NET_CLS_ACT
305 int i; 302#define tcf_exts_for_each_action(i, a, exts) \
306 303 for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
307 for (i = 0; i < exts->nr_actions; i++) { 304#else
308 struct tc_action *a = exts->actions[i]; 305#define tcf_exts_for_each_action(i, a, exts) \
309 306 for (; 0; (void)(i), (void)(a), (void)(exts))
310 list_add_tail(&a->list, actions);
311 }
312#endif 307#endif
313}
314 308
315static inline void 309static inline void
316tcf_exts_stats_update(const struct tcf_exts *exts, 310tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
361#endif 355#endif
362} 356}
363 357
358static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
359{
360#ifdef CONFIG_NET_CLS_ACT
361 return exts->actions[0];
362#else
363 return NULL;
364#endif
365}
366
364/** 367/**
365 * tcf_exts_exec - execute tc filter extensions 368 * tcf_exts_exec - execute tc filter extensions
366 * @skb: socket buffer 369 * @skb: socket buffer
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index 60f8cc86a447..3469750df0f4 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
217struct ieee80211_reg_rule { 217struct ieee80211_reg_rule {
218 struct ieee80211_freq_range freq_range; 218 struct ieee80211_freq_range freq_range;
219 struct ieee80211_power_rule power_rule; 219 struct ieee80211_power_rule power_rule;
220 struct ieee80211_wmm_rule *wmm_rule; 220 struct ieee80211_wmm_rule wmm_rule;
221 u32 flags; 221 u32 flags;
222 u32 dfs_cac_ms; 222 u32 dfs_cac_ms;
223 bool has_wmm;
223}; 224};
224 225
225struct ieee80211_regdomain { 226struct ieee80211_regdomain {
226 struct rcu_head rcu_head; 227 struct rcu_head rcu_head;
227 u32 n_reg_rules; 228 u32 n_reg_rules;
228 u32 n_wmm_rules;
229 char alpha2[3]; 229 char alpha2[3];
230 enum nl80211_dfs_regions dfs_region; 230 enum nl80211_dfs_regions dfs_region;
231 struct ieee80211_reg_rule reg_rules[]; 231 struct ieee80211_reg_rule reg_rules[];
diff --git a/include/net/tls.h b/include/net/tls.h
index d5c683e8bb22..0a769cf2f5f3 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -171,15 +171,14 @@ struct cipher_context {
171 char *rec_seq; 171 char *rec_seq;
172}; 172};
173 173
174union tls_crypto_context {
175 struct tls_crypto_info info;
176 struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
177};
178
174struct tls_context { 179struct tls_context {
175 union { 180 union tls_crypto_context crypto_send;
176 struct tls_crypto_info crypto_send; 181 union tls_crypto_context crypto_recv;
177 struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
178 };
179 union {
180 struct tls_crypto_info crypto_recv;
181 struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
182 };
183 182
184 struct list_head list; 183 struct list_head list;
185 struct net_device *netdev; 184 struct net_device *netdev;
@@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
367 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE 366 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
368 */ 367 */
369 buf[0] = record_type; 368 buf[0] = record_type;
370 buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version); 369 buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
371 buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version); 370 buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
372 /* we can use IV for nonce explicit according to spec */ 371 /* we can use IV for nonce explicit according to spec */
373 buf[3] = pkt_len >> 8; 372 buf[3] = pkt_len >> 8;
374 buf[4] = pkt_len & 0xFF; 373 buf[4] = pkt_len & 0xFF;
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6f1e1f3b3063..cd1773d0e08f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
412void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus); 412void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
413void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus); 413void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
414void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); 414void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
415int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
415 416
416void snd_hdac_bus_update_rirb(struct hdac_bus *bus); 417void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
417int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, 418int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af9ef16cc34d..fdaaafdc7a00 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
407int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); 407int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
408void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); 408void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
409int snd_soc_dapm_new_pcm(struct snd_soc_card *card, 409int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
410 struct snd_soc_pcm_runtime *rtd,
410 const struct snd_soc_pcm_stream *params, 411 const struct snd_soc_pcm_stream *params,
411 unsigned int num_params, 412 unsigned int num_params,
412 struct snd_soc_dapm_widget *source, 413 struct snd_soc_dapm_widget *source,
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 196587b8f204..837393fa897b 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
56 rxrpc_peer_new, 56 rxrpc_peer_new,
57 rxrpc_peer_processing, 57 rxrpc_peer_processing,
58 rxrpc_peer_put, 58 rxrpc_peer_put,
59 rxrpc_peer_queued_error,
60}; 59};
61 60
62enum rxrpc_conn_trace { 61enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
257 EM(rxrpc_peer_got, "GOT") \ 256 EM(rxrpc_peer_got, "GOT") \
258 EM(rxrpc_peer_new, "NEW") \ 257 EM(rxrpc_peer_new, "NEW") \
259 EM(rxrpc_peer_processing, "PRO") \ 258 EM(rxrpc_peer_processing, "PRO") \
260 EM(rxrpc_peer_put, "PUT") \ 259 E_(rxrpc_peer_put, "PUT")
261 E_(rxrpc_peer_queued_error, "QER")
262 260
263#define rxrpc_conn_traces \ 261#define rxrpc_conn_traces \
264 EM(rxrpc_conn_got, "GOT") \ 262 EM(rxrpc_conn_got, "GOT") \
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 07548de5c988..251be353f950 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
952#define KVM_CAP_S390_HPAGE_1M 156 952#define KVM_CAP_S390_HPAGE_1M 156
953#define KVM_CAP_NESTED_STATE 157 953#define KVM_CAP_NESTED_STATE 157
954#define KVM_CAP_ARM_INJECT_SERROR_ESR 158 954#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
955#define KVM_CAP_MSR_PLATFORM_INFO 159
955 956
956#ifdef KVM_CAP_IRQ_ROUTING 957#ifdef KVM_CAP_IRQ_ROUTING
957 958
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index eeb787b1c53c..f35eb72739c0 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
144 144
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
146 146
147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, 147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
148}; 148};
149 149
150/* 150/*
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index dc520e1a4123..8b73cb603c5f 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -37,6 +37,7 @@
37 37
38#include <linux/types.h> 38#include <linux/types.h>
39#include <linux/socket.h> /* For __kernel_sockaddr_storage. */ 39#include <linux/socket.h> /* For __kernel_sockaddr_storage. */
40#include <linux/in6.h> /* For struct in6_addr. */
40 41
41#define RDS_IB_ABI_VERSION 0x301 42#define RDS_IB_ABI_VERSION 0x301
42 43
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index b1e22c40c4b6..84c3de89696a 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -176,7 +176,7 @@ struct vhost_memory {
176#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1 176#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
177 177
178#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64) 178#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
179#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64) 179#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
180 180
181/* VHOST_NET specific defines */ 181/* VHOST_NET specific defines */
182 182
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f58cafa42f18..f39352cef382 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -10,6 +10,8 @@
10#ifndef __HDA_TPLG_INTERFACE_H__ 10#ifndef __HDA_TPLG_INTERFACE_H__
11#define __HDA_TPLG_INTERFACE_H__ 11#define __HDA_TPLG_INTERFACE_H__
12 12
13#include <linux/types.h>
14
13/* 15/*
14 * Default types range from 0~12. type can range from 0 to 0xff 16 * Default types range from 0~12. type can range from 0 to 0xff
15 * SST types start at higher to avoid any overlapping in future 17 * SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
143}; 145};
144 146
145struct skl_dfw_algo_data { 147struct skl_dfw_algo_data {
146 u32 set_params:2; 148 __u32 set_params:2;
147 u32 rsvd:30; 149 __u32 rsvd:30;
148 u32 param_id; 150 __u32 param_id;
149 u32 max; 151 __u32 max;
150 char params[0]; 152 char params[0];
151} __packed; 153} __packed;
152 154
@@ -163,68 +165,68 @@ enum skl_tuple_type {
163/* v4 configuration data */ 165/* v4 configuration data */
164 166
165struct skl_dfw_v4_module_pin { 167struct skl_dfw_v4_module_pin {
166 u16 module_id; 168 __u16 module_id;
167 u16 instance_id; 169 __u16 instance_id;
168} __packed; 170} __packed;
169 171
170struct skl_dfw_v4_module_fmt { 172struct skl_dfw_v4_module_fmt {
171 u32 channels; 173 __u32 channels;
172 u32 freq; 174 __u32 freq;
173 u32 bit_depth; 175 __u32 bit_depth;
174 u32 valid_bit_depth; 176 __u32 valid_bit_depth;
175 u32 ch_cfg; 177 __u32 ch_cfg;
176 u32 interleaving_style; 178 __u32 interleaving_style;
177 u32 sample_type; 179 __u32 sample_type;
178 u32 ch_map; 180 __u32 ch_map;
179} __packed; 181} __packed;
180 182
181struct skl_dfw_v4_module_caps { 183struct skl_dfw_v4_module_caps {
182 u32 set_params:2; 184 __u32 set_params:2;
183 u32 rsvd:30; 185 __u32 rsvd:30;
184 u32 param_id; 186 __u32 param_id;
185 u32 caps_size; 187 __u32 caps_size;
186 u32 caps[HDA_SST_CFG_MAX]; 188 __u32 caps[HDA_SST_CFG_MAX];
187} __packed; 189} __packed;
188 190
189struct skl_dfw_v4_pipe { 191struct skl_dfw_v4_pipe {
190 u8 pipe_id; 192 __u8 pipe_id;
191 u8 pipe_priority; 193 __u8 pipe_priority;
192 u16 conn_type:4; 194 __u16 conn_type:4;
193 u16 rsvd:4; 195 __u16 rsvd:4;
194 u16 memory_pages:8; 196 __u16 memory_pages:8;
195} __packed; 197} __packed;
196 198
197struct skl_dfw_v4_module { 199struct skl_dfw_v4_module {
198 char uuid[SKL_UUID_STR_SZ]; 200 char uuid[SKL_UUID_STR_SZ];
199 201
200 u16 module_id; 202 __u16 module_id;
201 u16 instance_id; 203 __u16 instance_id;
202 u32 max_mcps; 204 __u32 max_mcps;
203 u32 mem_pages; 205 __u32 mem_pages;
204 u32 obs; 206 __u32 obs;
205 u32 ibs; 207 __u32 ibs;
206 u32 vbus_id; 208 __u32 vbus_id;
207 209
208 u32 max_in_queue:8; 210 __u32 max_in_queue:8;
209 u32 max_out_queue:8; 211 __u32 max_out_queue:8;
210 u32 time_slot:8; 212 __u32 time_slot:8;
211 u32 core_id:4; 213 __u32 core_id:4;
212 u32 rsvd1:4; 214 __u32 rsvd1:4;
213 215
214 u32 module_type:8; 216 __u32 module_type:8;
215 u32 conn_type:4; 217 __u32 conn_type:4;
216 u32 dev_type:4; 218 __u32 dev_type:4;
217 u32 hw_conn_type:4; 219 __u32 hw_conn_type:4;
218 u32 rsvd2:12; 220 __u32 rsvd2:12;
219 221
220 u32 params_fixup:8; 222 __u32 params_fixup:8;
221 u32 converter:8; 223 __u32 converter:8;
222 u32 input_pin_type:1; 224 __u32 input_pin_type:1;
223 u32 output_pin_type:1; 225 __u32 output_pin_type:1;
224 u32 is_dynamic_in_pin:1; 226 __u32 is_dynamic_in_pin:1;
225 u32 is_dynamic_out_pin:1; 227 __u32 is_dynamic_out_pin:1;
226 u32 is_loadable:1; 228 __u32 is_loadable:1;
227 u32 rsvd3:11; 229 __u32 rsvd3:11;
228 230
229 struct skl_dfw_v4_pipe pipe; 231 struct skl_dfw_v4_pipe pipe;
230 struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE]; 232 struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index 80b52b4945e9..a2ab516fcd2c 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
17 17
18#include <xen/page.h> 18#include <xen/page.h>
19 19
20extern bool xen_scrub_pages;
21
20static inline void xenmem_reservation_scrub_page(struct page *page) 22static inline void xenmem_reservation_scrub_page(struct page *page)
21{ 23{
22#ifdef CONFIG_XEN_SCRUB_PAGES 24 if (xen_scrub_pages)
23 clear_highpage(page); 25 clear_highpage(page);
24#endif
25} 26}
26 27
27#ifdef CONFIG_XEN_HAVE_PVMMU 28#ifdef CONFIG_XEN_HAVE_PVMMU
diff --git a/ipc/shm.c b/ipc/shm.c
index b0eb3757ab89..4cd402e4cfeb 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,6 +199,7 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
199 } 199 }
200 200
201 ipc_unlock_object(ipcp); 201 ipc_unlock_object(ipcp);
202 ipcp = ERR_PTR(-EIDRM);
202err: 203err:
203 rcu_read_unlock(); 204 rcu_read_unlock();
204 /* 205 /*
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2590700237c1..138f0302692e 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
1844 1844
1845 hdr = &btf->hdr; 1845 hdr = &btf->hdr;
1846 cur = btf->nohdr_data + hdr->type_off; 1846 cur = btf->nohdr_data + hdr->type_off;
1847 end = btf->nohdr_data + hdr->type_len; 1847 end = cur + hdr->type_len;
1848 1848
1849 env->log_type_id = 1; 1849 env->log_type_id = 1;
1850 while (cur < end) { 1850 while (cur < end) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 04b8eda94e7d..03cc59ee9c95 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -15,6 +15,7 @@
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <linux/filter.h> 16#include <linux/filter.h>
17#include <linux/rculist_nulls.h> 17#include <linux/rculist_nulls.h>
18#include <linux/random.h>
18#include <uapi/linux/btf.h> 19#include <uapi/linux/btf.h>
19#include "percpu_freelist.h" 20#include "percpu_freelist.h"
20#include "bpf_lru_list.h" 21#include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
41 atomic_t count; /* number of elements in this hashtable */ 42 atomic_t count; /* number of elements in this hashtable */
42 u32 n_buckets; /* number of hash buckets */ 43 u32 n_buckets; /* number of hash buckets */
43 u32 elem_size; /* size of each element in bytes */ 44 u32 elem_size; /* size of each element in bytes */
45 u32 hashrnd;
44}; 46};
45 47
46/* each htab element is struct htab_elem + key + value */ 48/* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
371 if (!htab->buckets) 373 if (!htab->buckets)
372 goto free_htab; 374 goto free_htab;
373 375
376 htab->hashrnd = get_random_int();
374 for (i = 0; i < htab->n_buckets; i++) { 377 for (i = 0; i < htab->n_buckets; i++) {
375 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); 378 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
376 raw_spin_lock_init(&htab->buckets[i].lock); 379 raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
402 return ERR_PTR(err); 405 return ERR_PTR(err);
403} 406}
404 407
405static inline u32 htab_map_hash(const void *key, u32 key_len) 408static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
406{ 409{
407 return jhash(key, key_len, 0); 410 return jhash(key, key_len, hashrnd);
408} 411}
409 412
410static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) 413static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
470 473
471 key_size = map->key_size; 474 key_size = map->key_size;
472 475
473 hash = htab_map_hash(key, key_size); 476 hash = htab_map_hash(key, key_size, htab->hashrnd);
474 477
475 head = select_bucket(htab, hash); 478 head = select_bucket(htab, hash);
476 479
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
597 if (!key) 600 if (!key)
598 goto find_first_elem; 601 goto find_first_elem;
599 602
600 hash = htab_map_hash(key, key_size); 603 hash = htab_map_hash(key, key_size, htab->hashrnd);
601 604
602 head = select_bucket(htab, hash); 605 head = select_bucket(htab, hash);
603 606
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
824 827
825 key_size = map->key_size; 828 key_size = map->key_size;
826 829
827 hash = htab_map_hash(key, key_size); 830 hash = htab_map_hash(key, key_size, htab->hashrnd);
828 831
829 b = __select_bucket(htab, hash); 832 b = __select_bucket(htab, hash);
830 head = &b->head; 833 head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
880 883
881 key_size = map->key_size; 884 key_size = map->key_size;
882 885
883 hash = htab_map_hash(key, key_size); 886 hash = htab_map_hash(key, key_size, htab->hashrnd);
884 887
885 b = __select_bucket(htab, hash); 888 b = __select_bucket(htab, hash);
886 head = &b->head; 889 head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
945 948
946 key_size = map->key_size; 949 key_size = map->key_size;
947 950
948 hash = htab_map_hash(key, key_size); 951 hash = htab_map_hash(key, key_size, htab->hashrnd);
949 952
950 b = __select_bucket(htab, hash); 953 b = __select_bucket(htab, hash);
951 head = &b->head; 954 head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
998 1001
999 key_size = map->key_size; 1002 key_size = map->key_size;
1000 1003
1001 hash = htab_map_hash(key, key_size); 1004 hash = htab_map_hash(key, key_size, htab->hashrnd);
1002 1005
1003 b = __select_bucket(htab, hash); 1006 b = __select_bucket(htab, hash);
1004 head = &b->head; 1007 head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
1071 1074
1072 key_size = map->key_size; 1075 key_size = map->key_size;
1073 1076
1074 hash = htab_map_hash(key, key_size); 1077 hash = htab_map_hash(key, key_size, htab->hashrnd);
1075 b = __select_bucket(htab, hash); 1078 b = __select_bucket(htab, hash);
1076 head = &b->head; 1079 head = &b->head;
1077 1080
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
1103 1106
1104 key_size = map->key_size; 1107 key_size = map->key_size;
1105 1108
1106 hash = htab_map_hash(key, key_size); 1109 hash = htab_map_hash(key, key_size, htab->hashrnd);
1107 b = __select_bucket(htab, hash); 1110 b = __select_bucket(htab, hash);
1108 head = &b->head; 1111 head = &b->head;
1109 1112
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98e621a29e8e..0a0f2ec75370 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -132,6 +132,7 @@ struct smap_psock {
132 struct work_struct gc_work; 132 struct work_struct gc_work;
133 133
134 struct proto *sk_proto; 134 struct proto *sk_proto;
135 void (*save_unhash)(struct sock *sk);
135 void (*save_close)(struct sock *sk, long timeout); 136 void (*save_close)(struct sock *sk, long timeout);
136 void (*save_data_ready)(struct sock *sk); 137 void (*save_data_ready)(struct sock *sk);
137 void (*save_write_space)(struct sock *sk); 138 void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
143static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 144static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
144static int bpf_tcp_sendpage(struct sock *sk, struct page *page, 145static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
145 int offset, size_t size, int flags); 146 int offset, size_t size, int flags);
147static void bpf_tcp_unhash(struct sock *sk);
146static void bpf_tcp_close(struct sock *sk, long timeout); 148static void bpf_tcp_close(struct sock *sk, long timeout);
147 149
148static inline struct smap_psock *smap_psock_sk(const struct sock *sk) 150static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
184 struct proto *base) 186 struct proto *base)
185{ 187{
186 prot[SOCKMAP_BASE] = *base; 188 prot[SOCKMAP_BASE] = *base;
189 prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash;
187 prot[SOCKMAP_BASE].close = bpf_tcp_close; 190 prot[SOCKMAP_BASE].close = bpf_tcp_close;
188 prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; 191 prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
189 prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; 192 prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
217 return -EBUSY; 220 return -EBUSY;
218 } 221 }
219 222
223 psock->save_unhash = sk->sk_prot->unhash;
220 psock->save_close = sk->sk_prot->close; 224 psock->save_close = sk->sk_prot->close;
221 psock->sk_proto = sk->sk_prot; 225 psock->sk_proto = sk->sk_prot;
222 226
@@ -236,7 +240,7 @@ static int bpf_tcp_init(struct sock *sk)
236} 240}
237 241
238static void smap_release_sock(struct smap_psock *psock, struct sock *sock); 242static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
239static int free_start_sg(struct sock *sk, struct sk_msg_buff *md); 243static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
240 244
241static void bpf_tcp_release(struct sock *sk) 245static void bpf_tcp_release(struct sock *sk)
242{ 246{
@@ -248,7 +252,7 @@ static void bpf_tcp_release(struct sock *sk)
248 goto out; 252 goto out;
249 253
250 if (psock->cork) { 254 if (psock->cork) {
251 free_start_sg(psock->sock, psock->cork); 255 free_start_sg(psock->sock, psock->cork, true);
252 kfree(psock->cork); 256 kfree(psock->cork);
253 psock->cork = NULL; 257 psock->cork = NULL;
254 } 258 }
@@ -305,39 +309,21 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
305 return e; 309 return e;
306} 310}
307 311
308static void bpf_tcp_close(struct sock *sk, long timeout) 312static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
309{ 313{
310 void (*close_fun)(struct sock *sk, long timeout);
311 struct smap_psock_map_entry *e; 314 struct smap_psock_map_entry *e;
312 struct sk_msg_buff *md, *mtmp; 315 struct sk_msg_buff *md, *mtmp;
313 struct smap_psock *psock;
314 struct sock *osk; 316 struct sock *osk;
315 317
316 lock_sock(sk);
317 rcu_read_lock();
318 psock = smap_psock_sk(sk);
319 if (unlikely(!psock)) {
320 rcu_read_unlock();
321 release_sock(sk);
322 return sk->sk_prot->close(sk, timeout);
323 }
324
325 /* The psock may be destroyed anytime after exiting the RCU critial
326 * section so by the time we use close_fun the psock may no longer
327 * be valid. However, bpf_tcp_close is called with the sock lock
328 * held so the close hook and sk are still valid.
329 */
330 close_fun = psock->save_close;
331
332 if (psock->cork) { 318 if (psock->cork) {
333 free_start_sg(psock->sock, psock->cork); 319 free_start_sg(psock->sock, psock->cork, true);
334 kfree(psock->cork); 320 kfree(psock->cork);
335 psock->cork = NULL; 321 psock->cork = NULL;
336 } 322 }
337 323
338 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { 324 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
339 list_del(&md->list); 325 list_del(&md->list);
340 free_start_sg(psock->sock, md); 326 free_start_sg(psock->sock, md, true);
341 kfree(md); 327 kfree(md);
342 } 328 }
343 329
@@ -369,7 +355,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
369 /* If another thread deleted this object skip deletion. 355 /* If another thread deleted this object skip deletion.
370 * The refcnt on psock may or may not be zero. 356 * The refcnt on psock may or may not be zero.
371 */ 357 */
372 if (l) { 358 if (l && l == link) {
373 hlist_del_rcu(&link->hash_node); 359 hlist_del_rcu(&link->hash_node);
374 smap_release_sock(psock, link->sk); 360 smap_release_sock(psock, link->sk);
375 free_htab_elem(htab, link); 361 free_htab_elem(htab, link);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
379 kfree(e); 365 kfree(e);
380 e = psock_map_pop(sk, psock); 366 e = psock_map_pop(sk, psock);
381 } 367 }
368}
369
370static void bpf_tcp_unhash(struct sock *sk)
371{
372 void (*unhash_fun)(struct sock *sk);
373 struct smap_psock *psock;
374
375 rcu_read_lock();
376 psock = smap_psock_sk(sk);
377 if (unlikely(!psock)) {
378 rcu_read_unlock();
379 if (sk->sk_prot->unhash)
380 sk->sk_prot->unhash(sk);
381 return;
382 }
383 unhash_fun = psock->save_unhash;
384 bpf_tcp_remove(sk, psock);
385 rcu_read_unlock();
386 unhash_fun(sk);
387}
388
389static void bpf_tcp_close(struct sock *sk, long timeout)
390{
391 void (*close_fun)(struct sock *sk, long timeout);
392 struct smap_psock *psock;
393
394 lock_sock(sk);
395 rcu_read_lock();
396 psock = smap_psock_sk(sk);
397 if (unlikely(!psock)) {
398 rcu_read_unlock();
399 release_sock(sk);
400 return sk->sk_prot->close(sk, timeout);
401 }
402 close_fun = psock->save_close;
403 bpf_tcp_remove(sk, psock);
382 rcu_read_unlock(); 404 rcu_read_unlock();
383 release_sock(sk); 405 release_sock(sk);
384 close_fun(sk, timeout); 406 close_fun(sk, timeout);
@@ -570,14 +592,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
570 md->sg_start = i; 592 md->sg_start = i;
571} 593}
572 594
573static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) 595static int free_sg(struct sock *sk, int start,
596 struct sk_msg_buff *md, bool charge)
574{ 597{
575 struct scatterlist *sg = md->sg_data; 598 struct scatterlist *sg = md->sg_data;
576 int i = start, free = 0; 599 int i = start, free = 0;
577 600
578 while (sg[i].length) { 601 while (sg[i].length) {
579 free += sg[i].length; 602 free += sg[i].length;
580 sk_mem_uncharge(sk, sg[i].length); 603 if (charge)
604 sk_mem_uncharge(sk, sg[i].length);
581 if (!md->skb) 605 if (!md->skb)
582 put_page(sg_page(&sg[i])); 606 put_page(sg_page(&sg[i]));
583 sg[i].length = 0; 607 sg[i].length = 0;
@@ -594,9 +618,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
594 return free; 618 return free;
595} 619}
596 620
597static int free_start_sg(struct sock *sk, struct sk_msg_buff *md) 621static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
598{ 622{
599 int free = free_sg(sk, md->sg_start, md); 623 int free = free_sg(sk, md->sg_start, md, charge);
600 624
601 md->sg_start = md->sg_end; 625 md->sg_start = md->sg_end;
602 return free; 626 return free;
@@ -604,7 +628,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
604 628
605static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md) 629static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
606{ 630{
607 return free_sg(sk, md->sg_curr, md); 631 return free_sg(sk, md->sg_curr, md, true);
608} 632}
609 633
610static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md) 634static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +742,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
718 list_add_tail(&r->list, &psock->ingress); 742 list_add_tail(&r->list, &psock->ingress);
719 sk->sk_data_ready(sk); 743 sk->sk_data_ready(sk);
720 } else { 744 } else {
721 free_start_sg(sk, r); 745 free_start_sg(sk, r, true);
722 kfree(r); 746 kfree(r);
723 } 747 }
724 748
@@ -752,14 +776,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
752 release_sock(sk); 776 release_sock(sk);
753 } 777 }
754 smap_release_sock(psock, sk); 778 smap_release_sock(psock, sk);
755 if (unlikely(err)) 779 return err;
756 goto out;
757 return 0;
758out_rcu: 780out_rcu:
759 rcu_read_unlock(); 781 rcu_read_unlock();
760out: 782 return 0;
761 free_bytes_sg(NULL, send, md, false);
762 return err;
763} 783}
764 784
765static inline void bpf_md_init(struct smap_psock *psock) 785static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +842,7 @@ more_data:
822 case __SK_PASS: 842 case __SK_PASS:
823 err = bpf_tcp_push(sk, send, m, flags, true); 843 err = bpf_tcp_push(sk, send, m, flags, true);
824 if (unlikely(err)) { 844 if (unlikely(err)) {
825 *copied -= free_start_sg(sk, m); 845 *copied -= free_start_sg(sk, m, true);
826 break; 846 break;
827 } 847 }
828 848
@@ -845,16 +865,17 @@ more_data:
845 lock_sock(sk); 865 lock_sock(sk);
846 866
847 if (unlikely(err < 0)) { 867 if (unlikely(err < 0)) {
848 free_start_sg(sk, m); 868 int free = free_start_sg(sk, m, false);
869
849 psock->sg_size = 0; 870 psock->sg_size = 0;
850 if (!cork) 871 if (!cork)
851 *copied -= send; 872 *copied -= free;
852 } else { 873 } else {
853 psock->sg_size -= send; 874 psock->sg_size -= send;
854 } 875 }
855 876
856 if (cork) { 877 if (cork) {
857 free_start_sg(sk, m); 878 free_start_sg(sk, m, true);
858 psock->sg_size = 0; 879 psock->sg_size = 0;
859 kfree(m); 880 kfree(m);
860 m = NULL; 881 m = NULL;
@@ -912,6 +933,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
912 933
913 if (unlikely(flags & MSG_ERRQUEUE)) 934 if (unlikely(flags & MSG_ERRQUEUE))
914 return inet_recv_error(sk, msg, len, addr_len); 935 return inet_recv_error(sk, msg, len, addr_len);
936 if (!skb_queue_empty(&sk->sk_receive_queue))
937 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
915 938
916 rcu_read_lock(); 939 rcu_read_lock();
917 psock = smap_psock_sk(sk); 940 psock = smap_psock_sk(sk);
@@ -922,9 +945,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
922 goto out; 945 goto out;
923 rcu_read_unlock(); 946 rcu_read_unlock();
924 947
925 if (!skb_queue_empty(&sk->sk_receive_queue))
926 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
927
928 lock_sock(sk); 948 lock_sock(sk);
929bytes_ready: 949bytes_ready:
930 while (copied != len) { 950 while (copied != len) {
@@ -1122,7 +1142,7 @@ wait_for_memory:
1122 err = sk_stream_wait_memory(sk, &timeo); 1142 err = sk_stream_wait_memory(sk, &timeo);
1123 if (err) { 1143 if (err) {
1124 if (m && m != psock->cork) 1144 if (m && m != psock->cork)
1125 free_start_sg(sk, m); 1145 free_start_sg(sk, m, true);
1126 goto out_err; 1146 goto out_err;
1127 } 1147 }
1128 } 1148 }
@@ -1427,12 +1447,15 @@ out:
1427static void smap_write_space(struct sock *sk) 1447static void smap_write_space(struct sock *sk)
1428{ 1448{
1429 struct smap_psock *psock; 1449 struct smap_psock *psock;
1450 void (*write_space)(struct sock *sk);
1430 1451
1431 rcu_read_lock(); 1452 rcu_read_lock();
1432 psock = smap_psock_sk(sk); 1453 psock = smap_psock_sk(sk);
1433 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) 1454 if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
1434 schedule_work(&psock->tx_work); 1455 schedule_work(&psock->tx_work);
1456 write_space = psock->save_write_space;
1435 rcu_read_unlock(); 1457 rcu_read_unlock();
1458 write_space(sk);
1436} 1459}
1437 1460
1438static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) 1461static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -1461,10 +1484,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
1461 schedule_work(&psock->gc_work); 1484 schedule_work(&psock->gc_work);
1462} 1485}
1463 1486
1487static bool psock_is_smap_sk(struct sock *sk)
1488{
1489 return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
1490}
1491
1464static void smap_release_sock(struct smap_psock *psock, struct sock *sock) 1492static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
1465{ 1493{
1466 if (refcount_dec_and_test(&psock->refcnt)) { 1494 if (refcount_dec_and_test(&psock->refcnt)) {
1467 tcp_cleanup_ulp(sock); 1495 if (psock_is_smap_sk(sock))
1496 tcp_cleanup_ulp(sock);
1468 write_lock_bh(&sock->sk_callback_lock); 1497 write_lock_bh(&sock->sk_callback_lock);
1469 smap_stop_sock(psock, sock); 1498 smap_stop_sock(psock, sock);
1470 write_unlock_bh(&sock->sk_callback_lock); 1499 write_unlock_bh(&sock->sk_callback_lock);
@@ -1578,13 +1607,13 @@ static void smap_gc_work(struct work_struct *w)
1578 bpf_prog_put(psock->bpf_tx_msg); 1607 bpf_prog_put(psock->bpf_tx_msg);
1579 1608
1580 if (psock->cork) { 1609 if (psock->cork) {
1581 free_start_sg(psock->sock, psock->cork); 1610 free_start_sg(psock->sock, psock->cork, true);
1582 kfree(psock->cork); 1611 kfree(psock->cork);
1583 } 1612 }
1584 1613
1585 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) { 1614 list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
1586 list_del(&md->list); 1615 list_del(&md->list);
1587 free_start_sg(psock->sock, md); 1616 free_start_sg(psock->sock, md, true);
1588 kfree(md); 1617 kfree(md);
1589 } 1618 }
1590 1619
@@ -1891,6 +1920,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
1891 * doesn't update user data. 1920 * doesn't update user data.
1892 */ 1921 */
1893 if (psock) { 1922 if (psock) {
1923 if (!psock_is_smap_sk(sock)) {
1924 err = -EBUSY;
1925 goto out_progs;
1926 }
1894 if (READ_ONCE(psock->bpf_parse) && parse) { 1927 if (READ_ONCE(psock->bpf_parse) && parse) {
1895 err = -EBUSY; 1928 err = -EBUSY;
1896 goto out_progs; 1929 goto out_progs;
@@ -2086,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
2086 return -EINVAL; 2119 return -EINVAL;
2087 } 2120 }
2088 2121
2122 /* ULPs are currently supported only for TCP sockets in ESTABLISHED
2123 * state.
2124 */
2089 if (skops.sk->sk_type != SOCK_STREAM || 2125 if (skops.sk->sk_type != SOCK_STREAM ||
2090 skops.sk->sk_protocol != IPPROTO_TCP) { 2126 skops.sk->sk_protocol != IPPROTO_TCP ||
2127 skops.sk->sk_state != TCP_ESTABLISHED) {
2091 fput(socket->file); 2128 fput(socket->file);
2092 return -EOPNOTSUPP; 2129 return -EOPNOTSUPP;
2093 } 2130 }
@@ -2140,7 +2177,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
2140 return ERR_PTR(-EPERM); 2177 return ERR_PTR(-EPERM);
2141 2178
2142 /* check sanity of attributes */ 2179 /* check sanity of attributes */
2143 if (attr->max_entries == 0 || attr->value_size != 4 || 2180 if (attr->max_entries == 0 ||
2181 attr->key_size == 0 ||
2182 attr->value_size != 4 ||
2144 attr->map_flags & ~SOCK_CREATE_FLAG_MASK) 2183 attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
2145 return ERR_PTR(-EINVAL); 2184 return ERR_PTR(-EINVAL);
2146 2185
@@ -2267,8 +2306,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
2267 } 2306 }
2268 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, 2307 l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
2269 htab->map.numa_node); 2308 htab->map.numa_node);
2270 if (!l_new) 2309 if (!l_new) {
2310 atomic_dec(&htab->count);
2271 return ERR_PTR(-ENOMEM); 2311 return ERR_PTR(-ENOMEM);
2312 }
2272 2313
2273 memcpy(l_new->key, key, key_size); 2314 memcpy(l_new->key, key, key_size);
2274 l_new->sk = sk; 2315 l_new->sk = sk;
@@ -2438,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
2438 return -EINVAL; 2479 return -EINVAL;
2439 } 2480 }
2440 2481
2482 /* ULPs are currently supported only for TCP sockets in ESTABLISHED
2483 * state.
2484 */
2485 if (skops.sk->sk_type != SOCK_STREAM ||
2486 skops.sk->sk_protocol != IPPROTO_TCP ||
2487 skops.sk->sk_state != TCP_ESTABLISHED) {
2488 fput(socket->file);
2489 return -EOPNOTSUPP;
2490 }
2491
2441 lock_sock(skops.sk); 2492 lock_sock(skops.sk);
2442 preempt_disable(); 2493 preempt_disable();
2443 rcu_read_lock(); 2494 rcu_read_lock();
@@ -2528,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
2528 .map_check_btf = map_check_no_btf, 2579 .map_check_btf = map_check_no_btf,
2529}; 2580};
2530 2581
2582static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
2583{
2584 return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
2585 ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
2586}
2531BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, 2587BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
2532 struct bpf_map *, map, void *, key, u64, flags) 2588 struct bpf_map *, map, void *, key, u64, flags)
2533{ 2589{
2534 WARN_ON_ONCE(!rcu_read_lock_held()); 2590 WARN_ON_ONCE(!rcu_read_lock_held());
2591
2592 /* ULPs are currently supported only for TCP sockets in ESTABLISHED
2593 * state. This checks that the sock ops triggering the update is
2594 * one indicating we are (or will be soon) in an ESTABLISHED state.
2595 */
2596 if (!bpf_is_valid_sock_op(bpf_sock))
2597 return -EOPNOTSUPP;
2535 return sock_map_ctx_update_elem(bpf_sock, map, key, flags); 2598 return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
2536} 2599}
2537 2600
@@ -2550,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
2550 struct bpf_map *, map, void *, key, u64, flags) 2613 struct bpf_map *, map, void *, key, u64, flags)
2551{ 2614{
2552 WARN_ON_ONCE(!rcu_read_lock_held()); 2615 WARN_ON_ONCE(!rcu_read_lock_held());
2616
2617 if (!bpf_is_valid_sock_op(bpf_sock))
2618 return -EOPNOTSUPP;
2553 return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); 2619 return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
2554} 2620}
2555 2621
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 92246117d2b0..bb07e74b34a2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3163,7 +3163,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
3163 * an arbitrary scalar. Disallow all math except 3163 * an arbitrary scalar. Disallow all math except
3164 * pointer subtraction 3164 * pointer subtraction
3165 */ 3165 */
3166 if (opcode == BPF_SUB){ 3166 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
3167 mark_reg_unknown(env, regs, insn->dst_reg); 3167 mark_reg_unknown(env, regs, insn->dst_reg);
3168 return 0; 3168 return 0;
3169 } 3169 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index ed44d7d34c2d..0097acec1c71 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
102 * @name: Name of the step 102 * @name: Name of the step
103 * @startup: Startup function of the step 103 * @startup: Startup function of the step
104 * @teardown: Teardown function of the step 104 * @teardown: Teardown function of the step
105 * @skip_onerr: Do not invoke the functions on error rollback
106 * Will go away once the notifiers are gone
107 * @cant_stop: Bringup/teardown can't be stopped at this step 105 * @cant_stop: Bringup/teardown can't be stopped at this step
108 */ 106 */
109struct cpuhp_step { 107struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
119 struct hlist_node *node); 117 struct hlist_node *node);
120 } teardown; 118 } teardown;
121 struct hlist_head list; 119 struct hlist_head list;
122 bool skip_onerr;
123 bool cant_stop; 120 bool cant_stop;
124 bool multi_instance; 121 bool multi_instance;
125}; 122};
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
550 547
551static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st) 548static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
552{ 549{
553 for (st->state--; st->state > st->target; st->state--) { 550 for (st->state--; st->state > st->target; st->state--)
554 struct cpuhp_step *step = cpuhp_get_step(st->state); 551 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
555
556 if (!step->skip_onerr)
557 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
558 }
559} 552}
560 553
561static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 554static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -614,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
614 bool bringup = st->bringup; 607 bool bringup = st->bringup;
615 enum cpuhp_state state; 608 enum cpuhp_state state;
616 609
610 if (WARN_ON_ONCE(!st->should_run))
611 return;
612
617 /* 613 /*
618 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures 614 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
619 * that if we see ->should_run we also see the rest of the state. 615 * that if we see ->should_run we also see the rest of the state.
620 */ 616 */
621 smp_mb(); 617 smp_mb();
622 618
623 if (WARN_ON_ONCE(!st->should_run))
624 return;
625
626 cpuhp_lock_acquire(bringup); 619 cpuhp_lock_acquire(bringup);
627 620
628 if (st->single) { 621 if (st->single) {
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
644 637
645 WARN_ON_ONCE(!cpuhp_is_ap_state(state)); 638 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
646 639
647 if (st->rollback) {
648 struct cpuhp_step *step = cpuhp_get_step(state);
649 if (step->skip_onerr)
650 goto next;
651 }
652
653 if (cpuhp_is_atomic_state(state)) { 640 if (cpuhp_is_atomic_state(state)) {
654 local_irq_disable(); 641 local_irq_disable();
655 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); 642 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
673 st->should_run = false; 660 st->should_run = false;
674 } 661 }
675 662
676next:
677 cpuhp_lock_release(bringup); 663 cpuhp_lock_release(bringup);
678 664
679 if (!st->should_run) 665 if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
916 902
917static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) 903static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
918{ 904{
919 for (st->state++; st->state < st->target; st->state++) { 905 for (st->state++; st->state < st->target; st->state++)
920 struct cpuhp_step *step = cpuhp_get_step(st->state); 906 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
921
922 if (!step->skip_onerr)
923 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
924 }
925} 907}
926 908
927static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 909static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -934,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
934 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 916 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
935 if (ret) { 917 if (ret) {
936 st->target = prev_state; 918 st->target = prev_state;
937 undo_cpu_down(cpu, st); 919 if (st->state < prev_state)
920 undo_cpu_down(cpu, st);
938 break; 921 break;
939 } 922 }
940 } 923 }
@@ -987,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
987 * to do the further cleanups. 970 * to do the further cleanups.
988 */ 971 */
989 ret = cpuhp_down_callbacks(cpu, st, target); 972 ret = cpuhp_down_callbacks(cpu, st, target);
990 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) { 973 if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
991 cpuhp_reset_state(st, prev_state); 974 cpuhp_reset_state(st, prev_state);
992 __cpuhp_kick_ap(st); 975 __cpuhp_kick_ap(st);
993 } 976 }
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 9bd54304446f..1b1d63b3634b 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
23 bool 23 bool
24 select NEED_DMA_MAP_STATE 24 select NEED_DMA_MAP_STATE
25 25
26config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
27 bool
28
26config DMA_DIRECT_OPS 29config DMA_DIRECT_OPS
27 bool 30 bool
28 depends on HAS_DMA 31 depends on HAS_DMA
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 1c35b7b945d0..de87b0282e74 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
168int dma_direct_supported(struct device *dev, u64 mask) 168int dma_direct_supported(struct device *dev, u64 mask)
169{ 169{
170#ifdef CONFIG_ZONE_DMA 170#ifdef CONFIG_ZONE_DMA
171 if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) 171 if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
172 return 0; 172 return 0;
173#else 173#else
174 /* 174 /*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
177 * memory, or by providing a ZONE_DMA32. If neither is the case, the 177 * memory, or by providing a ZONE_DMA32. If neither is the case, the
178 * architecture needs to use an IOMMU instead of the direct mapping. 178 * architecture needs to use an IOMMU instead of the direct mapping.
179 */ 179 */
180 if (mask < DMA_BIT_MASK(32)) 180 if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
181 return 0; 181 return 0;
182#endif 182#endif
183 /* 183 /*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2a62b96600ad..dcb093e7b377 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
2867 _perf_event_disable(bp); 2867 _perf_event_disable(bp);
2868 2868
2869 err = modify_user_hw_breakpoint_check(bp, attr, true); 2869 err = modify_user_hw_breakpoint_check(bp, attr, true);
2870 if (err) {
2871 if (!bp->attr.disabled)
2872 _perf_event_enable(bp);
2873 2870
2874 return err; 2871 if (!bp->attr.disabled)
2875 }
2876
2877 if (!attr->disabled)
2878 _perf_event_enable(bp); 2872 _perf_event_enable(bp);
2879 return 0; 2873
2874 return err;
2880} 2875}
2881 2876
2882static int perf_event_modify_attr(struct perf_event *event, 2877static int perf_event_modify_attr(struct perf_event *event,
@@ -3940,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
3940 goto out; 3935 goto out;
3941 } 3936 }
3942 3937
3938 /* If this is a pinned event it must be running on this CPU */
3939 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
3940 ret = -EBUSY;
3941 goto out;
3942 }
3943
3943 /* 3944 /*
3944 * If the event is currently on this CPU, its either a per-task event, 3945 * If the event is currently on this CPU, its either a per-task event,
3945 * or local to this CPU. Furthermore it means its ACTIVE (otherwise 3946 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
@@ -5948,6 +5949,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5948 unsigned long sp; 5949 unsigned long sp;
5949 unsigned int rem; 5950 unsigned int rem;
5950 u64 dyn_size; 5951 u64 dyn_size;
5952 mm_segment_t fs;
5951 5953
5952 /* 5954 /*
5953 * We dump: 5955 * We dump:
@@ -5965,7 +5967,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5965 5967
5966 /* Data. */ 5968 /* Data. */
5967 sp = perf_user_stack_pointer(regs); 5969 sp = perf_user_stack_pointer(regs);
5970 fs = get_fs();
5971 set_fs(USER_DS);
5968 rem = __output_copy_user(handle, (void *) sp, dump_size); 5972 rem = __output_copy_user(handle, (void *) sp, dump_size);
5973 set_fs(fs);
5969 dyn_size = dump_size - rem; 5974 dyn_size = dump_size - rem;
5970 5975
5971 perf_output_skip(handle, rem); 5976 perf_output_skip(handle, rem);
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b3814fce5ecb..d6b56180827c 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a
509 */ 509 */
510int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) 510int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
511{ 511{
512 int err;
513
512 /* 514 /*
513 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it 515 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
514 * will not be possible to raise IPIs that invoke __perf_event_disable. 516 * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -520,15 +522,12 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
520 else 522 else
521 perf_event_disable(bp); 523 perf_event_disable(bp);
522 524
523 if (!attr->disabled) { 525 err = modify_user_hw_breakpoint_check(bp, attr, false);
524 int err = modify_user_hw_breakpoint_check(bp, attr, false);
525 526
526 if (err) 527 if (!bp->attr.disabled)
527 return err;
528 perf_event_enable(bp); 528 perf_event_enable(bp);
529 bp->attr.disabled = 0; 529
530 } 530 return err;
531 return 0;
532} 531}
533EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); 532EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
534 533
diff --git a/kernel/fork.c b/kernel/fork.c
index d896e9ca38b0..f0b58479534f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
550 goto out; 550 goto out;
551 } 551 }
552 /* a new mm has just been created */ 552 /* a new mm has just been created */
553 arch_dup_mmap(oldmm, mm); 553 retval = arch_dup_mmap(oldmm, mm);
554 retval = 0;
555out: 554out:
556 up_write(&mm->mmap_sem); 555 up_write(&mm->mmap_sem);
557 flush_tlb_mm(oldmm); 556 flush_tlb_mm(oldmm);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 01ebdf1f9f40..2e62503bea0d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
678 case MODULE_STATE_COMING: 678 case MODULE_STATE_COMING:
679 ret = jump_label_add_module(mod); 679 ret = jump_label_add_module(mod);
680 if (ret) { 680 if (ret) {
681 WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n"); 681 WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
682 jump_label_del_module(mod); 682 jump_label_del_module(mod);
683 } 683 }
684 break; 684 break;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e406c5fdb41e..dd13f865ad40 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,7 +55,6 @@
55 55
56#include "lockdep_internals.h" 56#include "lockdep_internals.h"
57 57
58#include <trace/events/preemptirq.h>
59#define CREATE_TRACE_POINTS 58#define CREATE_TRACE_POINTS
60#include <trace/events/lock.h> 59#include <trace/events/lock.h>
61 60
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 1a81a1257b3f..3f8a35104285 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
389 /* 389 /*
390 * wake_up_process() paired with set_current_state() 390 * wake_up_process() paired with set_current_state()
391 * inserts sufficient barriers to make sure @owner either sees 391 * inserts sufficient barriers to make sure @owner either sees
392 * it's wounded in __ww_mutex_lock_check_stamp() or has a 392 * it's wounded in __ww_mutex_check_kill() or has a
393 * wakeup pending to re-read the wounded state. 393 * wakeup pending to re-read the wounded state.
394 */ 394 */
395 if (owner != current) 395 if (owner != current)
@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
946 } 946 }
947 947
948 debug_mutex_lock_common(lock, &waiter); 948 debug_mutex_lock_common(lock, &waiter);
949 debug_mutex_add_waiter(lock, &waiter, current);
950 949
951 lock_contended(&lock->dep_map, ip); 950 lock_contended(&lock->dep_map, ip);
952 951
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5b915b370d5a..0be047dbd897 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -324,7 +324,7 @@ static int __test_cycle(unsigned int nthreads)
324 if (!cycle->result) 324 if (!cycle->result)
325 continue; 325 continue;
326 326
327 pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n", 327 pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
328 n, nthreads, cycle->result); 328 n, nthreads, cycle->result);
329 ret = -EINVAL; 329 ret = -EINVAL;
330 break; 330 break;
diff --git a/kernel/pid.c b/kernel/pid.c
index de1cfc4f75a2..cdf63e53a014 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
195 idr_preload_end(); 195 idr_preload_end();
196 196
197 if (nr < 0) { 197 if (nr < 0) {
198 retval = nr; 198 retval = (nr == -ENOSPC) ? -EAGAIN : nr;
199 goto out_free; 199 goto out_free;
200 } 200 }
201 201
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 924e37fb1620..9bf5404397e0 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -38,7 +38,6 @@
38#include <linux/kmsg_dump.h> 38#include <linux/kmsg_dump.h>
39#include <linux/syslog.h> 39#include <linux/syslog.h>
40#include <linux/cpu.h> 40#include <linux/cpu.h>
41#include <linux/notifier.h>
42#include <linux/rculist.h> 41#include <linux/rculist.h>
43#include <linux/poll.h> 42#include <linux/poll.h>
44#include <linux/irq_work.h> 43#include <linux/irq_work.h>
@@ -352,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
352 */ 351 */
353 352
354enum log_flags { 353enum log_flags {
355 LOG_NOCONS = 1, /* suppress print, do not print to console */
356 LOG_NEWLINE = 2, /* text ended with a newline */ 354 LOG_NEWLINE = 2, /* text ended with a newline */
357 LOG_PREFIX = 4, /* text started with a prefix */ 355 LOG_PREFIX = 4, /* text started with a prefix */
358 LOG_CONT = 8, /* text is a fragment of a continuation line */ 356 LOG_CONT = 8, /* text is a fragment of a continuation line */
@@ -1882,9 +1880,6 @@ int vprintk_store(int facility, int level,
1882 if (dict) 1880 if (dict)
1883 lflags |= LOG_PREFIX|LOG_NEWLINE; 1881 lflags |= LOG_PREFIX|LOG_NEWLINE;
1884 1882
1885 if (suppress_message_printing(level))
1886 lflags |= LOG_NOCONS;
1887
1888 return log_output(facility, level, lflags, 1883 return log_output(facility, level, lflags,
1889 dict, dictlen, text, text_len); 1884 dict, dictlen, text, text_len);
1890} 1885}
@@ -2033,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
2033 const char *text, size_t len) {} 2028 const char *text, size_t len) {}
2034static size_t msg_print_text(const struct printk_log *msg, 2029static size_t msg_print_text(const struct printk_log *msg,
2035 bool syslog, char *buf, size_t size) { return 0; } 2030 bool syslog, char *buf, size_t size) { return 0; }
2031static bool suppress_message_printing(int level) { return false; }
2036 2032
2037#endif /* CONFIG_PRINTK */ 2033#endif /* CONFIG_PRINTK */
2038 2034
@@ -2369,10 +2365,11 @@ skip:
2369 break; 2365 break;
2370 2366
2371 msg = log_from_idx(console_idx); 2367 msg = log_from_idx(console_idx);
2372 if (msg->flags & LOG_NOCONS) { 2368 if (suppress_message_printing(msg->level)) {
2373 /* 2369 /*
2374 * Skip record if !ignore_loglevel, and 2370 * Skip record we have buffered and already printed
2375 * record has level above the console loglevel. 2371 * directly to the console when we received it, and
2372 * record that has level above the console loglevel.
2376 */ 2373 */
2377 console_idx = log_next(console_idx); 2374 console_idx = log_next(console_idx);
2378 console_seq++; 2375 console_seq++;
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index a0a74c533e4b..0913b4d385de 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
306 return printk_safe_log_store(s, fmt, args); 306 return printk_safe_log_store(s, fmt, args);
307} 307}
308 308
309void printk_nmi_enter(void) 309void notrace printk_nmi_enter(void)
310{ 310{
311 this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); 311 this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
312} 312}
313 313
314void printk_nmi_exit(void) 314void notrace printk_nmi_exit(void)
315{ 315{
316 this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); 316 this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
317} 317}
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 60caf1fb94e0..6383aa6a60ca 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
89 89
90static void sched_feat_disable(int i) 90static void sched_feat_disable(int i)
91{ 91{
92 static_key_disable(&sched_feat_keys[i]); 92 static_key_disable_cpuslocked(&sched_feat_keys[i]);
93} 93}
94 94
95static void sched_feat_enable(int i) 95static void sched_feat_enable(int i)
96{ 96{
97 static_key_enable(&sched_feat_keys[i]); 97 static_key_enable_cpuslocked(&sched_feat_keys[i]);
98} 98}
99#else 99#else
100static void sched_feat_disable(int i) { }; 100static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
146 146
147 /* Ensure the static_key remains in a consistent state */ 147 /* Ensure the static_key remains in a consistent state */
148 inode = file_inode(filp); 148 inode = file_inode(filp);
149 cpus_read_lock();
149 inode_lock(inode); 150 inode_lock(inode);
150 ret = sched_feat_set(cmp); 151 ret = sched_feat_set(cmp);
151 inode_unlock(inode); 152 inode_unlock(inode);
153 cpus_read_unlock();
152 if (ret < 0) 154 if (ret < 0)
153 return ret; 155 return ret;
154 156
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b39fb596f6c1..f808ddf2a868 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
3362 * attach_entity_load_avg - attach this entity to its cfs_rq load avg 3362 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3363 * @cfs_rq: cfs_rq to attach to 3363 * @cfs_rq: cfs_rq to attach to
3364 * @se: sched_entity to attach 3364 * @se: sched_entity to attach
3365 * @flags: migration hints
3365 * 3366 *
3366 * Must call update_cfs_rq_load_avg() before this, since we rely on 3367 * Must call update_cfs_rq_load_avg() before this, since we rely on
3367 * cfs_rq->avg.last_update_time being current. 3368 * cfs_rq->avg.last_update_time being current.
@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
7263{ 7264{
7264 struct rq *rq = cpu_rq(cpu); 7265 struct rq *rq = cpu_rq(cpu);
7265 struct cfs_rq *cfs_rq, *pos; 7266 struct cfs_rq *cfs_rq, *pos;
7267 const struct sched_class *curr_class;
7266 struct rq_flags rf; 7268 struct rq_flags rf;
7267 bool done = true; 7269 bool done = true;
7268 7270
@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
7299 if (cfs_rq_has_blocked(cfs_rq)) 7301 if (cfs_rq_has_blocked(cfs_rq))
7300 done = false; 7302 done = false;
7301 } 7303 }
7302 update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); 7304
7303 update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); 7305 curr_class = rq->curr->sched_class;
7306 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7307 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
7304 update_irq_load_avg(rq, 0); 7308 update_irq_load_avg(rq, 0);
7305 /* Don't need periodic decay once load/util_avg are null */ 7309 /* Don't need periodic decay once load/util_avg are null */
7306 if (others_have_blocked(rq)) 7310 if (others_have_blocked(rq))
@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
7365{ 7369{
7366 struct rq *rq = cpu_rq(cpu); 7370 struct rq *rq = cpu_rq(cpu);
7367 struct cfs_rq *cfs_rq = &rq->cfs; 7371 struct cfs_rq *cfs_rq = &rq->cfs;
7372 const struct sched_class *curr_class;
7368 struct rq_flags rf; 7373 struct rq_flags rf;
7369 7374
7370 rq_lock_irqsave(rq, &rf); 7375 rq_lock_irqsave(rq, &rf);
7371 update_rq_clock(rq); 7376 update_rq_clock(rq);
7372 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); 7377 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
7373 update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); 7378
7374 update_dl_rq_load_avg(rq_clock_task(rq), rq, 0); 7379 curr_class = rq->curr->sched_class;
7380 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7381 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
7375 update_irq_load_avg(rq, 0); 7382 update_irq_load_avg(rq, 0);
7376#ifdef CONFIG_NO_HZ_COMMON 7383#ifdef CONFIG_NO_HZ_COMMON
7377 rq->last_blocked_load_update_tick = jiffies; 7384 rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
7482 return load_idx; 7489 return load_idx;
7483} 7490}
7484 7491
7485static unsigned long scale_rt_capacity(int cpu) 7492static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
7486{ 7493{
7487 struct rq *rq = cpu_rq(cpu); 7494 struct rq *rq = cpu_rq(cpu);
7488 unsigned long max = arch_scale_cpu_capacity(NULL, cpu); 7495 unsigned long max = arch_scale_cpu_capacity(sd, cpu);
7489 unsigned long used, free; 7496 unsigned long used, free;
7490 unsigned long irq; 7497 unsigned long irq;
7491 7498
@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
7507 7514
7508static void update_cpu_capacity(struct sched_domain *sd, int cpu) 7515static void update_cpu_capacity(struct sched_domain *sd, int cpu)
7509{ 7516{
7510 unsigned long capacity = scale_rt_capacity(cpu); 7517 unsigned long capacity = scale_rt_capacity(sd, cpu);
7511 struct sched_group *sdg = sd->groups; 7518 struct sched_group *sdg = sd->groups;
7512 7519
7513 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu); 7520 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
8269force_balance: 8276force_balance:
8270 /* Looks like there is an imbalance. Compute it */ 8277 /* Looks like there is an imbalance. Compute it */
8271 calculate_imbalance(env, &sds); 8278 calculate_imbalance(env, &sds);
8272 return sds.busiest; 8279 return env->imbalance ? sds.busiest : NULL;
8273 8280
8274out_balanced: 8281out_balanced:
8275 env->imbalance = 0; 8282 env->imbalance = 0;
@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
9638 * - A task which has been woken up by try_to_wake_up() and 9645 * - A task which has been woken up by try_to_wake_up() and
9639 * waiting for actually being woken up by sched_ttwu_pending(). 9646 * waiting for actually being woken up by sched_ttwu_pending().
9640 */ 9647 */
9641 if (!se->sum_exec_runtime || p->state == TASK_WAKING) 9648 if (!se->sum_exec_runtime ||
9649 (p->state == TASK_WAKING && p->sched_remote_wakeup))
9642 return true; 9650 return true;
9643 9651
9644 return false; 9652 return false;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 56a0fed30c0a..505a41c42b96 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
1295 1295
1296 n = sched_max_numa_distance; 1296 n = sched_max_numa_distance;
1297 1297
1298 if (sched_domains_numa_levels <= 1) { 1298 if (sched_domains_numa_levels <= 2) {
1299 sched_numa_topology_type = NUMA_DIRECT; 1299 sched_numa_topology_type = NUMA_DIRECT;
1300 return; 1300 return;
1301 } 1301 }
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
1380 break; 1380 break;
1381 } 1381 }
1382 1382
1383 if (!level)
1384 return;
1385
1386 /* 1383 /*
1387 * 'level' contains the number of unique distances 1384 * 'level' contains the number of unique distances
1388 * 1385 *
diff --git a/kernel/sys.c b/kernel/sys.c
index cf5c67533ff1..123bd73046ec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -71,9 +71,6 @@
71#include <asm/io.h> 71#include <asm/io.h>
72#include <asm/unistd.h> 72#include <asm/unistd.h>
73 73
74/* Hardening for Spectre-v1 */
75#include <linux/nospec.h>
76
77#include "uid16.h" 74#include "uid16.h"
78 75
79#ifndef SET_UNALIGN_CTL 76#ifndef SET_UNALIGN_CTL
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f74fb00d8064..0e6e97a01942 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
133 spin_unlock_irqrestore(&watchdog_lock, *flags); 133 spin_unlock_irqrestore(&watchdog_lock, *flags);
134} 134}
135 135
136static int clocksource_watchdog_kthread(void *data);
137static void __clocksource_change_rating(struct clocksource *cs, int rating);
138
136/* 139/*
137 * Interval: 0.5sec Threshold: 0.0625s 140 * Interval: 0.5sec Threshold: 0.0625s
138 */ 141 */
139#define WATCHDOG_INTERVAL (HZ >> 1) 142#define WATCHDOG_INTERVAL (HZ >> 1)
140#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4) 143#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
141 144
145static void clocksource_watchdog_work(struct work_struct *work)
146{
147 /*
148 * We cannot directly run clocksource_watchdog_kthread() here, because
149 * clocksource_select() calls timekeeping_notify() which uses
150 * stop_machine(). One cannot use stop_machine() from a workqueue() due
151 * lock inversions wrt CPU hotplug.
152 *
153 * Also, we only ever run this work once or twice during the lifetime
154 * of the kernel, so there is no point in creating a more permanent
155 * kthread for this.
156 *
157 * If kthread_run fails the next watchdog scan over the
158 * watchdog_list will find the unstable clock again.
159 */
160 kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
161}
162
142static void __clocksource_unstable(struct clocksource *cs) 163static void __clocksource_unstable(struct clocksource *cs)
143{ 164{
144 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); 165 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
145 cs->flags |= CLOCK_SOURCE_UNSTABLE; 166 cs->flags |= CLOCK_SOURCE_UNSTABLE;
146 167
147 /* 168 /*
148 * If the clocksource is registered clocksource_watchdog_work() will 169 * If the clocksource is registered clocksource_watchdog_kthread() will
149 * re-rate and re-select. 170 * re-rate and re-select.
150 */ 171 */
151 if (list_empty(&cs->list)) { 172 if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
156 if (cs->mark_unstable) 177 if (cs->mark_unstable)
157 cs->mark_unstable(cs); 178 cs->mark_unstable(cs);
158 179
159 /* kick clocksource_watchdog_work() */ 180 /* kick clocksource_watchdog_kthread() */
160 if (finished_booting) 181 if (finished_booting)
161 schedule_work(&watchdog_work); 182 schedule_work(&watchdog_work);
162} 183}
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
166 * @cs: clocksource to be marked unstable 187 * @cs: clocksource to be marked unstable
167 * 188 *
168 * This function is called by the x86 TSC code to mark clocksources as unstable; 189 * This function is called by the x86 TSC code to mark clocksources as unstable;
169 * it defers demotion and re-selection to a work. 190 * it defers demotion and re-selection to a kthread.
170 */ 191 */
171void clocksource_mark_unstable(struct clocksource *cs) 192void clocksource_mark_unstable(struct clocksource *cs)
172{ 193{
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
391 } 412 }
392} 413}
393 414
394static void __clocksource_change_rating(struct clocksource *cs, int rating); 415static int __clocksource_watchdog_kthread(void)
395
396static int __clocksource_watchdog_work(void)
397{ 416{
398 struct clocksource *cs, *tmp; 417 struct clocksource *cs, *tmp;
399 unsigned long flags; 418 unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
418 return select; 437 return select;
419} 438}
420 439
421static void clocksource_watchdog_work(struct work_struct *work) 440static int clocksource_watchdog_kthread(void *data)
422{ 441{
423 mutex_lock(&clocksource_mutex); 442 mutex_lock(&clocksource_mutex);
424 if (__clocksource_watchdog_work()) 443 if (__clocksource_watchdog_kthread())
425 clocksource_select(); 444 clocksource_select();
426 mutex_unlock(&clocksource_mutex); 445 mutex_unlock(&clocksource_mutex);
446 return 0;
427} 447}
428 448
429static bool clocksource_is_watchdog(struct clocksource *cs) 449static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
442static void clocksource_select_watchdog(bool fallback) { } 462static void clocksource_select_watchdog(bool fallback) { }
443static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } 463static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
444static inline void clocksource_resume_watchdog(void) { } 464static inline void clocksource_resume_watchdog(void) { }
445static inline int __clocksource_watchdog_work(void) { return 0; } 465static inline int __clocksource_watchdog_kthread(void) { return 0; }
446static bool clocksource_is_watchdog(struct clocksource *cs) { return false; } 466static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
447void clocksource_mark_unstable(struct clocksource *cs) { } 467void clocksource_mark_unstable(struct clocksource *cs) { }
448 468
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
810 /* 830 /*
811 * Run the watchdog first to eliminate unstable clock sources 831 * Run the watchdog first to eliminate unstable clock sources
812 */ 832 */
813 __clocksource_watchdog_work(); 833 __clocksource_watchdog_kthread();
814 clocksource_select(); 834 clocksource_select();
815 mutex_unlock(&clocksource_mutex); 835 mutex_unlock(&clocksource_mutex);
816 return 0; 836 return 0;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d92d4a982fd..65bd4616220d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1546 tmp_iter_page = first_page; 1546 tmp_iter_page = first_page;
1547 1547
1548 do { 1548 do {
1549 cond_resched();
1550
1549 to_remove_page = tmp_iter_page; 1551 to_remove_page = tmp_iter_page;
1550 rb_inc_page(cpu_buffer, &tmp_iter_page); 1552 rb_inc_page(cpu_buffer, &tmp_iter_page);
1551 1553
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5470dce212c0..977918d5d350 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
261 * entering idle state. This should only be used for scheduler events. 261 * entering idle state. This should only be used for scheduler events.
262 * Use touch_softlockup_watchdog() for everything else. 262 * Use touch_softlockup_watchdog() for everything else.
263 */ 263 */
264void touch_softlockup_watchdog_sched(void) 264notrace void touch_softlockup_watchdog_sched(void)
265{ 265{
266 /* 266 /*
267 * Preemption can be enabled. It doesn't matter which CPU's timestamp 267 * Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
270 raw_cpu_write(watchdog_touch_ts, 0); 270 raw_cpu_write(watchdog_touch_ts, 0);
271} 271}
272 272
273void touch_softlockup_watchdog(void) 273notrace void touch_softlockup_watchdog(void)
274{ 274{
275 touch_softlockup_watchdog_sched(); 275 touch_softlockup_watchdog_sched();
276 wq_watchdog_touch(raw_smp_processor_id()); 276 wq_watchdog_touch(raw_smp_processor_id());
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 1f7020d65d0a..71381168dede 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
29static unsigned long hardlockup_allcpu_dumped; 29static unsigned long hardlockup_allcpu_dumped;
30static atomic_t watchdog_cpus = ATOMIC_INIT(0); 30static atomic_t watchdog_cpus = ATOMIC_INIT(0);
31 31
32void arch_touch_nmi_watchdog(void) 32notrace void arch_touch_nmi_watchdog(void)
33{ 33{
34 /* 34 /*
35 * Using __raw here because some code paths have 35 * Using __raw here because some code paths have
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 60e80198c3df..0280deac392e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
5574 mod_timer(&wq_watchdog_timer, jiffies + thresh); 5574 mod_timer(&wq_watchdog_timer, jiffies + thresh);
5575} 5575}
5576 5576
5577void wq_watchdog_touch(int cpu) 5577notrace void wq_watchdog_touch(int cpu)
5578{ 5578{
5579 if (cpu >= 0) 5579 if (cpu >= 0)
5580 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies; 5580 per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 613316724c6a..4966c4fbe7f7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
1277 time. This is really bad from a security perspective, and 1277 time. This is really bad from a security perspective, and
1278 so architecture maintainers really need to do what they can 1278 so architecture maintainers really need to do what they can
1279 to get the CRNG seeded sooner after the system is booted. 1279 to get the CRNG seeded sooner after the system is booted.
1280 However, since users can not do anything actionble to 1280 However, since users cannot do anything actionable to
1281 address this, by default the kernel will issue only a single 1281 address this, by default the kernel will issue only a single
1282 warning for the first use of unseeded randomness. 1282 warning for the first use of unseeded randomness.
1283 1283
1284 Say Y here if you want to receive warnings for all uses of 1284 Say Y here if you want to receive warnings for all uses of
1285 unseeded randomness. This will be of use primarily for 1285 unseeded randomness. This will be of use primarily for
1286 those developers interersted in improving the security of 1286 those developers interested in improving the security of
1287 Linux kernels running on their architecture (or 1287 Linux kernels running on their architecture (or
1288 subarchitecture). 1288 subarchitecture).
1289 1289
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c72577e472f2..a66595ba5543 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -4,7 +4,6 @@
4 */ 4 */
5 5
6#include <linux/percpu_counter.h> 6#include <linux/percpu_counter.h>
7#include <linux/notifier.h>
8#include <linux/mutex.h> 7#include <linux/mutex.h>
9#include <linux/init.h> 8#include <linux/init.h>
10#include <linux/cpu.h> 9#include <linux/cpu.h>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 310e29b51507..30526afa8343 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -28,7 +28,6 @@
28#include <linux/rhashtable.h> 28#include <linux/rhashtable.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/rhashtable.h>
32 31
33#define HASH_DEFAULT_SIZE 64UL 32#define HASH_DEFAULT_SIZE 64UL
34#define HASH_MIN_SIZE 4U 33#define HASH_MIN_SIZE 4U
diff --git a/mm/Kconfig b/mm/Kconfig
index a550635ea5c3..de64ea658716 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
637 depends on NO_BOOTMEM 637 depends on NO_BOOTMEM
638 depends on SPARSEMEM 638 depends on SPARSEMEM
639 depends on !NEED_PER_CPU_KM 639 depends on !NEED_PER_CPU_KM
640 depends on 64BIT
640 help 641 help
641 Ordinarily all struct pages are initialised during early boot in a 642 Ordinarily all struct pages are initialised during early boot in a
642 single thread. On very large machines this can take a considerable 643 single thread. On very large machines this can take a considerable
diff --git a/mm/Makefile b/mm/Makefile
index 8716bdabe1e6..26ef77a3883b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH
32mmu-$(CONFIG_MMU) += process_vm_access.o 32mmu-$(CONFIG_MMU) += process_vm_access.o
33endif 33endif
34 34
35obj-y := filemap.o mempool.o oom_kill.o \ 35obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
36 maccess.o page_alloc.o page-writeback.o \ 36 maccess.o page_alloc.o page-writeback.o \
37 readahead.o swap.o truncate.o vmscan.o shmem.o \ 37 readahead.o swap.o truncate.o vmscan.o shmem.o \
38 util.o mmzone.o vmstat.o backing-dev.o \ 38 util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
49 obj-y += bootmem.o 49 obj-y += bootmem.o
50endif 50endif
51 51
52obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o
53ifdef CONFIG_MMU 52ifdef CONFIG_MMU
54 obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o 53 obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
55endif 54endif
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f5981e9d6ae2..8a8bb8796c6c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
491{ 491{
492 struct bdi_writeback *wb = container_of(work, struct bdi_writeback, 492 struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
493 release_work); 493 release_work);
494 struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
494 495
495 mutex_lock(&wb->bdi->cgwb_release_mutex); 496 mutex_lock(&wb->bdi->cgwb_release_mutex);
496 wb_shutdown(wb); 497 wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
499 css_put(wb->blkcg_css); 500 css_put(wb->blkcg_css);
500 mutex_unlock(&wb->bdi->cgwb_release_mutex); 501 mutex_unlock(&wb->bdi->cgwb_release_mutex);
501 502
503 /* triggers blkg destruction if cgwb_refcnt becomes zero */
504 blkcg_cgwb_put(blkcg);
505
502 fprop_local_destroy_percpu(&wb->memcg_completions); 506 fprop_local_destroy_percpu(&wb->memcg_completions);
503 percpu_ref_exit(&wb->refcnt); 507 percpu_ref_exit(&wb->refcnt);
504 wb_exit(wb); 508 wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
597 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); 601 list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
598 list_add(&wb->memcg_node, memcg_cgwb_list); 602 list_add(&wb->memcg_node, memcg_cgwb_list);
599 list_add(&wb->blkcg_node, blkcg_cgwb_list); 603 list_add(&wb->blkcg_node, blkcg_cgwb_list);
604 blkcg_cgwb_get(blkcg);
600 css_get(memcg_css); 605 css_get(memcg_css);
601 css_get(blkcg_css); 606 css_get(blkcg_css);
602 } 607 }
diff --git a/mm/debug.c b/mm/debug.c
index 38c926520c97..bd10aad8539a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
114 114
115void dump_mm(const struct mm_struct *mm) 115void dump_mm(const struct mm_struct *mm)
116{ 116{
117 pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" 117 pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
118#ifdef CONFIG_MMU 118#ifdef CONFIG_MMU
119 "get_unmapped_area %px\n" 119 "get_unmapped_area %px\n"
120#endif 120#endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
142 "tlb_flush_pending %d\n" 142 "tlb_flush_pending %d\n"
143 "def_flags: %#lx(%pGv)\n", 143 "def_flags: %#lx(%pGv)\n",
144 144
145 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, 145 mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
146#ifdef CONFIG_MMU 146#ifdef CONFIG_MMU
147 mm->get_unmapped_area, 147 mm->get_unmapped_area,
148#endif 148#endif
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2d8376e3c640..467bcd032037 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -27,9 +27,9 @@
27 * deactivate the pages and clear PG_Referenced. 27 * deactivate the pages and clear PG_Referenced.
28 */ 28 */
29 29
30int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) 30static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
31 int advice)
31{ 32{
32 struct fd f = fdget(fd);
33 struct inode *inode; 33 struct inode *inode;
34 struct address_space *mapping; 34 struct address_space *mapping;
35 struct backing_dev_info *bdi; 35 struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
37 pgoff_t start_index; 37 pgoff_t start_index;
38 pgoff_t end_index; 38 pgoff_t end_index;
39 unsigned long nrpages; 39 unsigned long nrpages;
40 int ret = 0;
41
42 if (!f.file)
43 return -EBADF;
44 40
45 inode = file_inode(f.file); 41 inode = file_inode(file);
46 if (S_ISFIFO(inode->i_mode)) { 42 if (S_ISFIFO(inode->i_mode))
47 ret = -ESPIPE; 43 return -ESPIPE;
48 goto out;
49 }
50 44
51 mapping = f.file->f_mapping; 45 mapping = file->f_mapping;
52 if (!mapping || len < 0) { 46 if (!mapping || len < 0)
53 ret = -EINVAL; 47 return -EINVAL;
54 goto out;
55 }
56 48
57 bdi = inode_to_bdi(mapping->host); 49 bdi = inode_to_bdi(mapping->host);
58 50
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
67 /* no bad return value, but ignore advice */ 59 /* no bad return value, but ignore advice */
68 break; 60 break;
69 default: 61 default:
70 ret = -EINVAL; 62 return -EINVAL;
71 } 63 }
72 goto out; 64 return 0;
73 } 65 }
74 66
75 /* 67 /*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
85 77
86 switch (advice) { 78 switch (advice) {
87 case POSIX_FADV_NORMAL: 79 case POSIX_FADV_NORMAL:
88 f.file->f_ra.ra_pages = bdi->ra_pages; 80 file->f_ra.ra_pages = bdi->ra_pages;
89 spin_lock(&f.file->f_lock); 81 spin_lock(&file->f_lock);
90 f.file->f_mode &= ~FMODE_RANDOM; 82 file->f_mode &= ~FMODE_RANDOM;
91 spin_unlock(&f.file->f_lock); 83 spin_unlock(&file->f_lock);
92 break; 84 break;
93 case POSIX_FADV_RANDOM: 85 case POSIX_FADV_RANDOM:
94 spin_lock(&f.file->f_lock); 86 spin_lock(&file->f_lock);
95 f.file->f_mode |= FMODE_RANDOM; 87 file->f_mode |= FMODE_RANDOM;
96 spin_unlock(&f.file->f_lock); 88 spin_unlock(&file->f_lock);
97 break; 89 break;
98 case POSIX_FADV_SEQUENTIAL: 90 case POSIX_FADV_SEQUENTIAL:
99 f.file->f_ra.ra_pages = bdi->ra_pages * 2; 91 file->f_ra.ra_pages = bdi->ra_pages * 2;
100 spin_lock(&f.file->f_lock); 92 spin_lock(&file->f_lock);
101 f.file->f_mode &= ~FMODE_RANDOM; 93 file->f_mode &= ~FMODE_RANDOM;
102 spin_unlock(&f.file->f_lock); 94 spin_unlock(&file->f_lock);
103 break; 95 break;
104 case POSIX_FADV_WILLNEED: 96 case POSIX_FADV_WILLNEED:
105 /* First and last PARTIAL page! */ 97 /* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
115 * Ignore return value because fadvise() shall return 107 * Ignore return value because fadvise() shall return
116 * success even if filesystem can't retrieve a hint, 108 * success even if filesystem can't retrieve a hint,
117 */ 109 */
118 force_page_cache_readahead(mapping, f.file, start_index, 110 force_page_cache_readahead(mapping, file, start_index, nrpages);
119 nrpages);
120 break; 111 break;
121 case POSIX_FADV_NOREUSE: 112 case POSIX_FADV_NOREUSE:
122 break; 113 break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
183 } 174 }
184 break; 175 break;
185 default: 176 default:
186 ret = -EINVAL; 177 return -EINVAL;
187 } 178 }
188out: 179 return 0;
180}
181
182int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
183{
184 if (file->f_op->fadvise)
185 return file->f_op->fadvise(file, offset, len, advice);
186
187 return generic_fadvise(file, offset, len, advice);
188}
189EXPORT_SYMBOL(vfs_fadvise);
190
191#ifdef CONFIG_ADVISE_SYSCALLS
192
193int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
194{
195 struct fd f = fdget(fd);
196 int ret;
197
198 if (!f.file)
199 return -EBADF;
200
201 ret = vfs_fadvise(f.file, offset, len, advice);
202
189 fdput(f); 203 fdput(f);
190 return ret; 204 return ret;
191} 205}
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
203} 217}
204 218
205#endif 219#endif
220#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c3bc7e9c9a2a..533f9b00147d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
821 * but we need to be consistent with PTEs and architectures that 821 * but we need to be consistent with PTEs and architectures that
822 * can't support a 'special' bit. 822 * can't support a 'special' bit.
823 */ 823 */
824 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 824 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
825 !pfn_t_devmap(pfn));
825 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 826 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
826 (VM_PFNMAP|VM_MIXEDMAP)); 827 (VM_PFNMAP|VM_MIXEDMAP));
827 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 828 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
828 BUG_ON(!pfn_t_devmap(pfn));
829 829
830 if (addr < vma->vm_start || addr >= vma->vm_end) 830 if (addr < vma->vm_start || addr >= vma->vm_end)
831 return VM_FAULT_SIGBUS; 831 return VM_FAULT_SIGBUS;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9a085d525bbc..17dd883198ae 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
2097 2097
2098 kmemleak_initialized = 1; 2098 kmemleak_initialized = 1;
2099 2099
2100 dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
2101 &kmemleak_fops);
2102 if (!dentry)
2103 pr_warn("Failed to create the debugfs kmemleak file\n");
2104
2100 if (kmemleak_error) { 2105 if (kmemleak_error) {
2101 /* 2106 /*
2102 * Some error occurred and kmemleak was disabled. There is a 2107 * Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
2108 return -ENOMEM; 2113 return -ENOMEM;
2109 } 2114 }
2110 2115
2111 dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
2112 &kmemleak_fops);
2113 if (!dentry)
2114 pr_warn("Failed to create the debugfs kmemleak file\n");
2115 mutex_lock(&scan_mutex); 2116 mutex_lock(&scan_mutex);
2116 start_scan_thread(); 2117 start_scan_thread();
2117 mutex_unlock(&scan_mutex); 2118 mutex_unlock(&scan_mutex);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4ead5a4817de..e79cb59552d9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
1701 if (mem_cgroup_out_of_memory(memcg, mask, order)) 1701 if (mem_cgroup_out_of_memory(memcg, mask, order))
1702 return OOM_SUCCESS; 1702 return OOM_SUCCESS;
1703 1703
1704 WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
1705 "This looks like a misconfiguration or a kernel bug.");
1706 return OOM_FAILED; 1704 return OOM_FAILED;
1707} 1705}
1708 1706
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9eea6e809a4e..38d94b703e9d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
1333 if (__PageMovable(page)) 1333 if (__PageMovable(page))
1334 return pfn; 1334 return pfn;
1335 if (PageHuge(page)) { 1335 if (PageHuge(page)) {
1336 if (page_huge_active(page)) 1336 if (hugepage_migration_supported(page_hstate(page)) &&
1337 page_huge_active(page))
1337 return pfn; 1338 return pfn;
1338 else 1339 else
1339 pfn = round_up(pfn + 1, 1340 pfn = round_up(pfn + 1,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b5b25e4dcbbb..f10aa5360616 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
522 522
523 tlb_gather_mmu(&tlb, mm, start, end); 523 tlb_gather_mmu(&tlb, mm, start, end);
524 if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) { 524 if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
525 tlb_finish_mmu(&tlb, start, end);
525 ret = false; 526 ret = false;
526 continue; 527 continue;
527 } 528 }
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
1103 } 1104 }
1104 1105
1105 select_bad_process(oc); 1106 select_bad_process(oc);
1106 /* Found nothing?!?! Either we hang forever, or we panic. */ 1107 /* Found nothing?!?! */
1107 if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) { 1108 if (!oc->chosen) {
1108 dump_header(oc, NULL); 1109 dump_header(oc, NULL);
1109 panic("Out of memory and no killable processes...\n"); 1110 pr_warn("Out of memory and no killable processes...\n");
1111 /*
1112 * If we got here due to an actual allocation at the
1113 * system level, we cannot survive this and will enter
1114 * an endless loop in the allocator. Bail out now.
1115 */
1116 if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1117 panic("System is deadlocked on memory\n");
1110 } 1118 }
1111 if (oc->chosen && oc->chosen != (void *)-1UL) 1119 if (oc->chosen && oc->chosen != (void *)-1UL)
1112 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" : 1120 oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6551d3b0dc30..84ae9bf5858a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -27,7 +27,6 @@
27#include <linux/mpage.h> 27#include <linux/mpage.h>
28#include <linux/rmap.h> 28#include <linux/rmap.h>
29#include <linux/percpu.h> 29#include <linux/percpu.h>
30#include <linux/notifier.h>
31#include <linux/smp.h> 30#include <linux/smp.h>
32#include <linux/sysctl.h> 31#include <linux/sysctl.h>
33#include <linux/cpu.h> 32#include <linux/cpu.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e75865d58ba7..89d2a2ab3fe6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -32,7 +32,6 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/ratelimit.h> 33#include <linux/ratelimit.h>
34#include <linux/oom.h> 34#include <linux/oom.h>
35#include <linux/notifier.h>
36#include <linux/topology.h> 35#include <linux/topology.h>
37#include <linux/sysctl.h> 36#include <linux/sysctl.h>
38#include <linux/cpu.h> 37#include <linux/cpu.h>
@@ -7709,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7709 * handle each tail page individually in migration. 7708 * handle each tail page individually in migration.
7710 */ 7709 */
7711 if (PageHuge(page)) { 7710 if (PageHuge(page)) {
7711
7712 if (!hugepage_migration_supported(page_hstate(page)))
7713 goto unmovable;
7714
7712 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; 7715 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7713 continue; 7716 continue;
7714 } 7717 }
diff --git a/mm/readahead.c b/mm/readahead.c
index a59ea70527b9..4e630143a0ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -20,6 +20,7 @@
20#include <linux/file.h> 20#include <linux/file.h>
21#include <linux/mm_inline.h> 21#include <linux/mm_inline.h>
22#include <linux/blk-cgroup.h> 22#include <linux/blk-cgroup.h>
23#include <linux/fadvise.h>
23 24
24#include "internal.h" 25#include "internal.h"
25 26
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
575} 576}
576EXPORT_SYMBOL_GPL(page_cache_async_readahead); 577EXPORT_SYMBOL_GPL(page_cache_async_readahead);
577 578
578static ssize_t
579do_readahead(struct address_space *mapping, struct file *filp,
580 pgoff_t index, unsigned long nr)
581{
582 if (!mapping || !mapping->a_ops)
583 return -EINVAL;
584
585 /*
586 * Readahead doesn't make sense for DAX inodes, but we don't want it
587 * to report a failure either. Instead, we just return success and
588 * don't do any work.
589 */
590 if (dax_mapping(mapping))
591 return 0;
592
593 return force_page_cache_readahead(mapping, filp, index, nr);
594}
595
596ssize_t ksys_readahead(int fd, loff_t offset, size_t count) 579ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
597{ 580{
598 ssize_t ret; 581 ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
600 583
601 ret = -EBADF; 584 ret = -EBADF;
602 f = fdget(fd); 585 f = fdget(fd);
603 if (f.file) { 586 if (!f.file || !(f.file->f_mode & FMODE_READ))
604 if (f.file->f_mode & FMODE_READ) { 587 goto out;
605 struct address_space *mapping = f.file->f_mapping; 588
606 pgoff_t start = offset >> PAGE_SHIFT; 589 /*
607 pgoff_t end = (offset + count - 1) >> PAGE_SHIFT; 590 * The readahead() syscall is intended to run only on files
608 unsigned long len = end - start + 1; 591 * that can execute readahead. If readahead is not possible
609 ret = do_readahead(mapping, f.file, start, len); 592 * on this file, then we must return -EINVAL.
610 } 593 */
611 fdput(f); 594 ret = -EINVAL;
612 } 595 if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
596 !S_ISREG(file_inode(f.file)->i_mode))
597 goto out;
598
599 ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
600out:
601 fdput(f);
613 return ret; 602 return ret;
614} 603}
615 604
diff --git a/mm/shmem.c b/mm/shmem.c
index 0376c124b043..446942677cd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
2227 mpol_shared_policy_init(&info->policy, NULL); 2227 mpol_shared_policy_init(&info->policy, NULL);
2228 break; 2228 break;
2229 } 2229 }
2230
2231 lockdep_annotate_inode_mutex_key(inode);
2230 } else 2232 } else
2231 shmem_free_inode(sb); 2233 shmem_free_inode(sb);
2232 return inode; 2234 return inode;
diff --git a/mm/slub.c b/mm/slub.c
index ce2b9e5cea77..8da34a8af53d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,7 +19,6 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include "slab.h" 20#include "slab.h"
21#include <linux/proc_fs.h> 21#include <linux/proc_fs.h>
22#include <linux/notifier.h>
23#include <linux/seq_file.h> 22#include <linux/seq_file.h>
24#include <linux/kasan.h> 23#include <linux/kasan.h>
25#include <linux/cpu.h> 24#include <linux/cpu.h>
diff --git a/mm/util.c b/mm/util.c
index d2890a407332..9e3ebd2ef65f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
435EXPORT_SYMBOL(kvmalloc_node); 435EXPORT_SYMBOL(kvmalloc_node);
436 436
437/** 437/**
438 * kvfree - free memory allocated with kvmalloc 438 * kvfree() - Free memory.
439 * @addr: pointer returned by kvmalloc 439 * @addr: Pointer to allocated memory.
440 * 440 *
441 * If the memory is allocated from vmalloc area it is freed with vfree(). 441 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
442 * Otherwise kfree() is used. 442 * It is slightly more efficient to use kfree() or vfree() if you are certain
443 * that you know which one to use.
444 *
445 * Context: Any context except NMI.
443 */ 446 */
444void kvfree(const void *addr) 447void kvfree(const void *addr)
445{ 448{
diff --git a/mm/vmacache.c b/mm/vmacache.c
index ea517bef7dc5..cdc32a3b02fa 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -20,44 +20,6 @@
20#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK) 20#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
21 21
22/* 22/*
23 * Flush vma caches for threads that share a given mm.
24 *
25 * The operation is safe because the caller holds the mmap_sem
26 * exclusively and other threads accessing the vma cache will
27 * have mmap_sem held at least for read, so no extra locking
28 * is required to maintain the vma cache.
29 */
30void vmacache_flush_all(struct mm_struct *mm)
31{
32 struct task_struct *g, *p;
33
34 count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
35
36 /*
37 * Single threaded tasks need not iterate the entire
38 * list of process. We can avoid the flushing as well
39 * since the mm's seqnum was increased and don't have
40 * to worry about other threads' seqnum. Current's
41 * flush will occur upon the next lookup.
42 */
43 if (atomic_read(&mm->mm_users) == 1)
44 return;
45
46 rcu_read_lock();
47 for_each_process_thread(g, p) {
48 /*
49 * Only flush the vmacache pointers as the
50 * mm seqnum is already set and curr's will
51 * be set upon invalidation when the next
52 * lookup is done.
53 */
54 if (mm == p->mm)
55 vmacache_flush(p);
56 }
57 rcu_read_unlock();
58}
59
60/*
61 * This task may be accessing a foreign mm via (for example) 23 * This task may be accessing a foreign mm via (for example)
62 * get_user_pages()->find_vma(). The vmacache is task-local and this 24 * get_user_pages()->find_vma(). The vmacache is task-local and this
63 * task's vmacache pertains to a different mm (ie, its own). There is 25 * task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c7ce2c161225 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
476 delta = freeable >> priority; 476 delta = freeable >> priority;
477 delta *= 4; 477 delta *= 4;
478 do_div(delta, shrinker->seeks); 478 do_div(delta, shrinker->seeks);
479
480 /*
481 * Make sure we apply some minimal pressure on default priority
482 * even on small cgroups. Stale objects are not only consuming memory
483 * by themselves, but can also hold a reference to a dying cgroup,
484 * preventing it from being reclaimed. A dying cgroup with all
485 * corresponding structures like per-cpu stats and kmem caches
486 * can be really big, so it may lead to a significant waste of memory.
487 */
488 delta = max_t(unsigned long long, delta, min(freeable, batch_size));
489
479 total_scan += delta; 490 total_scan += delta;
480 if (total_scan < 0) { 491 if (total_scan < 0) {
481 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", 492 pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 71c20c1d4002..9f481cfdf77d 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
241 * the packet to be exactly of that size to make the link 241 * the packet to be exactly of that size to make the link
242 * throughput estimation effective. 242 * throughput estimation effective.
243 */ 243 */
244 skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len); 244 skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
245 245
246 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 246 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
247 "Sending unicast (probe) ELP packet on interface %s to %pM\n", 247 "Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
268 struct batadv_priv *bat_priv; 268 struct batadv_priv *bat_priv;
269 struct sk_buff *skb; 269 struct sk_buff *skb;
270 u32 elp_interval; 270 u32 elp_interval;
271 bool ret;
271 272
272 bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); 273 bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
273 hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); 274 hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
329 * may sleep and that is not allowed in an rcu protected 330 * may sleep and that is not allowed in an rcu protected
330 * context. Therefore schedule a task for that. 331 * context. Therefore schedule a task for that.
331 */ 332 */
332 queue_work(batadv_event_workqueue, 333 ret = queue_work(batadv_event_workqueue,
333 &hardif_neigh->bat_v.metric_work); 334 &hardif_neigh->bat_v.metric_work);
335
336 if (!ret)
337 batadv_hardif_neigh_put(hardif_neigh);
334 } 338 }
335 rcu_read_unlock(); 339 rcu_read_unlock();
336 340
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ff9659af6b91..5f1aeeded0e3 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1772{ 1772{
1773 struct batadv_bla_backbone_gw *backbone_gw; 1773 struct batadv_bla_backbone_gw *backbone_gw;
1774 struct ethhdr *ethhdr; 1774 struct ethhdr *ethhdr;
1775 bool ret;
1775 1776
1776 ethhdr = eth_hdr(skb); 1777 ethhdr = eth_hdr(skb);
1777 1778
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1795 if (unlikely(!backbone_gw)) 1796 if (unlikely(!backbone_gw))
1796 return true; 1797 return true;
1797 1798
1798 queue_work(batadv_event_workqueue, &backbone_gw->report_work); 1799 ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1799 /* backbone_gw is unreferenced in the report work function function */ 1800
1801 /* backbone_gw is unreferenced in the report work function function
1802 * if queue_work() call was successful
1803 */
1804 if (!ret)
1805 batadv_backbone_gw_put(backbone_gw);
1800 1806
1801 return true; 1807 return true;
1802} 1808}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 8b198ee798c9..140c61a3f1ec 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -32,6 +32,7 @@
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/kref.h> 33#include <linux/kref.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/lockdep.h>
35#include <linux/netdevice.h> 36#include <linux/netdevice.h>
36#include <linux/netlink.h> 37#include <linux/netlink.h>
37#include <linux/rculist.h> 38#include <linux/rculist.h>
@@ -348,6 +349,9 @@ out:
348 * @bat_priv: the bat priv with all the soft interface information 349 * @bat_priv: the bat priv with all the soft interface information
349 * @orig_node: originator announcing gateway capabilities 350 * @orig_node: originator announcing gateway capabilities
350 * @gateway: announced bandwidth information 351 * @gateway: announced bandwidth information
352 *
353 * Has to be called with the appropriate locks being acquired
354 * (gw.list_lock).
351 */ 355 */
352static void batadv_gw_node_add(struct batadv_priv *bat_priv, 356static void batadv_gw_node_add(struct batadv_priv *bat_priv,
353 struct batadv_orig_node *orig_node, 357 struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
355{ 359{
356 struct batadv_gw_node *gw_node; 360 struct batadv_gw_node *gw_node;
357 361
362 lockdep_assert_held(&bat_priv->gw.list_lock);
363
358 if (gateway->bandwidth_down == 0) 364 if (gateway->bandwidth_down == 0)
359 return; 365 return;
360 366
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
369 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); 375 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
370 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); 376 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
371 377
372 spin_lock_bh(&bat_priv->gw.list_lock);
373 kref_get(&gw_node->refcount); 378 kref_get(&gw_node->refcount);
374 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); 379 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
375 spin_unlock_bh(&bat_priv->gw.list_lock);
376 380
377 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 381 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
378 "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", 382 "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
428{ 432{
429 struct batadv_gw_node *gw_node, *curr_gw = NULL; 433 struct batadv_gw_node *gw_node, *curr_gw = NULL;
430 434
435 spin_lock_bh(&bat_priv->gw.list_lock);
431 gw_node = batadv_gw_node_get(bat_priv, orig_node); 436 gw_node = batadv_gw_node_get(bat_priv, orig_node);
432 if (!gw_node) { 437 if (!gw_node) {
433 batadv_gw_node_add(bat_priv, orig_node, gateway); 438 batadv_gw_node_add(bat_priv, orig_node, gateway);
439 spin_unlock_bh(&bat_priv->gw.list_lock);
434 goto out; 440 goto out;
435 } 441 }
442 spin_unlock_bh(&bat_priv->gw.list_lock);
436 443
437 if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && 444 if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
438 gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) 445 gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 8da3c9336111..3ccc75ee719c 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
25#define BATADV_DRIVER_DEVICE "batman-adv" 25#define BATADV_DRIVER_DEVICE "batman-adv"
26 26
27#ifndef BATADV_SOURCE_VERSION 27#ifndef BATADV_SOURCE_VERSION
28#define BATADV_SOURCE_VERSION "2018.2" 28#define BATADV_SOURCE_VERSION "2018.3"
29#endif 29#endif
30 30
31/* B.A.T.M.A.N. parameters */ 31/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index c3578444f3cb..34caf129a9bf 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
854 spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ 854 spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
855 struct list_head *list; 855 struct list_head *list;
856 856
857 /* Select ingoing or outgoing coding node */
858 if (in_coding) {
859 lock = &orig_neigh_node->in_coding_list_lock;
860 list = &orig_neigh_node->in_coding_list;
861 } else {
862 lock = &orig_neigh_node->out_coding_list_lock;
863 list = &orig_neigh_node->out_coding_list;
864 }
865
866 spin_lock_bh(lock);
867
857 /* Check if nc_node is already added */ 868 /* Check if nc_node is already added */
858 nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); 869 nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
859 870
860 /* Node found */ 871 /* Node found */
861 if (nc_node) 872 if (nc_node)
862 return nc_node; 873 goto unlock;
863 874
864 nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); 875 nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
865 if (!nc_node) 876 if (!nc_node)
866 return NULL; 877 goto unlock;
867 878
868 /* Initialize nc_node */ 879 /* Initialize nc_node */
869 INIT_LIST_HEAD(&nc_node->list); 880 INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
872 kref_get(&orig_neigh_node->refcount); 883 kref_get(&orig_neigh_node->refcount);
873 nc_node->orig_node = orig_neigh_node; 884 nc_node->orig_node = orig_neigh_node;
874 885
875 /* Select ingoing or outgoing coding node */
876 if (in_coding) {
877 lock = &orig_neigh_node->in_coding_list_lock;
878 list = &orig_neigh_node->in_coding_list;
879 } else {
880 lock = &orig_neigh_node->out_coding_list_lock;
881 list = &orig_neigh_node->out_coding_list;
882 }
883
884 batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", 886 batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
885 nc_node->addr, nc_node->orig_node->orig); 887 nc_node->addr, nc_node->orig_node->orig);
886 888
887 /* Add nc_node to orig_node */ 889 /* Add nc_node to orig_node */
888 spin_lock_bh(lock);
889 kref_get(&nc_node->refcount); 890 kref_get(&nc_node->refcount);
890 list_add_tail_rcu(&nc_node->list, list); 891 list_add_tail_rcu(&nc_node->list, list);
892
893unlock:
891 spin_unlock_bh(lock); 894 spin_unlock_bh(lock);
892 895
893 return nc_node; 896 return nc_node;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 1485263a348b..626ddca332db 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
574 struct batadv_softif_vlan *vlan; 574 struct batadv_softif_vlan *vlan;
575 int err; 575 int err;
576 576
577 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
578
577 vlan = batadv_softif_vlan_get(bat_priv, vid); 579 vlan = batadv_softif_vlan_get(bat_priv, vid);
578 if (vlan) { 580 if (vlan) {
579 batadv_softif_vlan_put(vlan); 581 batadv_softif_vlan_put(vlan);
582 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
580 return -EEXIST; 583 return -EEXIST;
581 } 584 }
582 585
583 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); 586 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
584 if (!vlan) 587 if (!vlan) {
588 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
585 return -ENOMEM; 589 return -ENOMEM;
590 }
586 591
587 vlan->bat_priv = bat_priv; 592 vlan->bat_priv = bat_priv;
588 vlan->vid = vid; 593 vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
590 595
591 atomic_set(&vlan->ap_isolation, 0); 596 atomic_set(&vlan->ap_isolation, 0);
592 597
598 kref_get(&vlan->refcount);
599 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
600 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
601
602 /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
603 * sleeping behavior of the sysfs functions and the fs_reclaim lock
604 */
593 err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); 605 err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
594 if (err) { 606 if (err) {
595 kfree(vlan); 607 /* ref for the function */
608 batadv_softif_vlan_put(vlan);
609
610 /* ref for the list */
611 batadv_softif_vlan_put(vlan);
596 return err; 612 return err;
597 } 613 }
598 614
599 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
600 kref_get(&vlan->refcount);
601 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
602 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
603
604 /* add a new TT local entry. This one will be marked with the NOPURGE 615 /* add a new TT local entry. This one will be marked with the NOPURGE
605 * flag 616 * flag
606 */ 617 */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index f2eef43bd2ec..09427fc6494a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
188 \ 188 \
189 return __batadv_store_uint_attr(buff, count, _min, _max, \ 189 return __batadv_store_uint_attr(buff, count, _min, _max, \
190 _post_func, attr, \ 190 _post_func, attr, \
191 &bat_priv->_var, net_dev); \ 191 &bat_priv->_var, net_dev, \
192 NULL); \
192} 193}
193 194
194#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ 195#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
262 \ 263 \
263 length = __batadv_store_uint_attr(buff, count, _min, _max, \ 264 length = __batadv_store_uint_attr(buff, count, _min, _max, \
264 _post_func, attr, \ 265 _post_func, attr, \
265 &hard_iface->_var, net_dev); \ 266 &hard_iface->_var, \
267 hard_iface->soft_iface, \
268 net_dev); \
266 \ 269 \
267 batadv_hardif_put(hard_iface); \ 270 batadv_hardif_put(hard_iface); \
268 return length; \ 271 return length; \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
356 359
357static int batadv_store_uint_attr(const char *buff, size_t count, 360static int batadv_store_uint_attr(const char *buff, size_t count,
358 struct net_device *net_dev, 361 struct net_device *net_dev,
362 struct net_device *slave_dev,
359 const char *attr_name, 363 const char *attr_name,
360 unsigned int min, unsigned int max, 364 unsigned int min, unsigned int max,
361 atomic_t *attr) 365 atomic_t *attr)
362{ 366{
367 char ifname[IFNAMSIZ + 3] = "";
363 unsigned long uint_val; 368 unsigned long uint_val;
364 int ret; 369 int ret;
365 370
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
385 if (atomic_read(attr) == uint_val) 390 if (atomic_read(attr) == uint_val)
386 return count; 391 return count;
387 392
388 batadv_info(net_dev, "%s: Changing from: %i to: %lu\n", 393 if (slave_dev)
389 attr_name, atomic_read(attr), uint_val); 394 snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
395
396 batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
397 attr_name, ifname, atomic_read(attr), uint_val);
390 398
391 atomic_set(attr, uint_val); 399 atomic_set(attr, uint_val);
392 return count; 400 return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
397 void (*post_func)(struct net_device *), 405 void (*post_func)(struct net_device *),
398 const struct attribute *attr, 406 const struct attribute *attr,
399 atomic_t *attr_store, 407 atomic_t *attr_store,
400 struct net_device *net_dev) 408 struct net_device *net_dev,
409 struct net_device *slave_dev)
401{ 410{
402 int ret; 411 int ret;
403 412
404 ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max, 413 ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
405 attr_store); 414 attr->name, min, max, attr_store);
406 if (post_func && ret) 415 if (post_func && ret)
407 post_func(net_dev); 416 post_func(net_dev);
408 417
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
571 return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, 580 return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
572 batadv_post_gw_reselect, attr, 581 batadv_post_gw_reselect, attr,
573 &bat_priv->gw.sel_class, 582 &bat_priv->gw.sel_class,
574 bat_priv->soft_iface); 583 bat_priv->soft_iface, NULL);
575} 584}
576 585
577static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, 586static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1090 if (old_tp_override == tp_override) 1099 if (old_tp_override == tp_override)
1091 goto out; 1100 goto out;
1092 1101
1093 batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n", 1102 batadv_info(hard_iface->soft_iface,
1094 "throughput_override", 1103 "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
1104 "throughput_override", net_dev->name,
1095 old_tp_override / 10, old_tp_override % 10, 1105 old_tp_override / 10, old_tp_override % 10,
1096 tp_override / 10, tp_override % 10); 1106 tp_override / 10, tp_override % 10);
1097 1107
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 12a2b7d21376..d21624c44665 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1613{ 1613{
1614 struct batadv_tt_orig_list_entry *orig_entry; 1614 struct batadv_tt_orig_list_entry *orig_entry;
1615 1615
1616 spin_lock_bh(&tt_global->list_lock);
1617
1616 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); 1618 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
1617 if (orig_entry) { 1619 if (orig_entry) {
1618 /* refresh the ttvn: the current value could be a bogus one that 1620 /* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1635 orig_entry->flags = flags; 1637 orig_entry->flags = flags;
1636 kref_init(&orig_entry->refcount); 1638 kref_init(&orig_entry->refcount);
1637 1639
1638 spin_lock_bh(&tt_global->list_lock);
1639 kref_get(&orig_entry->refcount); 1640 kref_get(&orig_entry->refcount);
1640 hlist_add_head_rcu(&orig_entry->list, 1641 hlist_add_head_rcu(&orig_entry->list,
1641 &tt_global->orig_list); 1642 &tt_global->orig_list);
1642 spin_unlock_bh(&tt_global->list_lock);
1643 atomic_inc(&tt_global->orig_list_count); 1643 atomic_inc(&tt_global->orig_list_count);
1644 1644
1645sync_flags: 1645sync_flags:
@@ -1647,6 +1647,8 @@ sync_flags:
1647out: 1647out:
1648 if (orig_entry) 1648 if (orig_entry)
1649 batadv_tt_orig_list_entry_put(orig_entry); 1649 batadv_tt_orig_list_entry_put(orig_entry);
1650
1651 spin_unlock_bh(&tt_global->list_lock);
1650} 1652}
1651 1653
1652/** 1654/**
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index a637458205d1..40e69c9346d2 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
529{ 529{
530 struct batadv_tvlv_handler *tvlv_handler; 530 struct batadv_tvlv_handler *tvlv_handler;
531 531
532 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
533
532 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); 534 tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
533 if (tvlv_handler) { 535 if (tvlv_handler) {
536 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
534 batadv_tvlv_handler_put(tvlv_handler); 537 batadv_tvlv_handler_put(tvlv_handler);
535 return; 538 return;
536 } 539 }
537 540
538 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); 541 tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
539 if (!tvlv_handler) 542 if (!tvlv_handler) {
543 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
540 return; 544 return;
545 }
541 546
542 tvlv_handler->ogm_handler = optr; 547 tvlv_handler->ogm_handler = optr;
543 tvlv_handler->unicast_handler = uptr; 548 tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
547 kref_init(&tvlv_handler->refcount); 552 kref_init(&tvlv_handler->refcount);
548 INIT_HLIST_NODE(&tvlv_handler->list); 553 INIT_HLIST_NODE(&tvlv_handler->list);
549 554
550 spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
551 kref_get(&tvlv_handler->refcount); 555 kref_get(&tvlv_handler->refcount);
552 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); 556 hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
553 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); 557 spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3bdc8f3ca259..ccce954f8146 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2434 /* LE address type */ 2434 /* LE address type */
2435 addr_type = le_addr_type(cp->addr.type); 2435 addr_type = le_addr_type(cp->addr.type);
2436 2436
2437 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); 2437 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2438 2438 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2439 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2440 if (err < 0) { 2439 if (err < 0) {
2441 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 2440 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2442 MGMT_STATUS_NOT_PAIRED, &rp, 2441 MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2450 goto done; 2449 goto done;
2451 } 2450 }
2452 2451
2453 /* Abort any ongoing SMP pairing */
2454 smp_cancel_pairing(conn);
2455 2452
2456 /* Defer clearing up the connection parameters until closing to 2453 /* Defer clearing up the connection parameters until closing to
2457 * give a chance of keeping them if a repairing happens. 2454 * give a chance of keeping them if a repairing happens.
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ae91e2d40056..73f7211d0431 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -83,6 +83,7 @@ enum {
83 83
84struct smp_dev { 84struct smp_dev {
85 /* Secure Connections OOB data */ 85 /* Secure Connections OOB data */
86 bool local_oob;
86 u8 local_pk[64]; 87 u8 local_pk[64];
87 u8 local_rand[16]; 88 u8 local_rand[16];
88 bool debug_key; 89 bool debug_key;
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
599 600
600 memcpy(rand, smp->local_rand, 16); 601 memcpy(rand, smp->local_rand, 16);
601 602
603 smp->local_oob = true;
604
602 return 0; 605 return 0;
603} 606}
604 607
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
1785 * successfully received our local OOB data - therefore set the 1788 * successfully received our local OOB data - therefore set the
1786 * flag to indicate that local OOB is in use. 1789 * flag to indicate that local OOB is in use.
1787 */ 1790 */
1788 if (req->oob_flag == SMP_OOB_PRESENT) 1791 if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
1789 set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); 1792 set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
1790 1793
1791 /* SMP over BR/EDR requires special treatment */ 1794 /* SMP over BR/EDR requires special treatment */
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
1967 * successfully received our local OOB data - therefore set the 1970 * successfully received our local OOB data - therefore set the
1968 * flag to indicate that local OOB is in use. 1971 * flag to indicate that local OOB is in use.
1969 */ 1972 */
1970 if (rsp->oob_flag == SMP_OOB_PRESENT) 1973 if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
1971 set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); 1974 set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
1972 1975
1973 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 1976 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -2419,30 +2422,51 @@ unlock:
2419 return ret; 2422 return ret;
2420} 2423}
2421 2424
2422void smp_cancel_pairing(struct hci_conn *hcon) 2425int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
2426 u8 addr_type)
2423{ 2427{
2424 struct l2cap_conn *conn = hcon->l2cap_data; 2428 struct hci_conn *hcon;
2429 struct l2cap_conn *conn;
2425 struct l2cap_chan *chan; 2430 struct l2cap_chan *chan;
2426 struct smp_chan *smp; 2431 struct smp_chan *smp;
2432 int err;
2433
2434 err = hci_remove_ltk(hdev, bdaddr, addr_type);
2435 hci_remove_irk(hdev, bdaddr, addr_type);
2436
2437 hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
2438 if (!hcon)
2439 goto done;
2427 2440
2441 conn = hcon->l2cap_data;
2428 if (!conn) 2442 if (!conn)
2429 return; 2443 goto done;
2430 2444
2431 chan = conn->smp; 2445 chan = conn->smp;
2432 if (!chan) 2446 if (!chan)
2433 return; 2447 goto done;
2434 2448
2435 l2cap_chan_lock(chan); 2449 l2cap_chan_lock(chan);
2436 2450
2437 smp = chan->data; 2451 smp = chan->data;
2438 if (smp) { 2452 if (smp) {
2453 /* Set keys to NULL to make sure smp_failure() does not try to
2454 * remove and free already invalidated rcu list entries. */
2455 smp->ltk = NULL;
2456 smp->slave_ltk = NULL;
2457 smp->remote_irk = NULL;
2458
2439 if (test_bit(SMP_FLAG_COMPLETE, &smp->flags)) 2459 if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
2440 smp_failure(conn, 0); 2460 smp_failure(conn, 0);
2441 else 2461 else
2442 smp_failure(conn, SMP_UNSPECIFIED); 2462 smp_failure(conn, SMP_UNSPECIFIED);
2463 err = 0;
2443 } 2464 }
2444 2465
2445 l2cap_chan_unlock(chan); 2466 l2cap_chan_unlock(chan);
2467
2468done:
2469 return err;
2446} 2470}
2447 2471
2448static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) 2472static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -2697,7 +2721,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
2697 * key was set/generated. 2721 * key was set/generated.
2698 */ 2722 */
2699 if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { 2723 if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
2700 struct smp_dev *smp_dev = chan->data; 2724 struct l2cap_chan *hchan = hdev->smp_data;
2725 struct smp_dev *smp_dev;
2726
2727 if (!hchan || !hchan->data)
2728 return SMP_UNSPECIFIED;
2729
2730 smp_dev = hchan->data;
2701 2731
2702 tfm_ecdh = smp_dev->tfm_ecdh; 2732 tfm_ecdh = smp_dev->tfm_ecdh;
2703 } else { 2733 } else {
@@ -3230,6 +3260,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
3230 return ERR_CAST(tfm_ecdh); 3260 return ERR_CAST(tfm_ecdh);
3231 } 3261 }
3232 3262
3263 smp->local_oob = false;
3233 smp->tfm_aes = tfm_aes; 3264 smp->tfm_aes = tfm_aes;
3234 smp->tfm_cmac = tfm_cmac; 3265 smp->tfm_cmac = tfm_cmac;
3235 smp->tfm_ecdh = tfm_ecdh; 3266 smp->tfm_ecdh = tfm_ecdh;
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 0ff6247eaa6c..121edadd5f8d 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -181,7 +181,8 @@ enum smp_key_pref {
181}; 181};
182 182
183/* SMP Commands */ 183/* SMP Commands */
184void smp_cancel_pairing(struct hci_conn *hcon); 184int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
185 u8 addr_type);
185bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, 186bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
186 enum smp_key_pref key_pref); 187 enum smp_key_pref key_pref);
187int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); 188int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 6e0dc6bcd32a..37278dc280eb 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
835 struct sk_buff *skb, 835 struct sk_buff *skb,
836 const struct nf_hook_state *state) 836 const struct nf_hook_state *state)
837{ 837{
838 if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) { 838 if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
839 !netif_is_l3_master(skb->dev)) {
839 state->okfn(state->net, state->sk, skb); 840 state->okfn(state->net, state->sk, skb);
840 return NF_STOLEN; 841 return NF_STOLEN;
841 } 842 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 325fc5088370..82114e1111e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -93,7 +93,6 @@
93#include <linux/netdevice.h> 93#include <linux/netdevice.h>
94#include <linux/etherdevice.h> 94#include <linux/etherdevice.h>
95#include <linux/ethtool.h> 95#include <linux/ethtool.h>
96#include <linux/notifier.h>
97#include <linux/skbuff.h> 96#include <linux/skbuff.h>
98#include <linux/bpf.h> 97#include <linux/bpf.h>
99#include <linux/bpf_trace.h> 98#include <linux/bpf_trace.h>
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 65fc366a78a4..8c0ed225e280 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2592,7 +2592,7 @@ send_done:
2592 if (!nlh) { 2592 if (!nlh) {
2593 err = devlink_dpipe_send_and_alloc_skb(&skb, info); 2593 err = devlink_dpipe_send_and_alloc_skb(&skb, info);
2594 if (err) 2594 if (err)
2595 goto err_skb_send_alloc; 2595 return err;
2596 goto send_done; 2596 goto send_done;
2597 } 2597 }
2598 return genlmsg_reply(skb, info); 2598 return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ send_done:
2600nla_put_failure: 2600nla_put_failure:
2601 err = -EMSGSIZE; 2601 err = -EMSGSIZE;
2602err_resource_put: 2602err_resource_put:
2603err_skb_send_alloc:
2604 nlmsg_free(skb); 2603 nlmsg_free(skb);
2605 return err; 2604 return err;
2606} 2605}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c9993c6c2fd4..0762aaf8e964 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
1483static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1483static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1484{ 1484{
1485 struct ethtool_wolinfo wol; 1485 struct ethtool_wolinfo wol;
1486 int ret;
1486 1487
1487 if (!dev->ethtool_ops->set_wol) 1488 if (!dev->ethtool_ops->set_wol)
1488 return -EOPNOTSUPP; 1489 return -EOPNOTSUPP;
@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1490 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1491 if (copy_from_user(&wol, useraddr, sizeof(wol)))
1491 return -EFAULT; 1492 return -EFAULT;
1492 1493
1493 return dev->ethtool_ops->set_wol(dev, &wol); 1494 ret = dev->ethtool_ops->set_wol(dev, &wol);
1495 if (ret)
1496 return ret;
1497
1498 dev->wol_enabled = !!wol.wolopts;
1499
1500 return 0;
1494} 1501}
1495 1502
1496static int ethtool_get_eee(struct net_device *dev, char __user *useraddr) 1503static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
@@ -2624,6 +2631,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2624 case ETHTOOL_GPHYSTATS: 2631 case ETHTOOL_GPHYSTATS:
2625 case ETHTOOL_GTSO: 2632 case ETHTOOL_GTSO:
2626 case ETHTOOL_GPERMADDR: 2633 case ETHTOOL_GPERMADDR:
2634 case ETHTOOL_GUFO:
2627 case ETHTOOL_GGSO: 2635 case ETHTOOL_GGSO:
2628 case ETHTOOL_GGRO: 2636 case ETHTOOL_GGRO:
2629 case ETHTOOL_GFLAGS: 2637 case ETHTOOL_GFLAGS:
diff --git a/net/core/filter.c b/net/core/filter.c
index c25eb36f1320..5e00f2b85a56 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2282 .arg2_type = ARG_ANYTHING, 2282 .arg2_type = ARG_ANYTHING,
2283}; 2283};
2284 2284
2285#define sk_msg_iter_var(var) \
2286 do { \
2287 var++; \
2288 if (var == MAX_SKB_FRAGS) \
2289 var = 0; \
2290 } while (0)
2291
2285BPF_CALL_4(bpf_msg_pull_data, 2292BPF_CALL_4(bpf_msg_pull_data,
2286 struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags) 2293 struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
2287{ 2294{
2288 unsigned int len = 0, offset = 0, copy = 0; 2295 unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
2296 int bytes = end - start, bytes_sg_total;
2289 struct scatterlist *sg = msg->sg_data; 2297 struct scatterlist *sg = msg->sg_data;
2290 int first_sg, last_sg, i, shift; 2298 int first_sg, last_sg, i, shift;
2291 unsigned char *p, *to, *from; 2299 unsigned char *p, *to, *from;
2292 int bytes = end - start;
2293 struct page *page; 2300 struct page *page;
2294 2301
2295 if (unlikely(flags || end <= start)) 2302 if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
2299 i = msg->sg_start; 2306 i = msg->sg_start;
2300 do { 2307 do {
2301 len = sg[i].length; 2308 len = sg[i].length;
2302 offset += len;
2303 if (start < offset + len) 2309 if (start < offset + len)
2304 break; 2310 break;
2305 i++; 2311 offset += len;
2306 if (i == MAX_SKB_FRAGS) 2312 sk_msg_iter_var(i);
2307 i = 0;
2308 } while (i != msg->sg_end); 2313 } while (i != msg->sg_end);
2309 2314
2310 if (unlikely(start >= offset + len)) 2315 if (unlikely(start >= offset + len))
2311 return -EINVAL; 2316 return -EINVAL;
2312 2317
2313 if (!msg->sg_copy[i] && bytes <= len)
2314 goto out;
2315
2316 first_sg = i; 2318 first_sg = i;
2319 /* The start may point into the sg element so we need to also
2320 * account for the headroom.
2321 */
2322 bytes_sg_total = start - offset + bytes;
2323 if (!msg->sg_copy[i] && bytes_sg_total <= len)
2324 goto out;
2317 2325
2318 /* At this point we need to linearize multiple scatterlist 2326 /* At this point we need to linearize multiple scatterlist
2319 * elements or a single shared page. Either way we need to 2327 * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,33 @@ BPF_CALL_4(bpf_msg_pull_data,
2327 */ 2335 */
2328 do { 2336 do {
2329 copy += sg[i].length; 2337 copy += sg[i].length;
2330 i++; 2338 sk_msg_iter_var(i);
2331 if (i == MAX_SKB_FRAGS) 2339 if (bytes_sg_total <= copy)
2332 i = 0;
2333 if (bytes < copy)
2334 break; 2340 break;
2335 } while (i != msg->sg_end); 2341 } while (i != msg->sg_end);
2336 last_sg = i; 2342 last_sg = i;
2337 2343
2338 if (unlikely(copy < end - start)) 2344 if (unlikely(bytes_sg_total > copy))
2339 return -EINVAL; 2345 return -EINVAL;
2340 2346
2341 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy)); 2347 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2348 get_order(copy));
2342 if (unlikely(!page)) 2349 if (unlikely(!page))
2343 return -ENOMEM; 2350 return -ENOMEM;
2344 p = page_address(page); 2351 p = page_address(page);
2345 offset = 0;
2346 2352
2347 i = first_sg; 2353 i = first_sg;
2348 do { 2354 do {
2349 from = sg_virt(&sg[i]); 2355 from = sg_virt(&sg[i]);
2350 len = sg[i].length; 2356 len = sg[i].length;
2351 to = p + offset; 2357 to = p + poffset;
2352 2358
2353 memcpy(to, from, len); 2359 memcpy(to, from, len);
2354 offset += len; 2360 poffset += len;
2355 sg[i].length = 0; 2361 sg[i].length = 0;
2356 put_page(sg_page(&sg[i])); 2362 put_page(sg_page(&sg[i]));
2357 2363
2358 i++; 2364 sk_msg_iter_var(i);
2359 if (i == MAX_SKB_FRAGS)
2360 i = 0;
2361 } while (i != last_sg); 2365 } while (i != last_sg);
2362 2366
2363 sg[first_sg].length = copy; 2367 sg[first_sg].length = copy;
@@ -2367,11 +2371,15 @@ BPF_CALL_4(bpf_msg_pull_data,
2367 * had a single entry though we can just replace it and 2371 * had a single entry though we can just replace it and
2368 * be done. Otherwise walk the ring and shift the entries. 2372 * be done. Otherwise walk the ring and shift the entries.
2369 */ 2373 */
2370 shift = last_sg - first_sg - 1; 2374 WARN_ON_ONCE(last_sg == first_sg);
2375 shift = last_sg > first_sg ?
2376 last_sg - first_sg - 1 :
2377 MAX_SKB_FRAGS - first_sg + last_sg - 1;
2371 if (!shift) 2378 if (!shift)
2372 goto out; 2379 goto out;
2373 2380
2374 i = first_sg + 1; 2381 i = first_sg;
2382 sk_msg_iter_var(i);
2375 do { 2383 do {
2376 int move_from; 2384 int move_from;
2377 2385
@@ -2388,15 +2396,13 @@ BPF_CALL_4(bpf_msg_pull_data,
2388 sg[move_from].page_link = 0; 2396 sg[move_from].page_link = 0;
2389 sg[move_from].offset = 0; 2397 sg[move_from].offset = 0;
2390 2398
2391 i++; 2399 sk_msg_iter_var(i);
2392 if (i == MAX_SKB_FRAGS)
2393 i = 0;
2394 } while (1); 2400 } while (1);
2395 msg->sg_end -= shift; 2401 msg->sg_end -= shift;
2396 if (msg->sg_end < 0) 2402 if (msg->sg_end < 0)
2397 msg->sg_end += MAX_SKB_FRAGS; 2403 msg->sg_end += MAX_SKB_FRAGS;
2398out: 2404out:
2399 msg->data = sg_virt(&sg[i]) + start - offset; 2405 msg->data = sg_virt(&sg[first_sg]) + start - offset;
2400 msg->data_end = msg->data + bytes; 2406 msg->data_end = msg->data + bytes;
2401 2407
2402 return 0; 2408 return 0;
@@ -7281,7 +7287,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
7281 break; 7287 break;
7282 7288
7283 case offsetof(struct sk_reuseport_md, ip_protocol): 7289 case offsetof(struct sk_reuseport_md, ip_protocol):
7284 BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE); 7290 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
7285 SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, 7291 SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
7286 BPF_W, 0); 7292 BPF_W, 0);
7287 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); 7293 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index aa19d86937af..91592fceeaad 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1180 lladdr = neigh->ha; 1180 lladdr = neigh->ha;
1181 } 1181 }
1182 1182
1183 /* Update confirmed timestamp for neighbour entry after we
1184 * received ARP packet even if it doesn't change IP to MAC binding.
1185 */
1186 if (new & NUD_CONNECTED)
1187 neigh->confirmed = jiffies;
1188
1183 /* If entry was valid and address is not changed, 1189 /* If entry was valid and address is not changed,
1184 do not change entry state, if new one is STALE. 1190 do not change entry state, if new one is STALE.
1185 */ 1191 */
@@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1201 } 1207 }
1202 } 1208 }
1203 1209
1204 /* Update timestamps only once we know we will make a change to the 1210 /* Update timestamp only once we know we will make a change to the
1205 * neighbour entry. Otherwise we risk to move the locktime window with 1211 * neighbour entry. Otherwise we risk to move the locktime window with
1206 * noop updates and ignore relevant ARP updates. 1212 * noop updates and ignore relevant ARP updates.
1207 */ 1213 */
1208 if (new != old || lladdr != neigh->ha) { 1214 if (new != old || lladdr != neigh->ha)
1209 if (new & NUD_CONNECTED)
1210 neigh->confirmed = jiffies;
1211 neigh->updated = jiffies; 1215 neigh->updated = jiffies;
1212 }
1213 1216
1214 if (new != old) { 1217 if (new != old) {
1215 neigh_del_timer(neigh); 1218 neigh_del_timer(neigh);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 57557a6a950c..de1d1ba92f2d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
135 } 135 }
136} 136}
137 137
138/*
139 * Check whether delayed processing was scheduled for our NIC. If so,
140 * we attempt to grab the poll lock and use ->poll() to pump the card.
141 * If this fails, either we've recursed in ->poll() or it's already
142 * running on another CPU.
143 *
144 * Note: we don't mask interrupts with this lock because we're using
145 * trylock here and interrupts are already disabled in the softirq
146 * case. Further, we test the poll_owner to avoid recursion on UP
147 * systems where the lock doesn't exist.
148 */
149static void poll_one_napi(struct napi_struct *napi) 138static void poll_one_napi(struct napi_struct *napi)
150{ 139{
151 int work = 0; 140 int work;
152
153 /* net_rx_action's ->poll() invocations and our's are
154 * synchronized by this test which is only made while
155 * holding the napi->poll_lock.
156 */
157 if (!test_bit(NAPI_STATE_SCHED, &napi->state))
158 return;
159 141
160 /* If we set this bit but see that it has already been set, 142 /* If we set this bit but see that it has already been set,
161 * that indicates that napi has been disabled and we need 143 * that indicates that napi has been disabled and we need
@@ -187,16 +169,16 @@ static void poll_napi(struct net_device *dev)
187 } 169 }
188} 170}
189 171
190static void netpoll_poll_dev(struct net_device *dev) 172void netpoll_poll_dev(struct net_device *dev)
191{ 173{
192 const struct net_device_ops *ops;
193 struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); 174 struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
175 const struct net_device_ops *ops;
194 176
195 /* Don't do any rx activity if the dev_lock mutex is held 177 /* Don't do any rx activity if the dev_lock mutex is held
196 * the dev_open/close paths use this to block netpoll activity 178 * the dev_open/close paths use this to block netpoll activity
197 * while changing device state 179 * while changing device state
198 */ 180 */
199 if (down_trylock(&ni->dev_lock)) 181 if (!ni || down_trylock(&ni->dev_lock))
200 return; 182 return;
201 183
202 if (!netif_running(dev)) { 184 if (!netif_running(dev)) {
@@ -205,13 +187,8 @@ static void netpoll_poll_dev(struct net_device *dev)
205 } 187 }
206 188
207 ops = dev->netdev_ops; 189 ops = dev->netdev_ops;
208 if (!ops->ndo_poll_controller) { 190 if (ops->ndo_poll_controller)
209 up(&ni->dev_lock); 191 ops->ndo_poll_controller(dev);
210 return;
211 }
212
213 /* Process pending work on NIC */
214 ops->ndo_poll_controller(dev);
215 192
216 poll_napi(dev); 193 poll_napi(dev);
217 194
@@ -219,6 +196,7 @@ static void netpoll_poll_dev(struct net_device *dev)
219 196
220 zap_completion_queue(); 197 zap_completion_queue();
221} 198}
199EXPORT_SYMBOL(netpoll_poll_dev);
222 200
223void netpoll_poll_disable(struct net_device *dev) 201void netpoll_poll_disable(struct net_device *dev)
224{ 202{
@@ -334,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
334 /* It is up to the caller to keep npinfo alive. */ 312 /* It is up to the caller to keep npinfo alive. */
335 struct netpoll_info *npinfo; 313 struct netpoll_info *npinfo;
336 314
315 rcu_read_lock_bh();
337 lockdep_assert_irqs_disabled(); 316 lockdep_assert_irqs_disabled();
338 317
339 npinfo = rcu_dereference_bh(np->dev->npinfo); 318 npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -378,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
378 skb_queue_tail(&npinfo->txq, skb); 357 skb_queue_tail(&npinfo->txq, skb);
379 schedule_delayed_work(&npinfo->tx_work,0); 358 schedule_delayed_work(&npinfo->tx_work,0);
380 } 359 }
360 rcu_read_unlock_bh();
381} 361}
382EXPORT_SYMBOL(netpoll_send_skb_on_dev); 362EXPORT_SYMBOL(netpoll_send_skb_on_dev);
383 363
@@ -613,8 +593,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
613 strlcpy(np->dev_name, ndev->name, IFNAMSIZ); 593 strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
614 INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); 594 INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
615 595
616 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 596 if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
617 !ndev->netdev_ops->ndo_poll_controller) {
618 np_err(np, "%s doesn't support polling, aborting\n", 597 np_err(np, "%s doesn't support polling, aborting\n",
619 np->dev_name); 598 np->dev_name);
620 err = -ENOTSUPP; 599 err = -ENOTSUPP;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 24431e578310..448703312fed 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
324 324
325 rtnl_lock(); 325 rtnl_lock();
326 tab = rtnl_msg_handlers[protocol]; 326 tab = rtnl_msg_handlers[protocol];
327 if (!tab) {
328 rtnl_unlock();
329 return;
330 }
327 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); 331 RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
328 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) { 332 for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
329 link = tab[msgindex]; 333 link = tab[msgindex];
@@ -1894,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1894 if (tb[IFLA_IF_NETNSID]) { 1898 if (tb[IFLA_IF_NETNSID]) {
1895 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); 1899 netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
1896 tgt_net = get_target_net(skb->sk, netnsid); 1900 tgt_net = get_target_net(skb->sk, netnsid);
1897 if (IS_ERR(tgt_net)) { 1901 if (IS_ERR(tgt_net))
1898 tgt_net = net; 1902 return PTR_ERR(tgt_net);
1899 netnsid = -1;
1900 }
1901 } 1903 }
1902 1904
1903 if (tb[IFLA_EXT_MASK]) 1905 if (tb[IFLA_EXT_MASK])
@@ -2806,7 +2808,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2806 } 2808 }
2807 2809
2808 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { 2810 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2809 __dev_notify_flags(dev, old_flags, 0U); 2811 __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
2810 } else { 2812 } else {
2811 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2813 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2812 __dev_notify_flags(dev, old_flags, ~0U); 2814 __dev_notify_flags(dev, old_flags, ~0U);
@@ -2833,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
2833 else if (ops->get_num_rx_queues) 2835 else if (ops->get_num_rx_queues)
2834 num_rx_queues = ops->get_num_rx_queues(); 2836 num_rx_queues = ops->get_num_rx_queues();
2835 2837
2838 if (num_tx_queues < 1 || num_tx_queues > 4096)
2839 return ERR_PTR(-EINVAL);
2840
2841 if (num_rx_queues < 1 || num_rx_queues > 4096)
2842 return ERR_PTR(-EINVAL);
2843
2836 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, 2844 dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
2837 ops->setup, num_tx_queues, num_rx_queues); 2845 ops->setup, num_tx_queues, num_rx_queues);
2838 if (!dev) 2846 if (!dev)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c996c09d095f..b2c807f67aba 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
939 939
940 WARN_ON_ONCE(!in_task()); 940 WARN_ON_ONCE(!in_task());
941 941
942 if (!sock_flag(sk, SOCK_ZEROCOPY))
943 return NULL;
944
945 skb = sock_omalloc(sk, 0, GFP_KERNEL); 942 skb = sock_omalloc(sk, 0, GFP_KERNEL);
946 if (!skb) 943 if (!skb)
947 return NULL; 944 return NULL;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index d28d46bff6ab..85d6c879383d 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
606 if (sk->sk_state == DCCP_LISTEN) { 606 if (sk->sk_state == DCCP_LISTEN) {
607 if (dh->dccph_type == DCCP_PKT_REQUEST) { 607 if (dh->dccph_type == DCCP_PKT_REQUEST) {
608 /* It is possible that we process SYN packets from backlog, 608 /* It is possible that we process SYN packets from backlog,
609 * so we need to make sure to disable BH right there. 609 * so we need to make sure to disable BH and RCU right there.
610 */ 610 */
611 rcu_read_lock();
611 local_bh_disable(); 612 local_bh_disable();
612 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; 613 acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
613 local_bh_enable(); 614 local_bh_enable();
615 rcu_read_unlock();
614 if (!acceptable) 616 if (!acceptable)
615 return 1; 617 return 1;
616 consume_skb(skb); 618 consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b08feb219b44..8e08cea6f178 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
493 493
494 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr, 494 dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
495 ireq->ir_rmt_addr); 495 ireq->ir_rmt_addr);
496 rcu_read_lock();
496 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 497 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
497 ireq->ir_rmt_addr, 498 ireq->ir_rmt_addr,
498 ireq_opt_deref(ireq)); 499 rcu_dereference(ireq->ireq_opt));
500 rcu_read_unlock();
499 err = net_xmit_eval(err); 501 err = net_xmit_eval(err);
500 } 502 }
501 503
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index e63c554e0623..9f3209ff7ffd 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -19,12 +19,10 @@
19#include <linux/of_mdio.h> 19#include <linux/of_mdio.h>
20#include <linux/of_platform.h> 20#include <linux/of_platform.h>
21#include <linux/of_net.h> 21#include <linux/of_net.h>
22#include <linux/of_gpio.h>
23#include <linux/netdevice.h> 22#include <linux/netdevice.h>
24#include <linux/sysfs.h> 23#include <linux/sysfs.h>
25#include <linux/phy_fixed.h> 24#include <linux/phy_fixed.h>
26#include <linux/ptp_classify.h> 25#include <linux/ptp_classify.h>
27#include <linux/gpio/consumer.h>
28#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
29 27
30#include "dsa_priv.h" 28#include "dsa_priv.h"
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 962c4fd338ba..1c45c1d6d241 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
767 const struct tc_action *a; 767 const struct tc_action *a;
768 struct dsa_port *to_dp; 768 struct dsa_port *to_dp;
769 int err = -EOPNOTSUPP; 769 int err = -EOPNOTSUPP;
770 LIST_HEAD(actions);
771 770
772 if (!ds->ops->port_mirror_add) 771 if (!ds->ops->port_mirror_add)
773 return err; 772 return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
775 if (!tcf_exts_has_one_action(cls->exts)) 774 if (!tcf_exts_has_one_action(cls->exts))
776 return err; 775 return err;
777 776
778 tcf_exts_to_list(cls->exts, &actions); 777 a = tcf_exts_first_action(cls->exts);
779 a = list_first_entry(&actions, struct tc_action, list);
780 778
781 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { 779 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
782 struct dsa_mall_mirror_tc_entry *mirror; 780 struct dsa_mall_mirror_tc_entry *mirror;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 20fda8fb8ffd..1fbe2f815474 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1377 if (encap) 1377 if (encap)
1378 skb_reset_inner_headers(skb); 1378 skb_reset_inner_headers(skb);
1379 skb->network_header = (u8 *)iph - skb->head; 1379 skb->network_header = (u8 *)iph - skb->head;
1380 skb_reset_mac_len(skb);
1380 } while ((skb = skb->next)); 1381 } while ((skb = skb->next));
1381 1382
1382out: 1383out:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cf75f8944b05..4da39446da2d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
820 spin_lock(&im->lock); 820 spin_lock(&im->lock);
821 im->tm_running = 0; 821 im->tm_running = 0;
822 822
823 if (im->unsolicit_count) { 823 if (im->unsolicit_count && --im->unsolicit_count)
824 im->unsolicit_count--;
825 igmp_start_timer(im, unsolicited_report_interval(in_dev)); 824 igmp_start_timer(im, unsolicited_report_interval(in_dev));
826 } 825
827 im->reporter = 1; 826 im->reporter = 1;
828 spin_unlock(&im->lock); 827 spin_unlock(&im->lock);
829 828
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
1308 1307
1309 if (in_dev->dead) 1308 if (in_dev->dead)
1310 return; 1309 return;
1310
1311 im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
1311 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { 1312 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
1312 spin_lock_bh(&im->lock); 1313 spin_lock_bh(&im->lock);
1313 igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); 1314 igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
1391 unsigned int mode) 1392 unsigned int mode)
1392{ 1393{
1393 struct ip_mc_list *im; 1394 struct ip_mc_list *im;
1394#ifdef CONFIG_IP_MULTICAST
1395 struct net *net = dev_net(in_dev->dev);
1396#endif
1397 1395
1398 ASSERT_RTNL(); 1396 ASSERT_RTNL();
1399 1397
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
1420 spin_lock_init(&im->lock); 1418 spin_lock_init(&im->lock);
1421#ifdef CONFIG_IP_MULTICAST 1419#ifdef CONFIG_IP_MULTICAST
1422 timer_setup(&im->timer, igmp_timer_expire, 0); 1420 timer_setup(&im->timer, igmp_timer_expire, 0);
1423 im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
1424#endif 1421#endif
1425 1422
1426 im->next_rcu = in_dev->mc_list; 1423 im->next_rcu = in_dev->mc_list;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index dfd5009f96ef..15e7f7915a21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
544 struct ip_options_rcu *opt; 544 struct ip_options_rcu *opt;
545 struct rtable *rt; 545 struct rtable *rt;
546 546
547 opt = ireq_opt_deref(ireq); 547 rcu_read_lock();
548 opt = rcu_dereference(ireq->ireq_opt);
548 549
549 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, 550 flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
550 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 551 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
558 goto no_route; 559 goto no_route;
559 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) 560 if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
560 goto route_err; 561 goto route_err;
562 rcu_read_unlock();
561 return &rt->dst; 563 return &rt->dst;
562 564
563route_err: 565route_err:
564 ip_rt_put(rt); 566 ip_rt_put(rt);
565no_route: 567no_route:
568 rcu_read_unlock();
566 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); 569 __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
567 return NULL; 570 return NULL;
568} 571}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 88281fbce88c..e7227128df2c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
599 nextp = &fp->next; 599 nextp = &fp->next;
600 fp->prev = NULL; 600 fp->prev = NULL;
601 memset(&fp->rbnode, 0, sizeof(fp->rbnode)); 601 memset(&fp->rbnode, 0, sizeof(fp->rbnode));
602 fp->sk = NULL;
602 head->data_len += fp->len; 603 head->data_len += fp->len;
603 head->len += fp->len; 604 head->len += fp->len;
604 if (head->ip_summed != fp->ip_summed) 605 if (head->ip_summed != fp->ip_summed)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 51a5d06085ac..8cce0e9ea08c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
178 178
179 if (tpi->proto == htons(ETH_P_TEB)) 179 if (tpi->proto == htons(ETH_P_TEB))
180 itn = net_generic(net, gre_tap_net_id); 180 itn = net_generic(net, gre_tap_net_id);
181 else if (tpi->proto == htons(ETH_P_ERSPAN) ||
182 tpi->proto == htons(ETH_P_ERSPAN2))
183 itn = net_generic(net, erspan_net_id);
181 else 184 else
182 itn = net_generic(net, ipgre_net_id); 185 itn = net_generic(net, ipgre_net_id);
183 186
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
328 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); 331 ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
329 return PACKET_RCVD; 332 return PACKET_RCVD;
330 } 333 }
334 return PACKET_REJECT;
335
331drop: 336drop:
332 kfree_skb(skb); 337 kfree_skb(skb);
333 return PACKET_RCVD; 338 return PACKET_RCVD;
@@ -1508,11 +1513,14 @@ nla_put_failure:
1508 1513
1509static void erspan_setup(struct net_device *dev) 1514static void erspan_setup(struct net_device *dev)
1510{ 1515{
1516 struct ip_tunnel *t = netdev_priv(dev);
1517
1511 ether_setup(dev); 1518 ether_setup(dev);
1512 dev->netdev_ops = &erspan_netdev_ops; 1519 dev->netdev_ops = &erspan_netdev_ops;
1513 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1520 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1514 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1521 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1515 ip_tunnel_setup(dev, erspan_net_id); 1522 ip_tunnel_setup(dev, erspan_net_id);
1523 t->erspan_ver = 1;
1516} 1524}
1517 1525
1518static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { 1526static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c0fe5ad996f2..26c36cccabdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 struct sockaddr_in sin; 151 struct sockaddr_in sin;
152 const struct iphdr *iph = ip_hdr(skb);
153 __be16 *ports; 152 __be16 *ports;
154 int end; 153 int end;
155 154
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
164 ports = (__be16 *)skb_transport_header(skb); 163 ports = (__be16 *)skb_transport_header(skb);
165 164
166 sin.sin_family = AF_INET; 165 sin.sin_family = AF_INET;
167 sin.sin_addr.s_addr = iph->daddr; 166 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
168 sin.sin_port = ports[1]; 167 sin.sin_port = ports[1];
169 memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); 168 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
170 169
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..284a22154b4e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
627 const struct iphdr *tnl_params, u8 protocol) 627 const struct iphdr *tnl_params, u8 protocol)
628{ 628{
629 struct ip_tunnel *tunnel = netdev_priv(dev); 629 struct ip_tunnel *tunnel = netdev_priv(dev);
630 unsigned int inner_nhdr_len = 0;
630 const struct iphdr *inner_iph; 631 const struct iphdr *inner_iph;
631 struct flowi4 fl4; 632 struct flowi4 fl4;
632 u8 tos, ttl; 633 u8 tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
636 __be32 dst; 637 __be32 dst;
637 bool connected; 638 bool connected;
638 639
640 /* ensure we can access the inner net header, for several users below */
641 if (skb->protocol == htons(ETH_P_IP))
642 inner_nhdr_len = sizeof(struct iphdr);
643 else if (skb->protocol == htons(ETH_P_IPV6))
644 inner_nhdr_len = sizeof(struct ipv6hdr);
645 if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
646 goto tx_error;
647
639 inner_iph = (const struct iphdr *)skb_inner_network_header(skb); 648 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
640 connected = (tunnel->parms.iph.daddr != 0); 649 connected = (tunnel->parms.iph.daddr != 0);
641 650
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d9504adc47b3..184bf2e0a1ed 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
106 106
107if NF_NAT_IPV4 107if NF_NAT_IPV4
108 108
109config NF_NAT_MASQUERADE_IPV4
110 bool
111
112if NF_TABLES
109config NFT_CHAIN_NAT_IPV4 113config NFT_CHAIN_NAT_IPV4
110 depends on NF_TABLES_IPV4 114 depends on NF_TABLES_IPV4
111 tristate "IPv4 nf_tables nat chain support" 115 tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
115 packet transformations such as the source, destination address and 119 packet transformations such as the source, destination address and
116 source and destination ports. 120 source and destination ports.
117 121
118config NF_NAT_MASQUERADE_IPV4
119 bool
120
121config NFT_MASQ_IPV4 122config NFT_MASQ_IPV4
122 tristate "IPv4 masquerading support for nf_tables" 123 tristate "IPv4 masquerading support for nf_tables"
123 depends on NF_TABLES_IPV4 124 depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
135 help 136 help
136 This is the expression that provides IPv4 redirect support for 137 This is the expression that provides IPv4 redirect support for
137 nf_tables. 138 nf_tables.
139endif # NF_TABLES
138 140
139config NF_NAT_SNMP_BASIC 141config NF_NAT_SNMP_BASIC
140 tristate "Basic SNMP-ALG support" 142 tristate "Basic SNMP-ALG support"
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b92f422f2fa8..891ed2f91467 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
48static int ip_ping_group_range_min[] = { 0, 0 }; 48static int ip_ping_group_range_min[] = { 0, 0 };
49static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 49static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
50static int comp_sack_nr_max = 255; 50static int comp_sack_nr_max = 255;
51static u32 u32_max_div_HZ = UINT_MAX / HZ;
51 52
52/* obsolete */ 53/* obsolete */
53static int sysctl_tcp_low_latency __read_mostly; 54static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
745 { 746 {
746 .procname = "tcp_probe_interval", 747 .procname = "tcp_probe_interval",
747 .data = &init_net.ipv4.sysctl_tcp_probe_interval, 748 .data = &init_net.ipv4.sysctl_tcp_probe_interval,
748 .maxlen = sizeof(int), 749 .maxlen = sizeof(u32),
749 .mode = 0644, 750 .mode = 0644,
750 .proc_handler = proc_dointvec, 751 .proc_handler = proc_douintvec_minmax,
752 .extra2 = &u32_max_div_HZ,
751 }, 753 },
752 { 754 {
753 .procname = "igmp_link_local_mcast_reports", 755 .procname = "igmp_link_local_mcast_reports",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b8af2fec5ad5..10c6246396cc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1185 1185
1186 flags = msg->msg_flags; 1186 flags = msg->msg_flags;
1187 1187
1188 if (flags & MSG_ZEROCOPY && size) { 1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
1189 if (sk->sk_state != TCP_ESTABLISHED) { 1189 if (sk->sk_state != TCP_ESTABLISHED) {
1190 err = -EINVAL; 1190 err = -EINVAL;
1191 goto out_err; 1191 goto out_err;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 13d34427ca3d..02ff2dde9609 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -95,11 +95,10 @@ struct bbr {
95 u32 mode:3, /* current bbr_mode in state machine */ 95 u32 mode:3, /* current bbr_mode in state machine */
96 prev_ca_state:3, /* CA state on previous ACK */ 96 prev_ca_state:3, /* CA state on previous ACK */
97 packet_conservation:1, /* use packet conservation? */ 97 packet_conservation:1, /* use packet conservation? */
98 restore_cwnd:1, /* decided to revert cwnd to old value */
99 round_start:1, /* start of packet-timed tx->ack round? */ 98 round_start:1, /* start of packet-timed tx->ack round? */
100 idle_restart:1, /* restarting after idle? */ 99 idle_restart:1, /* restarting after idle? */
101 probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ 100 probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
102 unused:12, 101 unused:13,
103 lt_is_sampling:1, /* taking long-term ("LT") samples now? */ 102 lt_is_sampling:1, /* taking long-term ("LT") samples now? */
104 lt_rtt_cnt:7, /* round trips in long-term interval */ 103 lt_rtt_cnt:7, /* round trips in long-term interval */
105 lt_use_bw:1; /* use lt_bw as our bw estimate? */ 104 lt_use_bw:1; /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
175/* If we estimate we're policed, use lt_bw for this many round trips: */ 174/* If we estimate we're policed, use lt_bw for this many round trips: */
176static const u32 bbr_lt_bw_max_rtts = 48; 175static const u32 bbr_lt_bw_max_rtts = 48;
177 176
177static void bbr_check_probe_rtt_done(struct sock *sk);
178
178/* Do we estimate that STARTUP filled the pipe? */ 179/* Do we estimate that STARTUP filled the pipe? */
179static bool bbr_full_bw_reached(const struct sock *sk) 180static bool bbr_full_bw_reached(const struct sock *sk)
180{ 181{
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
309 */ 310 */
310 if (bbr->mode == BBR_PROBE_BW) 311 if (bbr->mode == BBR_PROBE_BW)
311 bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); 312 bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
313 else if (bbr->mode == BBR_PROBE_RTT)
314 bbr_check_probe_rtt_done(sk);
312 } 315 }
313} 316}
314 317
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
396 cwnd = tcp_packets_in_flight(tp) + acked; 399 cwnd = tcp_packets_in_flight(tp) + acked;
397 } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { 400 } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
398 /* Exiting loss recovery; restore cwnd saved before recovery. */ 401 /* Exiting loss recovery; restore cwnd saved before recovery. */
399 bbr->restore_cwnd = 1; 402 cwnd = max(cwnd, bbr->prior_cwnd);
400 bbr->packet_conservation = 0; 403 bbr->packet_conservation = 0;
401 } 404 }
402 bbr->prev_ca_state = state; 405 bbr->prev_ca_state = state;
403 406
404 if (bbr->restore_cwnd) {
405 /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
406 cwnd = max(cwnd, bbr->prior_cwnd);
407 bbr->restore_cwnd = 0;
408 }
409
410 if (bbr->packet_conservation) { 407 if (bbr->packet_conservation) {
411 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); 408 *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
412 return true; /* yes, using packet conservation */ 409 return true; /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
423{ 420{
424 struct tcp_sock *tp = tcp_sk(sk); 421 struct tcp_sock *tp = tcp_sk(sk);
425 struct bbr *bbr = inet_csk_ca(sk); 422 struct bbr *bbr = inet_csk_ca(sk);
426 u32 cwnd = 0, target_cwnd = 0; 423 u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
427 424
428 if (!acked) 425 if (!acked)
429 return; 426 goto done; /* no packet fully ACKed; just apply caps */
430 427
431 if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) 428 if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
432 goto done; 429 goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
748 bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ 745 bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
749} 746}
750 747
748static void bbr_check_probe_rtt_done(struct sock *sk)
749{
750 struct tcp_sock *tp = tcp_sk(sk);
751 struct bbr *bbr = inet_csk_ca(sk);
752
753 if (!(bbr->probe_rtt_done_stamp &&
754 after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
755 return;
756
757 bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */
758 tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
759 bbr_reset_mode(sk);
760}
761
751/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and 762/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
752 * periodically drain the bottleneck queue, to converge to measure the true 763 * periodically drain the bottleneck queue, to converge to measure the true
753 * min_rtt (unloaded propagation delay). This allows the flows to keep queues 764 * min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
806 } else if (bbr->probe_rtt_done_stamp) { 817 } else if (bbr->probe_rtt_done_stamp) {
807 if (bbr->round_start) 818 if (bbr->round_start)
808 bbr->probe_rtt_round_done = 1; 819 bbr->probe_rtt_round_done = 1;
809 if (bbr->probe_rtt_round_done && 820 if (bbr->probe_rtt_round_done)
810 after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) { 821 bbr_check_probe_rtt_done(sk);
811 bbr->min_rtt_stamp = tcp_jiffies32;
812 bbr->restore_cwnd = 1; /* snap to prior_cwnd */
813 bbr_reset_mode(sk);
814 }
815 } 822 }
816 } 823 }
817 /* Restart after idle ends only once we process a new S/ACK for data */ 824 /* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
862 bbr->has_seen_rtt = 0; 869 bbr->has_seen_rtt = 0;
863 bbr_init_pacing_rate_from_rtt(sk); 870 bbr_init_pacing_rate_from_rtt(sk);
864 871
865 bbr->restore_cwnd = 0;
866 bbr->round_start = 0; 872 bbr->round_start = 0;
867 bbr->idle_restart = 0; 873 bbr->idle_restart = 0;
868 bbr->full_bw_reached = 0; 874 bbr->full_bw_reached = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4c2dd9f863f7..47e08c1b5bc3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
6009 if (th->fin) 6009 if (th->fin)
6010 goto discard; 6010 goto discard;
6011 /* It is possible that we process SYN packets from backlog, 6011 /* It is possible that we process SYN packets from backlog,
6012 * so we need to make sure to disable BH right there. 6012 * so we need to make sure to disable BH and RCU right there.
6013 */ 6013 */
6014 rcu_read_lock();
6014 local_bh_disable(); 6015 local_bh_disable();
6015 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; 6016 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
6016 local_bh_enable(); 6017 local_bh_enable();
6018 rcu_read_unlock();
6017 6019
6018 if (!acceptable) 6020 if (!acceptable)
6019 return 1; 6021 return 1;
@@ -6367,8 +6369,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
6367 if (!queue->synflood_warned && 6369 if (!queue->synflood_warned &&
6368 net->ipv4.sysctl_tcp_syncookies != 2 && 6370 net->ipv4.sysctl_tcp_syncookies != 2 &&
6369 xchg(&queue->synflood_warned, 1) == 0) 6371 xchg(&queue->synflood_warned, 1) == 0)
6370 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", 6372 net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
6371 proto, ntohs(tcp_hdr(skb)->dest), msg); 6373 proto, ntohs(tcp_hdr(skb)->dest), msg);
6372 6374
6373 return want_cookie; 6375 return want_cookie;
6374} 6376}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9e041fa5c545..cd426313a298 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
943 if (skb) { 943 if (skb) {
944 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 944 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
945 945
946 rcu_read_lock();
946 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 947 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
947 ireq->ir_rmt_addr, 948 ireq->ir_rmt_addr,
948 ireq_opt_deref(ireq)); 949 rcu_dereference(ireq->ireq_opt));
950 rcu_read_unlock();
949 err = net_xmit_eval(err); 951 err = net_xmit_eval(err);
950 } 952 }
951 953
@@ -2517,6 +2519,12 @@ static int __net_init tcp_sk_init(struct net *net)
2517 if (res) 2519 if (res)
2518 goto fail; 2520 goto fail;
2519 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 2521 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2522
2523 /* Please enforce IP_DF and IPID==0 for RST and
2524 * ACK sent in SYN-RECV and TIME-WAIT state.
2525 */
2526 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2527
2520 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; 2528 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2521 } 2529 }
2522 2530
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 75ef332a7caf..12affb7864d9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -184,8 +184,9 @@ kill:
184 inet_twsk_deschedule_put(tw); 184 inet_twsk_deschedule_put(tw);
185 return TCP_TW_SUCCESS; 185 return TCP_TW_SUCCESS;
186 } 186 }
187 } else {
188 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
187 } 189 }
188 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
189 190
190 if (tmp_opt.saw_tstamp) { 191 if (tmp_opt.saw_tstamp) {
191 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 192 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f4e35b2ff8b8..7d69dd6fa7e8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2124 inet_compute_pseudo); 2124 inet_compute_pseudo);
2125} 2125}
2126 2126
2127/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
2128 * return code conversion for ip layer consumption
2129 */
2130static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
2131 struct udphdr *uh)
2132{
2133 int ret;
2134
2135 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2136 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2137 inet_compute_pseudo);
2138
2139 ret = udp_queue_rcv_skb(sk, skb);
2140
2141 /* a return value > 0 means to resubmit the input, but
2142 * it wants the return to be -protocol, or 0
2143 */
2144 if (ret > 0)
2145 return -ret;
2146 return 0;
2147}
2148
2127/* 2149/*
2128 * All we need to do is get the socket, and then do a checksum. 2150 * All we need to do is get the socket, and then do a checksum.
2129 */ 2151 */
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2170 if (unlikely(sk->sk_rx_dst != dst)) 2192 if (unlikely(sk->sk_rx_dst != dst))
2171 udp_sk_rx_dst_set(sk, dst); 2193 udp_sk_rx_dst_set(sk, dst);
2172 2194
2173 ret = udp_queue_rcv_skb(sk, skb); 2195 ret = udp_unicast_rcv_skb(sk, skb, uh);
2174 sock_put(sk); 2196 sock_put(sk);
2175 /* a return value > 0 means to resubmit the input, but 2197 return ret;
2176 * it wants the return to be -protocol, or 0
2177 */
2178 if (ret > 0)
2179 return -ret;
2180 return 0;
2181 } 2198 }
2182 2199
2183 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) 2200 if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
2185 saddr, daddr, udptable, proto); 2202 saddr, daddr, udptable, proto);
2186 2203
2187 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 2204 sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
2188 if (sk) { 2205 if (sk)
2189 int ret; 2206 return udp_unicast_rcv_skb(sk, skb, uh);
2190
2191 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
2192 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
2193 inet_compute_pseudo);
2194
2195 ret = udp_queue_rcv_skb(sk, skb);
2196
2197 /* a return value > 0 means to resubmit the input, but
2198 * it wants the return to be -protocol, or 0
2199 */
2200 if (ret > 0)
2201 return -ret;
2202 return 0;
2203 }
2204 2207
2205 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 2208 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2206 goto drop; 2209 goto drop;
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index bcfc00e88756..f8de2482a529 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
67 67
68 if (xo && (xo->flags & XFRM_GRO)) { 68 if (xo && (xo->flags & XFRM_GRO)) {
69 skb_mac_header_rebuild(skb); 69 skb_mac_header_rebuild(skb);
70 skb_reset_transport_header(skb);
70 return 0; 71 return 0;
71 } 72 }
72 73
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
index 3d36644890bb..1ad2c2c4e250 100644
--- a/net/ipv4/xfrm4_mode_transport.c
+++ b/net/ipv4/xfrm4_mode_transport.c
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
46static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) 46static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
47{ 47{
48 int ihl = skb->data - skb_transport_header(skb); 48 int ihl = skb->data - skb_transport_header(skb);
49 struct xfrm_offload *xo = xfrm_offload(skb);
50 49
51 if (skb->transport_header != skb->network_header) { 50 if (skb->transport_header != skb->network_header) {
52 memmove(skb_transport_header(skb), 51 memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
54 skb->network_header = skb->transport_header; 53 skb->network_header = skb->transport_header;
55 } 54 }
56 ip_hdr(skb)->tot_len = htons(skb->len + ihl); 55 ip_hdr(skb)->tot_len = htons(skb->len + ihl);
57 if (!xo || !(xo->flags & XFRM_GRO)) 56 skb_reset_transport_header(skb);
58 skb_reset_transport_header(skb);
59 return 0; 57 return 0;
60} 58}
61 59
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fac4ad74867..c63ccce6425f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
2398 2398
2399 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); 2399 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2400 2400
2401 ip6_route_add(&cfg, GFP_ATOMIC, NULL); 2401 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2402} 2402}
2403 2403
2404static struct inet6_dev *addrconf_add_dev(struct net_device *dev) 2404static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
3062 if (addr.s6_addr32[3]) { 3062 if (addr.s6_addr32[3]) {
3063 add_addr(idev, &addr, plen, scope); 3063 add_addr(idev, &addr, plen, scope);
3064 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags, 3064 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3065 GFP_ATOMIC); 3065 GFP_KERNEL);
3066 return; 3066 return;
3067 } 3067 }
3068 3068
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
3087 3087
3088 add_addr(idev, &addr, plen, flag); 3088 add_addr(idev, &addr, plen, flag);
3089 addrconf_prefix_route(&addr, plen, 0, idev->dev, 3089 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3090 0, pflags, GFP_ATOMIC); 3090 0, pflags, GFP_KERNEL);
3091 } 3091 }
3092 } 3092 }
3093 } 3093 }
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4201 p++; 4201 p++;
4202 continue; 4202 continue;
4203 } 4203 }
4204 state->offset++;
4205 return ifa; 4204 return ifa;
4206 } 4205 }
4207 4206
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4225 return ifa; 4224 return ifa;
4226 } 4225 }
4227 4226
4227 state->offset = 0;
4228 while (++state->bucket < IN6_ADDR_HSIZE) { 4228 while (++state->bucket < IN6_ADDR_HSIZE) {
4229 state->offset = 0;
4230 hlist_for_each_entry_rcu(ifa, 4229 hlist_for_each_entry_rcu(ifa,
4231 &inet6_addr_lst[state->bucket], addr_lst) { 4230 &inet6_addr_lst[state->bucket], addr_lst) {
4232 if (!net_eq(dev_net(ifa->idev->dev), net)) 4231 if (!net_eq(dev_net(ifa->idev->dev), net))
4233 continue; 4232 continue;
4234 state->offset++;
4235 return ifa; 4233 return ifa;
4236 } 4234 }
4237 } 4235 }
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 673bba31eb18..9a4261e50272 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
938 938
939 err = proto_register(&pingv6_prot, 1); 939 err = proto_register(&pingv6_prot, 1);
940 if (err) 940 if (err)
941 goto out_unregister_ping_proto; 941 goto out_unregister_raw_proto;
942 942
943 /* We MUST register RAW sockets before we create the ICMP6, 943 /* We MUST register RAW sockets before we create the ICMP6,
944 * IGMP6, or NDISC control sockets. 944 * IGMP6, or NDISC control sockets.
945 */ 945 */
946 err = rawv6_init(); 946 err = rawv6_init();
947 if (err) 947 if (err)
948 goto out_unregister_raw_proto; 948 goto out_unregister_ping_proto;
949 949
950 /* Register the family here so that the init calls below will 950 /* Register the family here so that the init calls below will
951 * be able to create sockets. (?? is this dangerous ??) 951 * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
1113igmp_fail: 1113igmp_fail:
1114 ndisc_cleanup(); 1114 ndisc_cleanup();
1115ndisc_fail: 1115ndisc_fail:
1116 ip6_mr_cleanup(); 1116 icmpv6_cleanup();
1117icmp_fail: 1117icmp_fail:
1118 unregister_pernet_subsys(&inet6_net_ops); 1118 ip6_mr_cleanup();
1119ipmr_fail: 1119ipmr_fail:
1120 icmpv6_cleanup(); 1120 unregister_pernet_subsys(&inet6_net_ops);
1121register_pernet_fail: 1121register_pernet_fail:
1122 sock_unregister(PF_INET6); 1122 sock_unregister(PF_INET6);
1123 rtnl_unregister_all(PF_INET6); 1123 rtnl_unregister_all(PF_INET6);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index d212738e9d10..5516f55e214b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
198 } 198 }
199 } 199 }
200 200
201 lwtstate_put(f6i->fib6_nh.nh_lwtstate);
202
201 if (f6i->fib6_nh.nh_dev) 203 if (f6i->fib6_nh.nh_dev)
202 dev_put(f6i->fib6_nh.nh_dev); 204 dev_put(f6i->fib6_nh.nh_dev);
203 205
@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
987 fib6_clean_expires(iter); 989 fib6_clean_expires(iter);
988 else 990 else
989 fib6_set_expires(iter, rt->expires); 991 fib6_set_expires(iter, rt->expires);
990 fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu); 992
993 if (rt->fib6_pmtu)
994 fib6_metric_set(iter, RTAX_MTU,
995 rt->fib6_pmtu);
991 return -EEXIST; 996 return -EEXIST;
992 } 997 }
993 /* If we have the same destination and the same metric, 998 /* If we have the same destination and the same metric,
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 18a3794b0f52..e493b041d4ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1778 if (data[IFLA_GRE_COLLECT_METADATA]) 1778 if (data[IFLA_GRE_COLLECT_METADATA])
1779 parms->collect_md = true; 1779 parms->collect_md = true;
1780 1780
1781 parms->erspan_ver = 1;
1781 if (data[IFLA_GRE_ERSPAN_VER]) 1782 if (data[IFLA_GRE_ERSPAN_VER])
1782 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); 1783 parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1783 1784
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 37ff4805b20c..c7e495f12011 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
115 payload_len = skb->len - nhoff - sizeof(*ipv6h); 115 payload_len = skb->len - nhoff - sizeof(*ipv6h);
116 ipv6h->payload_len = htons(payload_len); 116 ipv6h->payload_len = htons(payload_len);
117 skb->network_header = (u8 *)ipv6h - skb->head; 117 skb->network_header = (u8 *)ipv6h - skb->head;
118 skb_reset_mac_len(skb);
118 119
119 if (udpfrag) { 120 if (udpfrag) {
120 int err = ip6_find_1stfragopt(skb, &prevhdr); 121 int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f200f06500..f9f8f554d141 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
219 kfree_skb(skb); 219 kfree_skb(skb);
220 return -ENOBUFS; 220 return -ENOBUFS;
221 } 221 }
222 if (skb->sk)
223 skb_set_owner_w(skb2, skb->sk);
222 consume_skb(skb); 224 consume_skb(skb);
223 skb = skb2; 225 skb = skb2;
224 /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
225 * it is safe to call in our context (socket lock not held)
226 */
227 skb_set_owner_w(skb, (struct sock *)sk);
228 } 226 }
229 if (opt->opt_flen) 227 if (opt->opt_flen)
230 ipv6_push_frag_opts(skb, opt, &proto); 228 ipv6_push_frag_opts(skb, opt, &proto);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5df2a58d945c..a0b6932c3afd 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1188,7 +1188,15 @@ route_lookup:
1188 init_tel_txopt(&opt, encap_limit); 1188 init_tel_txopt(&opt, encap_limit);
1189 ipv6_push_frag_opts(skb, &opt.ops, &proto); 1189 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1190 } 1190 }
1191 hop_limit = hop_limit ? : ip6_dst_hoplimit(dst); 1191
1192 if (hop_limit == 0) {
1193 if (skb->protocol == htons(ETH_P_IP))
1194 hop_limit = ip_hdr(skb)->ttl;
1195 else if (skb->protocol == htons(ETH_P_IPV6))
1196 hop_limit = ipv6_hdr(skb)->hop_limit;
1197 else
1198 hop_limit = ip6_dst_hoplimit(dst);
1199 }
1192 1200
1193 /* Calculate max headroom for all the headers and adjust 1201 /* Calculate max headroom for all the headers and adjust
1194 * needed_headroom if necessary. 1202 * needed_headroom if necessary.
@@ -1226,7 +1234,7 @@ static inline int
1226ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1234ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1227{ 1235{
1228 struct ip6_tnl *t = netdev_priv(dev); 1236 struct ip6_tnl *t = netdev_priv(dev);
1229 const struct iphdr *iph = ip_hdr(skb); 1237 const struct iphdr *iph;
1230 int encap_limit = -1; 1238 int encap_limit = -1;
1231 struct flowi6 fl6; 1239 struct flowi6 fl6;
1232 __u8 dsfield; 1240 __u8 dsfield;
@@ -1234,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1234 u8 tproto; 1242 u8 tproto;
1235 int err; 1243 int err;
1236 1244
1245 /* ensure we can access the full inner ip header */
1246 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
1247 return -1;
1248
1249 iph = ip_hdr(skb);
1237 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1250 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1238 1251
1239 tproto = READ_ONCE(t->parms.proto); 1252 tproto = READ_ONCE(t->parms.proto);
@@ -1298,7 +1311,7 @@ static inline int
1298ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1311ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1299{ 1312{
1300 struct ip6_tnl *t = netdev_priv(dev); 1313 struct ip6_tnl *t = netdev_priv(dev);
1301 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 1314 struct ipv6hdr *ipv6h;
1302 int encap_limit = -1; 1315 int encap_limit = -1;
1303 __u16 offset; 1316 __u16 offset;
1304 struct flowi6 fl6; 1317 struct flowi6 fl6;
@@ -1307,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1307 u8 tproto; 1320 u8 tproto;
1308 int err; 1321 int err;
1309 1322
1323 if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
1324 return -1;
1325
1326 ipv6h = ipv6_hdr(skb);
1310 tproto = READ_ONCE(t->parms.proto); 1327 tproto = READ_ONCE(t->parms.proto);
1311 if ((tproto != IPPROTO_IPV6 && tproto != 0) || 1328 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1312 ip6_tnl_addr_conflict(t, ipv6h)) 1329 ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 38dec9da90d3..eeaf7455d51e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
481 } 481 }
482 482
483 mtu = dst_mtu(dst); 483 mtu = dst_mtu(dst);
484 if (!skb->ignore_df && skb->len > mtu) { 484 if (skb->len > mtu) {
485 skb_dst_update_pmtu(skb, mtu); 485 skb_dst_update_pmtu(skb, mtu);
486 486
487 if (skb->protocol == htons(ETH_P_IPV6)) { 487 if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
1094 } 1094 }
1095 1095
1096 t = rtnl_dereference(ip6n->tnls_wc[0]); 1096 t = rtnl_dereference(ip6n->tnls_wc[0]);
1097 unregister_netdevice_queue(t->dev, list); 1097 if (t)
1098 unregister_netdevice_queue(t->dev, list);
1098} 1099}
1099 1100
1100static int __net_init vti6_init_net(struct net *net) 1101static int __net_init vti6_init_net(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 2a14d8b65924..8f68a518d9db 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
445 else if (head->ip_summed == CHECKSUM_COMPLETE) 445 else if (head->ip_summed == CHECKSUM_COMPLETE)
446 head->csum = csum_add(head->csum, fp->csum); 446 head->csum = csum_add(head->csum, fp->csum);
447 head->truesize += fp->truesize; 447 head->truesize += fp->truesize;
448 fp->sk = NULL;
448 } 449 }
449 sub_frag_mem_limit(fq->q.net, head->truesize); 450 sub_frag_mem_limit(fq->q.net, head->truesize);
450 451
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7208c16302f6..a366c05a239d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
364 364
365static void ip6_dst_destroy(struct dst_entry *dst) 365static void ip6_dst_destroy(struct dst_entry *dst)
366{ 366{
367 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
367 struct rt6_info *rt = (struct rt6_info *)dst; 368 struct rt6_info *rt = (struct rt6_info *)dst;
368 struct fib6_info *from; 369 struct fib6_info *from;
369 struct inet6_dev *idev; 370 struct inet6_dev *idev;
370 371
371 dst_destroy_metrics_generic(dst); 372 if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
373 kfree(p);
374
372 rt6_uncached_list_del(rt); 375 rt6_uncached_list_del(rt);
373 376
374 idev = rt->rt6i_idev; 377 idev = rt->rt6i_idev;
@@ -946,8 +949,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
946 949
947static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort) 950static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
948{ 951{
949 rt->dst.flags |= fib6_info_dst_flags(ort);
950
951 if (ort->fib6_flags & RTF_REJECT) { 952 if (ort->fib6_flags & RTF_REJECT) {
952 ip6_rt_init_dst_reject(rt, ort); 953 ip6_rt_init_dst_reject(rt, ort);
953 return; 954 return;
@@ -956,7 +957,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
956 rt->dst.error = 0; 957 rt->dst.error = 0;
957 rt->dst.output = ip6_output; 958 rt->dst.output = ip6_output;
958 959
959 if (ort->fib6_type == RTN_LOCAL) { 960 if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
960 rt->dst.input = ip6_input; 961 rt->dst.input = ip6_input;
961 } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) { 962 } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
962 rt->dst.input = ip6_mc_input; 963 rt->dst.input = ip6_mc_input;
@@ -978,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
978 rt->rt6i_flags &= ~RTF_EXPIRES; 979 rt->rt6i_flags &= ~RTF_EXPIRES;
979 rcu_assign_pointer(rt->from, from); 980 rcu_assign_pointer(rt->from, from);
980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); 981 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
982 if (from->fib6_metrics != &dst_default_metrics) {
983 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
984 refcount_inc(&from->fib6_metrics->refcnt);
985 }
981} 986}
982 987
983/* Caller must already hold reference to @ort */ 988/* Caller must already hold reference to @ort */
@@ -996,7 +1001,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
996 rt->rt6i_src = ort->fib6_src; 1001 rt->rt6i_src = ort->fib6_src;
997#endif 1002#endif
998 rt->rt6i_prefsrc = ort->fib6_prefsrc; 1003 rt->rt6i_prefsrc = ort->fib6_prefsrc;
999 rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
1000} 1004}
1001 1005
1002static struct fib6_node* fib6_backtrack(struct fib6_node *fn, 1006static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@ -4317,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
4317 if (!nh) 4321 if (!nh)
4318 return -ENOMEM; 4322 return -ENOMEM;
4319 nh->fib6_info = rt; 4323 nh->fib6_info = rt;
4320 err = ip6_convert_metrics(net, rt, r_cfg);
4321 if (err) {
4322 kfree(nh);
4323 return err;
4324 }
4325 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); 4324 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4326 list_add_tail(&nh->next, rt6_nh_list); 4325 list_add_tail(&nh->next, rt6_nh_list);
4327 4326
@@ -4671,20 +4670,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4671 int iif, int type, u32 portid, u32 seq, 4670 int iif, int type, u32 portid, u32 seq,
4672 unsigned int flags) 4671 unsigned int flags)
4673{ 4672{
4674 struct rtmsg *rtm; 4673 struct rt6_info *rt6 = (struct rt6_info *)dst;
4674 struct rt6key *rt6_dst, *rt6_src;
4675 u32 *pmetrics, table, rt6_flags;
4675 struct nlmsghdr *nlh; 4676 struct nlmsghdr *nlh;
4677 struct rtmsg *rtm;
4676 long expires = 0; 4678 long expires = 0;
4677 u32 *pmetrics;
4678 u32 table;
4679 4679
4680 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); 4680 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4681 if (!nlh) 4681 if (!nlh)
4682 return -EMSGSIZE; 4682 return -EMSGSIZE;
4683 4683
4684 if (rt6) {
4685 rt6_dst = &rt6->rt6i_dst;
4686 rt6_src = &rt6->rt6i_src;
4687 rt6_flags = rt6->rt6i_flags;
4688 } else {
4689 rt6_dst = &rt->fib6_dst;
4690 rt6_src = &rt->fib6_src;
4691 rt6_flags = rt->fib6_flags;
4692 }
4693
4684 rtm = nlmsg_data(nlh); 4694 rtm = nlmsg_data(nlh);
4685 rtm->rtm_family = AF_INET6; 4695 rtm->rtm_family = AF_INET6;
4686 rtm->rtm_dst_len = rt->fib6_dst.plen; 4696 rtm->rtm_dst_len = rt6_dst->plen;
4687 rtm->rtm_src_len = rt->fib6_src.plen; 4697 rtm->rtm_src_len = rt6_src->plen;
4688 rtm->rtm_tos = 0; 4698 rtm->rtm_tos = 0;
4689 if (rt->fib6_table) 4699 if (rt->fib6_table)
4690 table = rt->fib6_table->tb6_id; 4700 table = rt->fib6_table->tb6_id;
@@ -4699,7 +4709,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4699 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 4709 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4700 rtm->rtm_protocol = rt->fib6_protocol; 4710 rtm->rtm_protocol = rt->fib6_protocol;
4701 4711
4702 if (rt->fib6_flags & RTF_CACHE) 4712 if (rt6_flags & RTF_CACHE)
4703 rtm->rtm_flags |= RTM_F_CLONED; 4713 rtm->rtm_flags |= RTM_F_CLONED;
4704 4714
4705 if (dest) { 4715 if (dest) {
@@ -4707,7 +4717,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4707 goto nla_put_failure; 4717 goto nla_put_failure;
4708 rtm->rtm_dst_len = 128; 4718 rtm->rtm_dst_len = 128;
4709 } else if (rtm->rtm_dst_len) 4719 } else if (rtm->rtm_dst_len)
4710 if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr)) 4720 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
4711 goto nla_put_failure; 4721 goto nla_put_failure;
4712#ifdef CONFIG_IPV6_SUBTREES 4722#ifdef CONFIG_IPV6_SUBTREES
4713 if (src) { 4723 if (src) {
@@ -4715,12 +4725,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4715 goto nla_put_failure; 4725 goto nla_put_failure;
4716 rtm->rtm_src_len = 128; 4726 rtm->rtm_src_len = 128;
4717 } else if (rtm->rtm_src_len && 4727 } else if (rtm->rtm_src_len &&
4718 nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr)) 4728 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
4719 goto nla_put_failure; 4729 goto nla_put_failure;
4720#endif 4730#endif
4721 if (iif) { 4731 if (iif) {
4722#ifdef CONFIG_IPV6_MROUTE 4732#ifdef CONFIG_IPV6_MROUTE
4723 if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) { 4733 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
4724 int err = ip6mr_get_route(net, skb, rtm, portid); 4734 int err = ip6mr_get_route(net, skb, rtm, portid);
4725 4735
4726 if (err == 0) 4736 if (err == 0)
@@ -4755,7 +4765,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4755 /* For multipath routes, walk the siblings list and add 4765 /* For multipath routes, walk the siblings list and add
4756 * each as a nexthop within RTA_MULTIPATH. 4766 * each as a nexthop within RTA_MULTIPATH.
4757 */ 4767 */
4758 if (rt->fib6_nsiblings) { 4768 if (rt6) {
4769 if (rt6_flags & RTF_GATEWAY &&
4770 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
4771 goto nla_put_failure;
4772
4773 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
4774 goto nla_put_failure;
4775 } else if (rt->fib6_nsiblings) {
4759 struct fib6_info *sibling, *next_sibling; 4776 struct fib6_info *sibling, *next_sibling;
4760 struct nlattr *mp; 4777 struct nlattr *mp;
4761 4778
@@ -4778,7 +4795,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4778 goto nla_put_failure; 4795 goto nla_put_failure;
4779 } 4796 }
4780 4797
4781 if (rt->fib6_flags & RTF_EXPIRES) { 4798 if (rt6_flags & RTF_EXPIRES) {
4782 expires = dst ? dst->expires : rt->expires; 4799 expires = dst ? dst->expires : rt->expires;
4783 expires -= jiffies; 4800 expires -= jiffies;
4784 } 4801 }
@@ -4786,7 +4803,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4786 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0) 4803 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
4787 goto nla_put_failure; 4804 goto nla_put_failure;
4788 4805
4789 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags))) 4806 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
4790 goto nla_put_failure; 4807 goto nla_put_failure;
4791 4808
4792 4809
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 83f4c77c79d8..28c4aa5078fc 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
752 } 752 }
753} 753}
754 754
755/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
756 * return code conversion for ip layer consumption
757 */
758static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
759 struct udphdr *uh)
760{
761 int ret;
762
763 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
764 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
765 ip6_compute_pseudo);
766
767 ret = udpv6_queue_rcv_skb(sk, skb);
768
769 /* a return value > 0 means to resubmit the input, but
770 * it wants the return to be -protocol, or 0
771 */
772 if (ret > 0)
773 return -ret;
774 return 0;
775}
776
755int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, 777int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
756 int proto) 778 int proto)
757{ 779{
@@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
803 if (unlikely(sk->sk_rx_dst != dst)) 825 if (unlikely(sk->sk_rx_dst != dst))
804 udp6_sk_rx_dst_set(sk, dst); 826 udp6_sk_rx_dst_set(sk, dst);
805 827
806 ret = udpv6_queue_rcv_skb(sk, skb); 828 if (!uh->check && !udp_sk(sk)->no_check6_rx) {
807 sock_put(sk); 829 sock_put(sk);
830 goto report_csum_error;
831 }
808 832
809 /* a return value > 0 means to resubmit the input */ 833 ret = udp6_unicast_rcv_skb(sk, skb, uh);
810 if (ret > 0) 834 sock_put(sk);
811 return ret; 835 return ret;
812 return 0;
813 } 836 }
814 837
815 /* 838 /*
@@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
822 /* Unicast */ 845 /* Unicast */
823 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 846 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
824 if (sk) { 847 if (sk) {
825 int ret; 848 if (!uh->check && !udp_sk(sk)->no_check6_rx)
826 849 goto report_csum_error;
827 if (!uh->check && !udp_sk(sk)->no_check6_rx) { 850 return udp6_unicast_rcv_skb(sk, skb, uh);
828 udp6_csum_zero_error(skb);
829 goto csum_error;
830 }
831
832 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
833 skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
834 ip6_compute_pseudo);
835
836 ret = udpv6_queue_rcv_skb(sk, skb);
837
838 /* a return value > 0 means to resubmit the input */
839 if (ret > 0)
840 return ret;
841
842 return 0;
843 } 851 }
844 852
845 if (!uh->check) { 853 if (!uh->check)
846 udp6_csum_zero_error(skb); 854 goto report_csum_error;
847 goto csum_error;
848 }
849 855
850 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 856 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
851 goto discard; 857 goto discard;
@@ -866,6 +872,9 @@ short_packet:
866 ulen, skb->len, 872 ulen, skb->len,
867 daddr, ntohs(uh->dest)); 873 daddr, ntohs(uh->dest));
868 goto discard; 874 goto discard;
875
876report_csum_error:
877 udp6_csum_zero_error(skb);
869csum_error: 878csum_error:
870 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); 879 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
871discard: 880discard:
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841f4a07438e..9ef490dddcea 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
59 59
60 if (xo && (xo->flags & XFRM_GRO)) { 60 if (xo && (xo->flags & XFRM_GRO)) {
61 skb_mac_header_rebuild(skb); 61 skb_mac_header_rebuild(skb);
62 skb_reset_transport_header(skb);
62 return -1; 63 return -1;
63 } 64 }
64 65
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 9ad07a91708e..3c29da5defe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
51static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) 51static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
52{ 52{
53 int ihl = skb->data - skb_transport_header(skb); 53 int ihl = skb->data - skb_transport_header(skb);
54 struct xfrm_offload *xo = xfrm_offload(skb);
55 54
56 if (skb->transport_header != skb->network_header) { 55 if (skb->transport_header != skb->network_header) {
57 memmove(skb_transport_header(skb), 56 memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
60 } 59 }
61 ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - 60 ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
62 sizeof(struct ipv6hdr)); 61 sizeof(struct ipv6hdr));
63 if (!xo || !(xo->flags & XFRM_GRO)) 62 skb_reset_transport_header(skb);
64 skb_reset_transport_header(skb);
65 return 0; 63 return 0;
66} 64}
67 65
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5959ce9620eb..6a74080005cf 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
170 170
171 if (toobig && xfrm6_local_dontfrag(skb)) { 171 if (toobig && xfrm6_local_dontfrag(skb)) {
172 xfrm6_local_rxpmtu(skb, mtu); 172 xfrm6_local_rxpmtu(skb, mtu);
173 kfree_skb(skb);
173 return -EMSGSIZE; 174 return -EMSGSIZE;
174 } else if (!skb->ignore_df && toobig && skb->sk) { 175 } else if (!skb->ignore_df && toobig && skb->sk) {
175 xfrm_local_error(skb, mtu); 176 xfrm_local_error(skb, mtu);
177 kfree_skb(skb);
176 return -EMSGSIZE; 178 return -EMSGSIZE;
177 } 179 }
178 180
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a21d8ed0a325..e2f16a0173a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
351 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message)); 351 memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
352 352
353 skb->dev = iucv->hs_dev; 353 skb->dev = iucv->hs_dev;
354 if (!skb->dev) 354 if (!skb->dev) {
355 return -ENODEV; 355 err = -ENODEV;
356 if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) 356 goto err_free;
357 return -ENETDOWN; 357 }
358 if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
359 err = -ENETDOWN;
360 goto err_free;
361 }
358 if (skb->len > skb->dev->mtu) { 362 if (skb->len > skb->dev->mtu) {
359 if (sock->sk_type == SOCK_SEQPACKET) 363 if (sock->sk_type == SOCK_SEQPACKET) {
360 return -EMSGSIZE; 364 err = -EMSGSIZE;
361 else 365 goto err_free;
362 skb_trim(skb, skb->dev->mtu); 366 }
367 skb_trim(skb, skb->dev->mtu);
363 } 368 }
364 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); 369 skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
365 nskb = skb_clone(skb, GFP_ATOMIC); 370 nskb = skb_clone(skb, GFP_ATOMIC);
366 if (!nskb) 371 if (!nskb) {
367 return -ENOMEM; 372 err = -ENOMEM;
373 goto err_free;
374 }
375
368 skb_queue_tail(&iucv->send_skb_q, nskb); 376 skb_queue_tail(&iucv->send_skb_q, nskb);
369 err = dev_queue_xmit(skb); 377 err = dev_queue_xmit(skb);
370 if (net_xmit_eval(err)) { 378 if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
375 WARN_ON(atomic_read(&iucv->msg_recv) < 0); 383 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
376 } 384 }
377 return net_xmit_eval(err); 385 return net_xmit_eval(err);
386
387err_free:
388 kfree_skb(skb);
389 return err;
378} 390}
379 391
380static struct sock *__iucv_get_sock_by_name(char *nm) 392static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1167 err = afiucv_hs_send(&txmsg, sk, skb, 0); 1179 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1168 if (err) { 1180 if (err) {
1169 atomic_dec(&iucv->msg_sent); 1181 atomic_dec(&iucv->msg_sent);
1170 goto fail; 1182 goto out;
1171 } 1183 }
1172 } else { /* Classic VM IUCV transport */ 1184 } else { /* Classic VM IUCV transport */
1173 skb_queue_tail(&iucv->send_skb_q, skb); 1185 skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2155 struct sock *sk; 2167 struct sock *sk;
2156 struct iucv_sock *iucv; 2168 struct iucv_sock *iucv;
2157 struct af_iucv_trans_hdr *trans_hdr; 2169 struct af_iucv_trans_hdr *trans_hdr;
2170 int err = NET_RX_SUCCESS;
2158 char nullstring[8]; 2171 char nullstring[8];
2159 int err = 0;
2160 2172
2161 if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) { 2173 if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
2162 WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d", 2174 WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2254 err = afiucv_hs_callback_rx(sk, skb); 2266 err = afiucv_hs_callback_rx(sk, skb);
2255 break; 2267 break;
2256 default: 2268 default:
2257 ; 2269 kfree_skb(skb);
2258 } 2270 }
2259 2271
2260 return err; 2272 return err;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f7ef167c45a..eb502c6290c2 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
1874 * Returns 0 if there are still iucv pathes defined 1874 * Returns 0 if there are still iucv pathes defined
1875 * 1 if there are no iucv pathes defined 1875 * 1 if there are no iucv pathes defined
1876 */ 1876 */
1877int iucv_path_table_empty(void) 1877static int iucv_path_table_empty(void)
1878{ 1878{
1879 int i; 1879 int i;
1880 1880
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 6449a1c2283b..f0f5fedb8caa 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
947 if (len < IEEE80211_DEAUTH_FRAME_LEN) 947 if (len < IEEE80211_DEAUTH_FRAME_LEN)
948 return; 948 return;
949 949
950 ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n", 950 ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
951 mgmt->sa, mgmt->da, mgmt->bssid, reason); 951 ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
952 sta_info_destroy_addr(sdata, mgmt->sa); 952 sta_info_destroy_addr(sdata, mgmt->sa);
953} 953}
954 954
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
966 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 966 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
967 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 967 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
968 968
969 ibss_dbg(sdata, 969 ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
970 "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n", 970 ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
971 mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction); 971 mgmt->bssid, auth_transaction);
972 972
973 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) 973 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
974 return; 974 return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
1175 rx_timestamp = drv_get_tsf(local, sdata); 1175 rx_timestamp = drv_get_tsf(local, sdata);
1176 } 1176 }
1177 1177
1178 ibss_dbg(sdata, 1178 ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
1179 "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
1180 mgmt->sa, mgmt->bssid, 1179 mgmt->sa, mgmt->bssid,
1181 (unsigned long long)rx_timestamp, 1180 (unsigned long long)rx_timestamp);
1181 ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
1182 (unsigned long long)beacon_timestamp, 1182 (unsigned long long)beacon_timestamp,
1183 (unsigned long long)(rx_timestamp - beacon_timestamp), 1183 (unsigned long long)(rx_timestamp - beacon_timestamp),
1184 jiffies); 1184 jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
1537 1537
1538 tx_last_beacon = drv_tx_last_beacon(local); 1538 tx_last_beacon = drv_tx_last_beacon(local);
1539 1539
1540 ibss_dbg(sdata, 1540 ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
1541 "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n", 1541 ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
1542 mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon); 1542 mgmt->bssid, tx_last_beacon);
1543 1543
1544 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) 1544 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
1545 return; 1545 return;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5e6cf2cee965..5836ddeac9e3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1756 1756
1757 if (local->ops->wake_tx_queue && 1757 if (local->ops->wake_tx_queue &&
1758 type != NL80211_IFTYPE_AP_VLAN && 1758 type != NL80211_IFTYPE_AP_VLAN &&
1759 type != NL80211_IFTYPE_MONITOR) 1759 (type != NL80211_IFTYPE_MONITOR ||
1760 (params->flags & MONITOR_FLAG_ACTIVE)))
1760 txq_size += sizeof(struct txq_info) + 1761 txq_size += sizeof(struct txq_info) +
1761 local->hw.txq_data_size; 1762 local->hw.txq_data_size;
1762 1763
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 4fb2709cb527..513627896204 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
256 256
257 flush_work(&local->radar_detected_work); 257 flush_work(&local->radar_detected_work);
258 rtnl_lock(); 258 rtnl_lock();
259 list_for_each_entry(sdata, &local->interfaces, list) 259 list_for_each_entry(sdata, &local->interfaces, list) {
260 /*
261 * XXX: there may be more work for other vif types and even
262 * for station mode: a good thing would be to run most of
263 * the iface type's dependent _stop (ieee80211_mg_stop,
264 * ieee80211_ibss_stop) etc...
265 * For now, fix only the specific bug that was seen: race
266 * between csa_connection_drop_work and us.
267 */
268 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
269 /*
270 * This worker is scheduled from the iface worker that
271 * runs on mac80211's workqueue, so we can't be
272 * scheduling this worker after the cancel right here.
273 * The exception is ieee80211_chswitch_done.
274 * Then we can have a race...
275 */
276 cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
277 }
260 flush_delayed_work(&sdata->dec_tailroom_needed_wk); 278 flush_delayed_work(&sdata->dec_tailroom_needed_wk);
279 }
261 ieee80211_scan_cancel(local); 280 ieee80211_scan_cancel(local);
262 281
263 /* make sure any new ROC will consider local->in_reconfig */ 282 /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
471 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | 490 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
472 IEEE80211_VHT_CAP_SHORT_GI_80 | 491 IEEE80211_VHT_CAP_SHORT_GI_80 |
473 IEEE80211_VHT_CAP_SHORT_GI_160 | 492 IEEE80211_VHT_CAP_SHORT_GI_160 |
474 IEEE80211_VHT_CAP_RXSTBC_1 | 493 IEEE80211_VHT_CAP_RXSTBC_MASK |
475 IEEE80211_VHT_CAP_RXSTBC_2 |
476 IEEE80211_VHT_CAP_RXSTBC_3 |
477 IEEE80211_VHT_CAP_RXSTBC_4 |
478 IEEE80211_VHT_CAP_TXSTBC | 494 IEEE80211_VHT_CAP_TXSTBC |
479 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 495 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
480 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 496 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1208#if IS_ENABLED(CONFIG_IPV6) 1224#if IS_ENABLED(CONFIG_IPV6)
1209 unregister_inet6addr_notifier(&local->ifa6_notifier); 1225 unregister_inet6addr_notifier(&local->ifa6_notifier);
1210#endif 1226#endif
1227 ieee80211_txq_teardown_flows(local);
1211 1228
1212 rtnl_lock(); 1229 rtnl_lock();
1213 1230
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
1236 skb_queue_purge(&local->skb_queue); 1253 skb_queue_purge(&local->skb_queue);
1237 skb_queue_purge(&local->skb_queue_unreliable); 1254 skb_queue_purge(&local->skb_queue_unreliable);
1238 skb_queue_purge(&local->skb_queue_tdls_chsw); 1255 skb_queue_purge(&local->skb_queue_tdls_chsw);
1239 ieee80211_txq_teardown_flows(local);
1240 1256
1241 destroy_workqueue(local->workqueue); 1257 destroy_workqueue(local->workqueue);
1242 wiphy_unregister(local->hw.wiphy); 1258 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ee56f18cad3f..21526630bf65 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
217int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 217int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
218void ieee80211s_init(void); 218void ieee80211s_init(void);
219void ieee80211s_update_metric(struct ieee80211_local *local, 219void ieee80211s_update_metric(struct ieee80211_local *local,
220 struct sta_info *sta, struct sk_buff *skb); 220 struct sta_info *sta,
221 struct ieee80211_tx_status *st);
221void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 222void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
222void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata); 223void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
223int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 224int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 35ad3983ae4b..6950cd0bf594 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
295} 295}
296 296
297void ieee80211s_update_metric(struct ieee80211_local *local, 297void ieee80211s_update_metric(struct ieee80211_local *local,
298 struct sta_info *sta, struct sk_buff *skb) 298 struct sta_info *sta,
299 struct ieee80211_tx_status *st)
299{ 300{
300 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); 301 struct ieee80211_tx_info *txinfo = st->info;
301 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
302 int failed; 302 int failed;
303 303
304 if (!ieee80211_is_data(hdr->frame_control))
305 return;
306
307 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); 304 failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
308 305
309 /* moving average, scaled to 100. 306 /* moving average, scaled to 100.
@@ -572,6 +569,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
572 forward = false; 569 forward = false;
573 reply = true; 570 reply = true;
574 target_metric = 0; 571 target_metric = 0;
572
573 if (SN_GT(target_sn, ifmsh->sn))
574 ifmsh->sn = target_sn;
575
575 if (time_after(jiffies, ifmsh->last_sn_update + 576 if (time_after(jiffies, ifmsh->last_sn_update +
576 net_traversal_jiffies(sdata)) || 577 net_traversal_jiffies(sdata)) ||
577 time_before(jiffies, ifmsh->last_sn_update)) { 578 time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 7fb9957359a3..3dbecae4be73 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1073 */ 1073 */
1074 1074
1075 if (sdata->reserved_chanctx) { 1075 if (sdata->reserved_chanctx) {
1076 struct ieee80211_supported_band *sband = NULL;
1077 struct sta_info *mgd_sta = NULL;
1078 enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
1079
1076 /* 1080 /*
1077 * with multi-vif csa driver may call ieee80211_csa_finish() 1081 * with multi-vif csa driver may call ieee80211_csa_finish()
1078 * many times while waiting for other interfaces to use their 1082 * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1081 if (sdata->reserved_ready) 1085 if (sdata->reserved_ready)
1082 goto out; 1086 goto out;
1083 1087
1088 if (sdata->vif.bss_conf.chandef.width !=
1089 sdata->csa_chandef.width) {
1090 /*
1091 * For managed interface, we need to also update the AP
1092 * station bandwidth and align the rate scale algorithm
1093 * on the bandwidth change. Here we only consider the
1094 * bandwidth of the new channel definition (as channel
1095 * switch flow does not have the full HT/VHT/HE
1096 * information), assuming that if additional changes are
1097 * required they would be done as part of the processing
1098 * of the next beacon from the AP.
1099 */
1100 switch (sdata->csa_chandef.width) {
1101 case NL80211_CHAN_WIDTH_20_NOHT:
1102 case NL80211_CHAN_WIDTH_20:
1103 default:
1104 bw = IEEE80211_STA_RX_BW_20;
1105 break;
1106 case NL80211_CHAN_WIDTH_40:
1107 bw = IEEE80211_STA_RX_BW_40;
1108 break;
1109 case NL80211_CHAN_WIDTH_80:
1110 bw = IEEE80211_STA_RX_BW_80;
1111 break;
1112 case NL80211_CHAN_WIDTH_80P80:
1113 case NL80211_CHAN_WIDTH_160:
1114 bw = IEEE80211_STA_RX_BW_160;
1115 break;
1116 }
1117
1118 mgd_sta = sta_info_get(sdata, ifmgd->bssid);
1119 sband =
1120 local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
1121 }
1122
1123 if (sdata->vif.bss_conf.chandef.width >
1124 sdata->csa_chandef.width) {
1125 mgd_sta->sta.bandwidth = bw;
1126 rate_control_rate_update(local, sband, mgd_sta,
1127 IEEE80211_RC_BW_CHANGED);
1128 }
1129
1084 ret = ieee80211_vif_use_reserved_context(sdata); 1130 ret = ieee80211_vif_use_reserved_context(sdata);
1085 if (ret) { 1131 if (ret) {
1086 sdata_info(sdata, 1132 sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
1091 goto out; 1137 goto out;
1092 } 1138 }
1093 1139
1140 if (sdata->vif.bss_conf.chandef.width <
1141 sdata->csa_chandef.width) {
1142 mgd_sta->sta.bandwidth = bw;
1143 rate_control_rate_update(local, sband, mgd_sta,
1144 IEEE80211_RC_BW_CHANGED);
1145 }
1146
1094 goto out; 1147 goto out;
1095 } 1148 }
1096 1149
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1312 cbss->beacon_interval)); 1365 cbss->beacon_interval));
1313 return; 1366 return;
1314 drop_connection: 1367 drop_connection:
1368 /*
1369 * This is just so that the disconnect flow will know that
1370 * we were trying to switch channel and failed. In case the
1371 * mode is 1 (we are not allowed to Tx), we will know not to
1372 * send a deauthentication frame. Those two fields will be
1373 * reset when the disconnection worker runs.
1374 */
1375 sdata->vif.csa_active = true;
1376 sdata->csa_block_tx = csa_ie.mode;
1377
1315 ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); 1378 ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
1316 mutex_unlock(&local->chanctx_mtx); 1379 mutex_unlock(&local->chanctx_mtx);
1317 mutex_unlock(&local->mtx); 1380 mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2522 struct ieee80211_local *local = sdata->local; 2585 struct ieee80211_local *local = sdata->local;
2523 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2586 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2524 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 2587 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
2588 bool tx;
2525 2589
2526 sdata_lock(sdata); 2590 sdata_lock(sdata);
2527 if (!ifmgd->associated) { 2591 if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2529 return; 2593 return;
2530 } 2594 }
2531 2595
2596 tx = !sdata->csa_block_tx;
2597
2532 /* AP is probably out of range (or not reachable for another reason) so 2598 /* AP is probably out of range (or not reachable for another reason) so
2533 * remove the bss struct for that AP. 2599 * remove the bss struct for that AP.
2534 */ 2600 */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2536 2602
2537 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 2603 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
2538 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, 2604 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
2539 true, frame_buf); 2605 tx, frame_buf);
2540 mutex_lock(&local->mtx); 2606 mutex_lock(&local->mtx);
2541 sdata->vif.csa_active = false; 2607 sdata->vif.csa_active = false;
2542 ifmgd->csa_waiting_bcn = false; 2608 ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
2547 } 2613 }
2548 mutex_unlock(&local->mtx); 2614 mutex_unlock(&local->mtx);
2549 2615
2550 ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, 2616 ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
2551 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 2617 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2552 2618
2553 sdata_unlock(sdata); 2619 sdata_unlock(sdata);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 64742f2765c4..96611d5dfadb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1728 */ 1728 */
1729 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1729 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1730 !ieee80211_has_morefrags(hdr->frame_control) && 1730 !ieee80211_has_morefrags(hdr->frame_control) &&
1731 !is_multicast_ether_addr(hdr->addr1) &&
1731 (ieee80211_is_mgmt(hdr->frame_control) || 1732 (ieee80211_is_mgmt(hdr->frame_control) ||
1732 ieee80211_is_data(hdr->frame_control)) && 1733 ieee80211_is_data(hdr->frame_control)) &&
1733 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1734 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9a6d7208bf4f..91d7c0cd1882 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
479 if (!skb) 479 if (!skb)
480 return; 480 return;
481 481
482 if (dropped) {
483 dev_kfree_skb_any(skb);
484 return;
485 }
486
487 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { 482 if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
488 u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie; 483 u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
489 struct ieee80211_sub_if_data *sdata; 484 struct ieee80211_sub_if_data *sdata;
@@ -507,6 +502,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
507 rcu_read_unlock(); 502 rcu_read_unlock();
508 503
509 dev_kfree_skb_any(skb); 504 dev_kfree_skb_any(skb);
505 } else if (dropped) {
506 dev_kfree_skb_any(skb);
510 } else { 507 } else {
511 /* consumes skb */ 508 /* consumes skb */
512 skb_complete_wifi_ack(skb, acked); 509 skb_complete_wifi_ack(skb, acked);
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
811 808
812 rate_control_tx_status(local, sband, status); 809 rate_control_tx_status(local, sband, status);
813 if (ieee80211_vif_is_mesh(&sta->sdata->vif)) 810 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
814 ieee80211s_update_metric(local, sta, skb); 811 ieee80211s_update_metric(local, sta, status);
815 812
816 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked) 813 if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
817 ieee80211_frame_acked(sta, skb); 814 ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
972 } 969 }
973 970
974 rate_control_tx_status(local, sband, status); 971 rate_control_tx_status(local, sband, status);
972 if (ieee80211_vif_is_mesh(&sta->sdata->vif))
973 ieee80211s_update_metric(local, sta, status);
975 } 974 }
976 975
977 if (acked || noack_success) { 976 if (acked || noack_success) {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 5cd5e6e5834e..6c647f425e05 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -16,6 +16,7 @@
16#include "ieee80211_i.h" 16#include "ieee80211_i.h"
17#include "driver-ops.h" 17#include "driver-ops.h"
18#include "rate.h" 18#include "rate.h"
19#include "wme.h"
19 20
20/* give usermode some time for retries in setting up the TDLS session */ 21/* give usermode some time for retries in setting up the TDLS session */
21#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) 22#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
1010 switch (action_code) { 1011 switch (action_code) {
1011 case WLAN_TDLS_SETUP_REQUEST: 1012 case WLAN_TDLS_SETUP_REQUEST:
1012 case WLAN_TDLS_SETUP_RESPONSE: 1013 case WLAN_TDLS_SETUP_RESPONSE:
1013 skb_set_queue_mapping(skb, IEEE80211_AC_BK); 1014 skb->priority = 256 + 2;
1014 skb->priority = 2;
1015 break; 1015 break;
1016 default: 1016 default:
1017 skb_set_queue_mapping(skb, IEEE80211_AC_VI); 1017 skb->priority = 256 + 5;
1018 skb->priority = 5;
1019 break; 1018 break;
1020 } 1019 }
1020 skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
1021 1021
1022 /* 1022 /*
1023 * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress. 1023 * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cd332e3e1134..25ba24bef8f5 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
214{ 214{
215 struct ieee80211_local *local = tx->local; 215 struct ieee80211_local *local = tx->local;
216 struct ieee80211_if_managed *ifmgd; 216 struct ieee80211_if_managed *ifmgd;
217 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
217 218
218 /* driver doesn't support power save */ 219 /* driver doesn't support power save */
219 if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) 220 if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
242 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) 243 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
243 return TX_CONTINUE; 244 return TX_CONTINUE;
244 245
246 if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
247 return TX_CONTINUE;
248
245 ifmgd = &tx->sdata->u.mgd; 249 ifmgd = &tx->sdata->u.mgd;
246 250
247 /* 251 /*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1890 sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; 1894 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
1891 1895
1892 if (invoke_tx_handlers_early(&tx)) 1896 if (invoke_tx_handlers_early(&tx))
1893 return false; 1897 return true;
1894 1898
1895 if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb)) 1899 if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
1896 return true; 1900 return true;
@@ -3078,27 +3082,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
3078} 3082}
3079 3083
3080static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local, 3084static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
3081 struct sk_buff *skb, int headroom, 3085 struct sk_buff *skb, int headroom)
3082 int *subframe_len)
3083{ 3086{
3084 int amsdu_len = *subframe_len + sizeof(struct ethhdr); 3087 if (skb_headroom(skb) < headroom) {
3085 int padding = (4 - amsdu_len) & 3;
3086
3087 if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
3088 I802_DEBUG_INC(local->tx_expand_skb_head); 3088 I802_DEBUG_INC(local->tx_expand_skb_head);
3089 3089
3090 if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) { 3090 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
3091 wiphy_debug(local->hw.wiphy, 3091 wiphy_debug(local->hw.wiphy,
3092 "failed to reallocate TX buffer\n"); 3092 "failed to reallocate TX buffer\n");
3093 return false; 3093 return false;
3094 } 3094 }
3095 } 3095 }
3096 3096
3097 if (padding) {
3098 *subframe_len += padding;
3099 skb_put_zero(skb, padding);
3100 }
3101
3102 return true; 3097 return true;
3103} 3098}
3104 3099
@@ -3122,8 +3117,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
3122 if (info->control.flags & IEEE80211_TX_CTRL_AMSDU) 3117 if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
3123 return true; 3118 return true;
3124 3119
3125 if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr), 3120 if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
3126 &subframe_len))
3127 return false; 3121 return false;
3128 3122
3129 data = skb_push(skb, sizeof(*amsdu_hdr)); 3123 data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3183,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3189 void *data; 3183 void *data;
3190 bool ret = false; 3184 bool ret = false;
3191 unsigned int orig_len; 3185 unsigned int orig_len;
3192 int n = 1, nfrags; 3186 int n = 2, nfrags, pad = 0;
3187 u16 hdrlen;
3193 3188
3194 if (!ieee80211_hw_check(&local->hw, TX_AMSDU)) 3189 if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
3195 return false; 3190 return false;
@@ -3222,9 +3217,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3222 if (skb->len + head->len > max_amsdu_len) 3217 if (skb->len + head->len > max_amsdu_len)
3223 goto out; 3218 goto out;
3224 3219
3225 if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3226 goto out;
3227
3228 nfrags = 1 + skb_shinfo(skb)->nr_frags; 3220 nfrags = 1 + skb_shinfo(skb)->nr_frags;
3229 nfrags += 1 + skb_shinfo(head)->nr_frags; 3221 nfrags += 1 + skb_shinfo(head)->nr_frags;
3230 frag_tail = &skb_shinfo(head)->frag_list; 3222 frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3232,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3240 if (max_frags && nfrags > max_frags) 3232 if (max_frags && nfrags > max_frags)
3241 goto out; 3233 goto out;
3242 3234
3243 if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2, 3235 if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
3244 &subframe_len))
3245 goto out; 3236 goto out;
3246 3237
3238 /*
3239 * Pad out the previous subframe to a multiple of 4 by adding the
3240 * padding to the next one, that's being added. Note that head->len
3241 * is the length of the full A-MSDU, but that works since each time
3242 * we add a new subframe we pad out the previous one to a multiple
3243 * of 4 and thus it no longer matters in the next round.
3244 */
3245 hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
3246 if ((head->len - hdrlen) & 3)
3247 pad = 4 - ((head->len - hdrlen) & 3);
3248
3249 if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
3250 2 + pad))
3251 goto out_recalc;
3252
3247 ret = true; 3253 ret = true;
3248 data = skb_push(skb, ETH_ALEN + 2); 3254 data = skb_push(skb, ETH_ALEN + 2);
3249 memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN); 3255 memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3259,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
3253 memcpy(data, &len, 2); 3259 memcpy(data, &len, 2);
3254 memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header)); 3260 memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
3255 3261
3262 memset(skb_push(skb, pad), 0, pad);
3263
3256 head->len += skb->len; 3264 head->len += skb->len;
3257 head->data_len += skb->len; 3265 head->data_len += skb->len;
3258 *frag_tail = skb; 3266 *frag_tail = skb;
3259 3267
3260 flow->backlog += head->len - orig_len; 3268out_recalc:
3261 tin->backlog_bytes += head->len - orig_len; 3269 if (head->len != orig_len) {
3262 3270 flow->backlog += head->len - orig_len;
3263 fq_recalc_backlog(fq, tin, flow); 3271 tin->backlog_bytes += head->len - orig_len;
3264 3272
3273 fq_recalc_backlog(fq, tin, flow);
3274 }
3265out: 3275out:
3266 spin_unlock_bh(&fq->lock); 3276 spin_unlock_bh(&fq->lock);
3267 3277
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 88efda7c9f8a..716cd6442d86 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
1135{ 1135{
1136 struct ieee80211_chanctx_conf *chanctx_conf; 1136 struct ieee80211_chanctx_conf *chanctx_conf;
1137 const struct ieee80211_reg_rule *rrule; 1137 const struct ieee80211_reg_rule *rrule;
1138 struct ieee80211_wmm_ac *wmm_ac; 1138 const struct ieee80211_wmm_ac *wmm_ac;
1139 u16 center_freq = 0; 1139 u16 center_freq = 0;
1140 1140
1141 if (sdata->vif.type != NL80211_IFTYPE_AP && 1141 if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
1154 1154
1155 rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq)); 1155 rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
1156 1156
1157 if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) { 1157 if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
1158 rcu_read_unlock(); 1158 rcu_read_unlock();
1159 return; 1159 return;
1160 } 1160 }
1161 1161
1162 if (sdata->vif.type == NL80211_IFTYPE_AP) 1162 if (sdata->vif.type == NL80211_IFTYPE_AP)
1163 wmm_ac = &rrule->wmm_rule->ap[ac]; 1163 wmm_ac = &rrule->wmm_rule.ap[ac];
1164 else 1164 else
1165 wmm_ac = &rrule->wmm_rule->client[ac]; 1165 wmm_ac = &rrule->wmm_rule.client[ac];
1166 qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min); 1166 qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
1167 qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max); 1167 qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
1168 qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn); 1168 qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
1169 qparam->txop = !qparam->txop ? wmm_ac->cot / 32 : 1169 qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
1170 min_t(u16, qparam->txop, wmm_ac->cot / 32);
1171 rcu_read_unlock(); 1170 rcu_read_unlock();
1172} 1171}
1173 1172
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7a4de6d618b1..8fbe6cdbe255 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1533 unsigned int flags; 1533 unsigned int flags;
1534 1534
1535 if (event == NETDEV_REGISTER) { 1535 if (event == NETDEV_REGISTER) {
1536 /* For now just support Ethernet, IPGRE, SIT and IPIP devices */ 1536
1537 /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
1538 * IPIP devices
1539 */
1537 if (dev->type == ARPHRD_ETHER || 1540 if (dev->type == ARPHRD_ETHER ||
1538 dev->type == ARPHRD_LOOPBACK || 1541 dev->type == ARPHRD_LOOPBACK ||
1539 dev->type == ARPHRD_IPGRE || 1542 dev->type == ARPHRD_IPGRE ||
1543 dev->type == ARPHRD_IP6GRE ||
1540 dev->type == ARPHRD_SIT || 1544 dev->type == ARPHRD_SIT ||
1541 dev->type == ARPHRD_TUNNEL) { 1545 dev->type == ARPHRD_TUNNEL) {
1542 mdev = mpls_add_dev(dev); 1546 mdev = mpls_add_dev(dev);
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 82e6edf9c5d9..45f33d6dedf7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
100 bool found; 100 bool found;
101 int rc; 101 int rc;
102 102
103 if (id > ndp->package_num) { 103 if (id > ndp->package_num - 1) {
104 netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id); 104 netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
105 return -ENODEV; 105 return -ENODEV;
106 } 106 }
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
240 return 0; /* done */ 240 return 0; /* done */
241 241
242 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 242 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
243 &ncsi_genl_family, 0, NCSI_CMD_PKG_INFO); 243 &ncsi_genl_family, NLM_F_MULTI, NCSI_CMD_PKG_INFO);
244 if (!hdr) { 244 if (!hdr) {
245 rc = -EMSGSIZE; 245 rc = -EMSGSIZE;
246 goto err; 246 goto err;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 71709c104081..f61c306de1d0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
771 depends on NETFILTER_ADVANCED 771 depends on NETFILTER_ADVANCED
772 ---help--- 772 ---help---
773 This option adds a `CHECKSUM' target, which can be used in the iptables mangle 773 This option adds a `CHECKSUM' target, which can be used in the iptables mangle
774 table. 774 table to work around buggy DHCP clients in virtualized environments.
775 775
776 You can use this target to compute and fill in the checksum in 776 Some old DHCP clients drop packets because they are not aware
777 a packet that lacks a checksum. This is particularly useful, 777 that the checksum would normally be offloaded to hardware and
778 if you need to work around old applications such as dhcp clients, 778 thus should be considered valid.
779 that do not work well with checksum offloads, but don't want to disable 779 This target can be used to fill in the checksum using iptables
780 checksum offload in your device. 780 when such packets are sent via a virtual network device.
781 781
782 To compile it as a module, choose M here. If unsure, say N. 782 To compile it as a module, choose M here. If unsure, say N.
783 783
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 9f14b0df6960..51c5d7eec0a3 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
776}; 776};
777#endif 777#endif
778 778
779static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
780{
781 u8 nfproto = (unsigned long)_nfproto;
782
783 if (nf_ct_l3num(ct) != nfproto)
784 return 0;
785
786 if (nf_ct_protonum(ct) == IPPROTO_TCP &&
787 ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
788 ct->proto.tcp.seen[0].td_maxwin = 0;
789 ct->proto.tcp.seen[1].td_maxwin = 0;
790 }
791
792 return 0;
793}
794
779static int nf_ct_netns_do_get(struct net *net, u8 nfproto) 795static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
780{ 796{
781 struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id); 797 struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
798 bool fixup_needed = false;
782 int err = 0; 799 int err = 0;
783 800
784 mutex_lock(&nf_ct_proto_mutex); 801 mutex_lock(&nf_ct_proto_mutex);
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
798 ARRAY_SIZE(ipv4_conntrack_ops)); 815 ARRAY_SIZE(ipv4_conntrack_ops));
799 if (err) 816 if (err)
800 cnet->users4 = 0; 817 cnet->users4 = 0;
818 else
819 fixup_needed = true;
801 break; 820 break;
802#if IS_ENABLED(CONFIG_IPV6) 821#if IS_ENABLED(CONFIG_IPV6)
803 case NFPROTO_IPV6: 822 case NFPROTO_IPV6:
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
814 ARRAY_SIZE(ipv6_conntrack_ops)); 833 ARRAY_SIZE(ipv6_conntrack_ops));
815 if (err) 834 if (err)
816 cnet->users6 = 0; 835 cnet->users6 = 0;
836 else
837 fixup_needed = true;
817 break; 838 break;
818#endif 839#endif
819 default: 840 default:
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
822 } 843 }
823 out_unlock: 844 out_unlock:
824 mutex_unlock(&nf_ct_proto_mutex); 845 mutex_unlock(&nf_ct_proto_mutex);
846
847 if (fixup_needed)
848 nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
849 (void *)(unsigned long)nfproto, 0, 0);
850
825 return err; 851 return err;
826} 852}
827 853
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 8c58f96b59e7..f3f91ed2c21a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
675} 675}
676#endif 676#endif
677 677
678#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 678#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
679 679
680#include <linux/netfilter/nfnetlink.h> 680#include <linux/netfilter/nfnetlink.h>
681#include <linux/netfilter/nfnetlink_cttimeout.h> 681#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
697 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; 697 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
698 } 698 }
699 } 699 }
700
701 timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
700 return 0; 702 return 0;
701} 703}
702 704
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
726 [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 }, 728 [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
727 [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 }, 729 [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
728}; 730};
729#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 731#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
730 732
731#ifdef CONFIG_SYSCTL 733#ifdef CONFIG_SYSCTL
732/* template, data assigned later */ 734/* template, data assigned later */
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
827 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ; 829 dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
828 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ; 830 dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
829 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL; 831 dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
832
833 /* timeouts[0] is unused, make it same as SYN_SENT so
834 * ->timeouts[0] contains 'new' timeout, like udp or icmp.
835 */
836 dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
830 } 837 }
831 838
832 return dccp_kmemdup_sysctl_table(net, pn, dn); 839 return dccp_kmemdup_sysctl_table(net, pn, dn);
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
856 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 863 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
857 .nla_policy = nf_ct_port_nla_policy, 864 .nla_policy = nf_ct_port_nla_policy,
858#endif 865#endif
859#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 866#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
860 .ctnl_timeout = { 867 .ctnl_timeout = {
861 .nlattr_to_obj = dccp_timeout_nlattr_to_obj, 868 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
862 .obj_to_nlattr = dccp_timeout_obj_to_nlattr, 869 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
864 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, 871 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
865 .nla_policy = dccp_timeout_nla_policy, 872 .nla_policy = dccp_timeout_nla_policy,
866 }, 873 },
867#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 874#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
868 .init_net = dccp_init_net, 875 .init_net = dccp_init_net,
869 .get_net_proto = dccp_get_net_proto, 876 .get_net_proto = dccp_get_net_proto,
870}; 877};
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
889 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 896 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
890 .nla_policy = nf_ct_port_nla_policy, 897 .nla_policy = nf_ct_port_nla_policy,
891#endif 898#endif
892#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 899#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
893 .ctnl_timeout = { 900 .ctnl_timeout = {
894 .nlattr_to_obj = dccp_timeout_nlattr_to_obj, 901 .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
895 .obj_to_nlattr = dccp_timeout_obj_to_nlattr, 902 .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
897 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX, 904 .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
898 .nla_policy = dccp_timeout_nla_policy, 905 .nla_policy = dccp_timeout_nla_policy,
899 }, 906 },
900#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 907#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
901 .init_net = dccp_init_net, 908 .init_net = dccp_init_net,
902 .get_net_proto = dccp_get_net_proto, 909 .get_net_proto = dccp_get_net_proto,
903}; 910};
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index ac4a0b296dcd..1df3244ecd07 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
70 return ret; 70 return ret;
71} 71}
72 72
73#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 73#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
74 74
75#include <linux/netfilter/nfnetlink.h> 75#include <linux/netfilter/nfnetlink.h>
76#include <linux/netfilter/nfnetlink_cttimeout.h> 76#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -113,7 +113,7 @@ static const struct nla_policy
113generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = { 113generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
114 [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 }, 114 [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
115}; 115};
116#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 116#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
117 117
118#ifdef CONFIG_SYSCTL 118#ifdef CONFIG_SYSCTL
119static struct ctl_table generic_sysctl_table[] = { 119static struct ctl_table generic_sysctl_table[] = {
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
164 .pkt_to_tuple = generic_pkt_to_tuple, 164 .pkt_to_tuple = generic_pkt_to_tuple,
165 .packet = generic_packet, 165 .packet = generic_packet,
166 .new = generic_new, 166 .new = generic_new,
167#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 167#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
168 .ctnl_timeout = { 168 .ctnl_timeout = {
169 .nlattr_to_obj = generic_timeout_nlattr_to_obj, 169 .nlattr_to_obj = generic_timeout_nlattr_to_obj,
170 .obj_to_nlattr = generic_timeout_obj_to_nlattr, 170 .obj_to_nlattr = generic_timeout_obj_to_nlattr,
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
172 .obj_size = sizeof(unsigned int), 172 .obj_size = sizeof(unsigned int),
173 .nla_policy = generic_timeout_nla_policy, 173 .nla_policy = generic_timeout_nla_policy,
174 }, 174 },
175#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 175#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
176 .init_net = generic_init_net, 176 .init_net = generic_init_net,
177 .get_net_proto = generic_get_net_proto, 177 .get_net_proto = generic_get_net_proto,
178}; 178};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d1632252bf5b..650eb4fba2c5 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
285 nf_ct_gre_keymap_destroy(master); 285 nf_ct_gre_keymap_destroy(master);
286} 286}
287 287
288#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 288#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
289 289
290#include <linux/netfilter/nfnetlink.h> 290#include <linux/netfilter/nfnetlink.h>
291#include <linux/netfilter/nfnetlink_cttimeout.h> 291#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
334 [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 }, 334 [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
335 [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 }, 335 [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
336}; 336};
337#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 337#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
338 338
339static int gre_init_net(struct net *net, u_int16_t proto) 339static int gre_init_net(struct net *net, u_int16_t proto)
340{ 340{
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
367 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 367 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
368 .nla_policy = nf_ct_port_nla_policy, 368 .nla_policy = nf_ct_port_nla_policy,
369#endif 369#endif
370#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 370#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
371 .ctnl_timeout = { 371 .ctnl_timeout = {
372 .nlattr_to_obj = gre_timeout_nlattr_to_obj, 372 .nlattr_to_obj = gre_timeout_nlattr_to_obj,
373 .obj_to_nlattr = gre_timeout_obj_to_nlattr, 373 .obj_to_nlattr = gre_timeout_obj_to_nlattr,
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
375 .obj_size = sizeof(unsigned int) * GRE_CT_MAX, 375 .obj_size = sizeof(unsigned int) * GRE_CT_MAX,
376 .nla_policy = gre_timeout_nla_policy, 376 .nla_policy = gre_timeout_nla_policy,
377 }, 377 },
378#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 378#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
379 .net_id = &proto_gre_net_id, 379 .net_id = &proto_gre_net_id,
380 .init_net = gre_init_net, 380 .init_net = gre_init_net,
381}; 381};
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 036670b38282..43c7e1a217b9 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
273} 273}
274#endif 274#endif
275 275
276#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 276#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
277 277
278#include <linux/netfilter/nfnetlink.h> 278#include <linux/netfilter/nfnetlink.h>
279#include <linux/netfilter/nfnetlink_cttimeout.h> 279#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -313,7 +313,7 @@ static const struct nla_policy
313icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { 313icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
314 [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, 314 [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
315}; 315};
316#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 316#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
317 317
318#ifdef CONFIG_SYSCTL 318#ifdef CONFIG_SYSCTL
319static struct ctl_table icmp_sysctl_table[] = { 319static struct ctl_table icmp_sysctl_table[] = {
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
374 .nlattr_to_tuple = icmp_nlattr_to_tuple, 374 .nlattr_to_tuple = icmp_nlattr_to_tuple,
375 .nla_policy = icmp_nla_policy, 375 .nla_policy = icmp_nla_policy,
376#endif 376#endif
377#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 377#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
378 .ctnl_timeout = { 378 .ctnl_timeout = {
379 .nlattr_to_obj = icmp_timeout_nlattr_to_obj, 379 .nlattr_to_obj = icmp_timeout_nlattr_to_obj,
380 .obj_to_nlattr = icmp_timeout_obj_to_nlattr, 380 .obj_to_nlattr = icmp_timeout_obj_to_nlattr,
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
382 .obj_size = sizeof(unsigned int), 382 .obj_size = sizeof(unsigned int),
383 .nla_policy = icmp_timeout_nla_policy, 383 .nla_policy = icmp_timeout_nla_policy,
384 }, 384 },
385#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 385#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
386 .init_net = icmp_init_net, 386 .init_net = icmp_init_net,
387 .get_net_proto = icmp_get_net_proto, 387 .get_net_proto = icmp_get_net_proto,
388}; 388};
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index bed07b998a10..97e40f77d678 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
274} 274}
275#endif 275#endif
276 276
277#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 277#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
278 278
279#include <linux/netfilter/nfnetlink.h> 279#include <linux/netfilter/nfnetlink.h>
280#include <linux/netfilter/nfnetlink_cttimeout.h> 280#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -314,7 +314,7 @@ static const struct nla_policy
314icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = { 314icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
315 [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 }, 315 [CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
316}; 316};
317#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 317#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
318 318
319#ifdef CONFIG_SYSCTL 319#ifdef CONFIG_SYSCTL
320static struct ctl_table icmpv6_sysctl_table[] = { 320static struct ctl_table icmpv6_sysctl_table[] = {
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
373 .nlattr_to_tuple = icmpv6_nlattr_to_tuple, 373 .nlattr_to_tuple = icmpv6_nlattr_to_tuple,
374 .nla_policy = icmpv6_nla_policy, 374 .nla_policy = icmpv6_nla_policy,
375#endif 375#endif
376#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 376#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
377 .ctnl_timeout = { 377 .ctnl_timeout = {
378 .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj, 378 .nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
379 .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr, 379 .obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
381 .obj_size = sizeof(unsigned int), 381 .obj_size = sizeof(unsigned int),
382 .nla_policy = icmpv6_timeout_nla_policy, 382 .nla_policy = icmpv6_timeout_nla_policy,
383 }, 383 },
384#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 384#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
385 .init_net = icmpv6_init_net, 385 .init_net = icmpv6_init_net,
386 .get_net_proto = icmpv6_get_net_proto, 386 .get_net_proto = icmpv6_get_net_proto,
387}; 387};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 8d1e085fc14a..e4d738d34cd0 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
591} 591}
592#endif 592#endif
593 593
594#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 594#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
595 595
596#include <linux/netfilter/nfnetlink.h> 596#include <linux/netfilter/nfnetlink.h>
597#include <linux/netfilter/nfnetlink_cttimeout.h> 597#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
613 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ; 613 timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
614 } 614 }
615 } 615 }
616
617 timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
616 return 0; 618 return 0;
617} 619}
618 620
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
644 [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 }, 646 [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
645 [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 }, 647 [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
646}; 648};
647#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 649#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
648 650
649 651
650#ifdef CONFIG_SYSCTL 652#ifdef CONFIG_SYSCTL
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
743 745
744 for (i = 0; i < SCTP_CONNTRACK_MAX; i++) 746 for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
745 sn->timeouts[i] = sctp_timeouts[i]; 747 sn->timeouts[i] = sctp_timeouts[i];
748
749 /* timeouts[0] is unused, init it so ->timeouts[0] contains
750 * 'new' timeout, like udp or icmp.
751 */
752 sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
746 } 753 }
747 754
748 return sctp_kmemdup_sysctl_table(pn, sn); 755 return sctp_kmemdup_sysctl_table(pn, sn);
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
773 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 780 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
774 .nla_policy = nf_ct_port_nla_policy, 781 .nla_policy = nf_ct_port_nla_policy,
775#endif 782#endif
776#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 783#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
777 .ctnl_timeout = { 784 .ctnl_timeout = {
778 .nlattr_to_obj = sctp_timeout_nlattr_to_obj, 785 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
779 .obj_to_nlattr = sctp_timeout_obj_to_nlattr, 786 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
781 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, 788 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
782 .nla_policy = sctp_timeout_nla_policy, 789 .nla_policy = sctp_timeout_nla_policy,
783 }, 790 },
784#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 791#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
785 .init_net = sctp_init_net, 792 .init_net = sctp_init_net,
786 .get_net_proto = sctp_get_net_proto, 793 .get_net_proto = sctp_get_net_proto,
787}; 794};
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
806 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 813 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
807 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, 814 .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
808 .nla_policy = nf_ct_port_nla_policy, 815 .nla_policy = nf_ct_port_nla_policy,
809#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 816#endif
817#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
810 .ctnl_timeout = { 818 .ctnl_timeout = {
811 .nlattr_to_obj = sctp_timeout_nlattr_to_obj, 819 .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
812 .obj_to_nlattr = sctp_timeout_obj_to_nlattr, 820 .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
814 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX, 822 .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
815 .nla_policy = sctp_timeout_nla_policy, 823 .nla_policy = sctp_timeout_nla_policy,
816 }, 824 },
817#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 825#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
818#endif
819 .init_net = sctp_init_net, 826 .init_net = sctp_init_net,
820 .get_net_proto = sctp_get_net_proto, 827 .get_net_proto = sctp_get_net_proto,
821}; 828};
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index d80d322b9d8b..247b89784a6f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1213#define TCP_NLATTR_SIZE ( \ 1213#define TCP_NLATTR_SIZE ( \
1214 NLA_ALIGN(NLA_HDRLEN + 1) + \ 1214 NLA_ALIGN(NLA_HDRLEN + 1) + \
1215 NLA_ALIGN(NLA_HDRLEN + 1) + \ 1215 NLA_ALIGN(NLA_HDRLEN + 1) + \
1216 NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \ 1216 NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
1217 NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags)))) 1217 NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
1218 1218
1219static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) 1219static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1220{ 1220{
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
1279} 1279}
1280#endif 1280#endif
1281 1281
1282#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1282#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1283 1283
1284#include <linux/netfilter/nfnetlink.h> 1284#include <linux/netfilter/nfnetlink.h>
1285#include <linux/netfilter/nfnetlink_cttimeout.h> 1285#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1301 timeouts[TCP_CONNTRACK_SYN_SENT] = 1301 timeouts[TCP_CONNTRACK_SYN_SENT] =
1302 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ; 1302 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1303 } 1303 }
1304
1304 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) { 1305 if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1305 timeouts[TCP_CONNTRACK_SYN_RECV] = 1306 timeouts[TCP_CONNTRACK_SYN_RECV] =
1306 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ; 1307 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1341 timeouts[TCP_CONNTRACK_UNACK] = 1342 timeouts[TCP_CONNTRACK_UNACK] =
1342 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ; 1343 ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1343 } 1344 }
1345
1346 timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1344 return 0; 1347 return 0;
1345} 1348}
1346 1349
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1391 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 }, 1394 [CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
1392 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 }, 1395 [CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
1393}; 1396};
1394#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1397#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1395 1398
1396#ifdef CONFIG_SYSCTL 1399#ifdef CONFIG_SYSCTL
1397static struct ctl_table tcp_sysctl_table[] = { 1400static struct ctl_table tcp_sysctl_table[] = {
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
1518 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++) 1521 for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1519 tn->timeouts[i] = tcp_timeouts[i]; 1522 tn->timeouts[i] = tcp_timeouts[i];
1520 1523
1524 /* timeouts[0] is unused, make it same as SYN_SENT so
1525 * ->timeouts[0] contains 'new' timeout, like udp or icmp.
1526 */
1527 tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1521 tn->tcp_loose = nf_ct_tcp_loose; 1528 tn->tcp_loose = nf_ct_tcp_loose;
1522 tn->tcp_be_liberal = nf_ct_tcp_be_liberal; 1529 tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1523 tn->tcp_max_retrans = nf_ct_tcp_max_retrans; 1530 tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
1551 .nlattr_size = TCP_NLATTR_SIZE, 1558 .nlattr_size = TCP_NLATTR_SIZE,
1552 .nla_policy = nf_ct_port_nla_policy, 1559 .nla_policy = nf_ct_port_nla_policy,
1553#endif 1560#endif
1554#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1561#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1555 .ctnl_timeout = { 1562 .ctnl_timeout = {
1556 .nlattr_to_obj = tcp_timeout_nlattr_to_obj, 1563 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1557 .obj_to_nlattr = tcp_timeout_obj_to_nlattr, 1564 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
1560 TCP_CONNTRACK_TIMEOUT_MAX, 1567 TCP_CONNTRACK_TIMEOUT_MAX,
1561 .nla_policy = tcp_timeout_nla_policy, 1568 .nla_policy = tcp_timeout_nla_policy,
1562 }, 1569 },
1563#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1570#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1564 .init_net = tcp_init_net, 1571 .init_net = tcp_init_net,
1565 .get_net_proto = tcp_get_net_proto, 1572 .get_net_proto = tcp_get_net_proto,
1566}; 1573};
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
1586 .nlattr_tuple_size = tcp_nlattr_tuple_size, 1593 .nlattr_tuple_size = tcp_nlattr_tuple_size,
1587 .nla_policy = nf_ct_port_nla_policy, 1594 .nla_policy = nf_ct_port_nla_policy,
1588#endif 1595#endif
1589#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 1596#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1590 .ctnl_timeout = { 1597 .ctnl_timeout = {
1591 .nlattr_to_obj = tcp_timeout_nlattr_to_obj, 1598 .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
1592 .obj_to_nlattr = tcp_timeout_obj_to_nlattr, 1599 .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
1595 TCP_CONNTRACK_TIMEOUT_MAX, 1602 TCP_CONNTRACK_TIMEOUT_MAX,
1596 .nla_policy = tcp_timeout_nla_policy, 1603 .nla_policy = tcp_timeout_nla_policy,
1597 }, 1604 },
1598#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 1605#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1599 .init_net = tcp_init_net, 1606 .init_net = tcp_init_net,
1600 .get_net_proto = tcp_get_net_proto, 1607 .get_net_proto = tcp_get_net_proto,
1601}; 1608};
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7a1b8988a931..3065fb8ef91b 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
171 return NF_ACCEPT; 171 return NF_ACCEPT;
172} 172}
173 173
174#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 174#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
175 175
176#include <linux/netfilter/nfnetlink.h> 176#include <linux/netfilter/nfnetlink.h>
177#include <linux/netfilter/nfnetlink_cttimeout.h> 177#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
221 [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, 221 [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
222 [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, 222 [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
223}; 223};
224#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 224#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
225 225
226#ifdef CONFIG_SYSCTL 226#ifdef CONFIG_SYSCTL
227static struct ctl_table udp_sysctl_table[] = { 227static struct ctl_table udp_sysctl_table[] = {
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
292 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 292 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
293 .nla_policy = nf_ct_port_nla_policy, 293 .nla_policy = nf_ct_port_nla_policy,
294#endif 294#endif
295#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 295#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
296 .ctnl_timeout = { 296 .ctnl_timeout = {
297 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 297 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
298 .obj_to_nlattr = udp_timeout_obj_to_nlattr, 298 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
300 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 300 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
301 .nla_policy = udp_timeout_nla_policy, 301 .nla_policy = udp_timeout_nla_policy,
302 }, 302 },
303#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 303#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
304 .init_net = udp_init_net, 304 .init_net = udp_init_net,
305 .get_net_proto = udp_get_net_proto, 305 .get_net_proto = udp_get_net_proto,
306}; 306};
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
321 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 321 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
322 .nla_policy = nf_ct_port_nla_policy, 322 .nla_policy = nf_ct_port_nla_policy,
323#endif 323#endif
324#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 324#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
325 .ctnl_timeout = { 325 .ctnl_timeout = {
326 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 326 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
327 .obj_to_nlattr = udp_timeout_obj_to_nlattr, 327 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
329 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 329 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
330 .nla_policy = udp_timeout_nla_policy, 330 .nla_policy = udp_timeout_nla_policy,
331 }, 331 },
332#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 332#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
333 .init_net = udp_init_net, 333 .init_net = udp_init_net,
334 .get_net_proto = udp_get_net_proto, 334 .get_net_proto = udp_get_net_proto,
335}; 335};
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
350 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 350 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
351 .nla_policy = nf_ct_port_nla_policy, 351 .nla_policy = nf_ct_port_nla_policy,
352#endif 352#endif
353#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 353#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
354 .ctnl_timeout = { 354 .ctnl_timeout = {
355 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 355 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
356 .obj_to_nlattr = udp_timeout_obj_to_nlattr, 356 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
358 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 358 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
359 .nla_policy = udp_timeout_nla_policy, 359 .nla_policy = udp_timeout_nla_policy,
360 }, 360 },
361#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 361#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
362 .init_net = udp_init_net, 362 .init_net = udp_init_net,
363 .get_net_proto = udp_get_net_proto, 363 .get_net_proto = udp_get_net_proto,
364}; 364};
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
379 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, 379 .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
380 .nla_policy = nf_ct_port_nla_policy, 380 .nla_policy = nf_ct_port_nla_policy,
381#endif 381#endif
382#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) 382#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
383 .ctnl_timeout = { 383 .ctnl_timeout = {
384 .nlattr_to_obj = udp_timeout_nlattr_to_obj, 384 .nlattr_to_obj = udp_timeout_nlattr_to_obj,
385 .obj_to_nlattr = udp_timeout_obj_to_nlattr, 385 .obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
387 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, 387 .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
388 .nla_policy = udp_timeout_nla_policy, 388 .nla_policy = udp_timeout_nla_policy,
389 }, 389 },
390#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ 390#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
391 .init_net = udp_init_net, 391 .init_net = udp_init_net,
392 .get_net_proto = udp_get_net_proto, 392 .get_net_proto = udp_get_net_proto,
393}; 393};
394EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6); 394EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
395#endif 395#endif
396#include <net/netfilter/nf_conntrack_timeout.h>
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1dca5683f59f..2cfb173cd0b2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
4637 } 4637 }
4638 set->ndeact++; 4638 set->ndeact++;
4639 4639
4640 nft_set_elem_deactivate(ctx->net, set, elem);
4640 nft_trans_elem_set(trans) = set; 4641 nft_trans_elem_set(trans) = set;
4641 nft_trans_elem(trans) = *elem; 4642 nft_trans_elem(trans) = *elem;
4642 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 4643 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index d46a236cdf31..a30f8ba4b89a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -489,8 +489,8 @@ err:
489 return err; 489 return err;
490} 490}
491 491
492static struct ctnl_timeout * 492static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
493ctnl_timeout_find_get(struct net *net, const char *name) 493 const char *name)
494{ 494{
495 struct ctnl_timeout *timeout, *matching = NULL; 495 struct ctnl_timeout *timeout, *matching = NULL;
496 496
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
509 break; 509 break;
510 } 510 }
511err: 511err:
512 return matching; 512 return matching ? &matching->timeout : NULL;
513} 513}
514 514
515static void ctnl_timeout_put(struct nf_ct_timeout *t) 515static void ctnl_timeout_put(struct nf_ct_timeout *t)
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index ea4ba551abb2..d33094f4ec41 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
233 int err; 233 int err;
234 234
235 if (verdict == NF_ACCEPT || 235 if (verdict == NF_ACCEPT ||
236 verdict == NF_REPEAT ||
236 verdict == NF_STOP) { 237 verdict == NF_STOP) {
237 rcu_read_lock(); 238 rcu_read_lock();
238 ct_hook = rcu_dereference(nf_ct_hook); 239 ct_hook = rcu_dereference(nf_ct_hook);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 26a8baebd072..5dd87748afa8 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -799,7 +799,7 @@ err:
799} 799}
800 800
801struct nft_ct_timeout_obj { 801struct nft_ct_timeout_obj {
802 struct nf_conn *tmpl; 802 struct nf_ct_timeout *timeout;
803 u8 l4proto; 803 u8 l4proto;
804}; 804};
805 805
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
809{ 809{
810 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 810 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
811 struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); 811 struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
812 struct sk_buff *skb = pkt->skb; 812 struct nf_conn_timeout *timeout;
813 const unsigned int *values;
814
815 if (priv->l4proto != pkt->tprot)
816 return;
813 817
814 if (ct || 818 if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
815 priv->l4proto != pkt->tprot)
816 return; 819 return;
817 820
818 nf_ct_set(skb, priv->tmpl, IP_CT_NEW); 821 timeout = nf_ct_timeout_find(ct);
822 if (!timeout) {
823 timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
824 if (!timeout) {
825 regs->verdict.code = NF_DROP;
826 return;
827 }
828 }
829
830 rcu_assign_pointer(timeout->timeout, priv->timeout);
831
832 /* adjust the timeout as per 'new' state. ct is unconfirmed,
833 * so the current timestamp must not be added.
834 */
835 values = nf_ct_timeout_data(timeout);
836 if (values)
837 nf_ct_refresh(ct, pkt->skb, values[0]);
819} 838}
820 839
821static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx, 840static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
822 const struct nlattr * const tb[], 841 const struct nlattr * const tb[],
823 struct nft_object *obj) 842 struct nft_object *obj)
824{ 843{
825 const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
826 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 844 struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
827 const struct nf_conntrack_l4proto *l4proto; 845 const struct nf_conntrack_l4proto *l4proto;
828 struct nf_conn_timeout *timeout_ext;
829 struct nf_ct_timeout *timeout; 846 struct nf_ct_timeout *timeout;
830 int l3num = ctx->family; 847 int l3num = ctx->family;
831 struct nf_conn *tmpl;
832 __u8 l4num; 848 __u8 l4num;
833 int ret; 849 int ret;
834 850
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
863 879
864 timeout->l3num = l3num; 880 timeout->l3num = l3num;
865 timeout->l4proto = l4proto; 881 timeout->l4proto = l4proto;
866 tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
867 if (!tmpl) {
868 ret = -ENOMEM;
869 goto err_free_timeout;
870 }
871
872 timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
873 if (!timeout_ext) {
874 ret = -ENOMEM;
875 goto err_free_tmpl;
876 }
877 882
878 ret = nf_ct_netns_get(ctx->net, ctx->family); 883 ret = nf_ct_netns_get(ctx->net, ctx->family);
879 if (ret < 0) 884 if (ret < 0)
880 goto err_free_tmpl; 885 goto err_free_timeout;
881
882 priv->tmpl = tmpl;
883 886
887 priv->timeout = timeout;
884 return 0; 888 return 0;
885 889
886err_free_tmpl:
887 nf_ct_tmpl_free(tmpl);
888err_free_timeout: 890err_free_timeout:
889 kfree(timeout); 891 kfree(timeout);
890err_proto_put: 892err_proto_put:
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
896 struct nft_object *obj) 898 struct nft_object *obj)
897{ 899{
898 struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 900 struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
899 struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); 901 struct nf_ct_timeout *timeout = priv->timeout;
900 struct nf_ct_timeout *timeout;
901 902
902 timeout = rcu_dereference_raw(t->timeout);
903 nf_ct_untimeout(ctx->net, timeout); 903 nf_ct_untimeout(ctx->net, timeout);
904 nf_ct_l4proto_put(timeout->l4proto); 904 nf_ct_l4proto_put(timeout->l4proto);
905 nf_ct_netns_put(ctx->net, ctx->family); 905 nf_ct_netns_put(ctx->net, ctx->family);
906 nf_ct_tmpl_free(priv->tmpl); 906 kfree(priv->timeout);
907} 907}
908 908
909static int nft_ct_timeout_obj_dump(struct sk_buff *skb, 909static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
910 struct nft_object *obj, bool reset) 910 struct nft_object *obj, bool reset)
911{ 911{
912 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj); 912 const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
913 const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl); 913 const struct nf_ct_timeout *timeout = priv->timeout;
914 const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
915 struct nlattr *nest_params; 914 struct nlattr *nest_params;
916 int ret; 915 int ret;
917 916
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 5af74b37f423..a35fb59ace73 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
49 49
50 priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]); 50 priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
51 err = nft_validate_register_store(ctx, priv->dreg, NULL, 51 err = nft_validate_register_store(ctx, priv->dreg, NULL,
52 NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN); 52 NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
53 if (err < 0) 53 if (err < 0)
54 return err; 54 return err;
55 55
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 55e2d9215c0d..0e5ec126f6ad 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -355,12 +355,11 @@ cont:
355 355
356static void nft_rbtree_gc(struct work_struct *work) 356static void nft_rbtree_gc(struct work_struct *work)
357{ 357{
358 struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
358 struct nft_set_gc_batch *gcb = NULL; 359 struct nft_set_gc_batch *gcb = NULL;
359 struct rb_node *node, *prev = NULL;
360 struct nft_rbtree_elem *rbe;
361 struct nft_rbtree *priv; 360 struct nft_rbtree *priv;
361 struct rb_node *node;
362 struct nft_set *set; 362 struct nft_set *set;
363 int i;
364 363
365 priv = container_of(work, struct nft_rbtree, gc_work.work); 364 priv = container_of(work, struct nft_rbtree, gc_work.work);
366 set = nft_set_container_of(priv); 365 set = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
371 rbe = rb_entry(node, struct nft_rbtree_elem, node); 370 rbe = rb_entry(node, struct nft_rbtree_elem, node);
372 371
373 if (nft_rbtree_interval_end(rbe)) { 372 if (nft_rbtree_interval_end(rbe)) {
374 prev = node; 373 rbe_end = rbe;
375 continue; 374 continue;
376 } 375 }
377 if (!nft_set_elem_expired(&rbe->ext)) 376 if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
379 if (nft_set_elem_mark_busy(&rbe->ext)) 378 if (nft_set_elem_mark_busy(&rbe->ext))
380 continue; 379 continue;
381 380
381 if (rbe_prev) {
382 rb_erase(&rbe_prev->node, &priv->root);
383 rbe_prev = NULL;
384 }
382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); 385 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
383 if (!gcb) 386 if (!gcb)
384 break; 387 break;
385 388
386 atomic_dec(&set->nelems); 389 atomic_dec(&set->nelems);
387 nft_set_gc_batch_add(gcb, rbe); 390 nft_set_gc_batch_add(gcb, rbe);
391 rbe_prev = rbe;
388 392
389 if (prev) { 393 if (rbe_end) {
390 rbe = rb_entry(prev, struct nft_rbtree_elem, node);
391 atomic_dec(&set->nelems); 394 atomic_dec(&set->nelems);
392 nft_set_gc_batch_add(gcb, rbe); 395 nft_set_gc_batch_add(gcb, rbe_end);
393 prev = NULL; 396 rb_erase(&rbe_end->node, &priv->root);
397 rbe_end = NULL;
394 } 398 }
395 node = rb_next(node); 399 node = rb_next(node);
396 if (!node) 400 if (!node)
397 break; 401 break;
398 } 402 }
399 if (gcb) { 403 if (rbe_prev)
400 for (i = 0; i < gcb->head.cnt; i++) { 404 rb_erase(&rbe_prev->node, &priv->root);
401 rbe = gcb->elems[i];
402 rb_erase(&rbe->node, &priv->root);
403 }
404 }
405 write_seqcount_end(&priv->count); 405 write_seqcount_end(&priv->count);
406 write_unlock_bh(&priv->lock); 406 write_unlock_bh(&priv->lock);
407 407
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
index 9f4151ec3e06..6c7aa6a0a0d2 100644
--- a/net/netfilter/xt_CHECKSUM.c
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -16,6 +16,9 @@
16#include <linux/netfilter/x_tables.h> 16#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter/xt_CHECKSUM.h> 17#include <linux/netfilter/xt_CHECKSUM.h>
18 18
19#include <linux/netfilter_ipv4/ip_tables.h>
20#include <linux/netfilter_ipv6/ip6_tables.h>
21
19MODULE_LICENSE("GPL"); 22MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>"); 23MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
21MODULE_DESCRIPTION("Xtables: checksum modification"); 24MODULE_DESCRIPTION("Xtables: checksum modification");
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
25static unsigned int 28static unsigned int
26checksum_tg(struct sk_buff *skb, const struct xt_action_param *par) 29checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
27{ 30{
28 if (skb->ip_summed == CHECKSUM_PARTIAL) 31 if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
29 skb_checksum_help(skb); 32 skb_checksum_help(skb);
30 33
31 return XT_CONTINUE; 34 return XT_CONTINUE;
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
34static int checksum_tg_check(const struct xt_tgchk_param *par) 37static int checksum_tg_check(const struct xt_tgchk_param *par)
35{ 38{
36 const struct xt_CHECKSUM_info *einfo = par->targinfo; 39 const struct xt_CHECKSUM_info *einfo = par->targinfo;
40 const struct ip6t_ip6 *i6 = par->entryinfo;
41 const struct ipt_ip *i4 = par->entryinfo;
37 42
38 if (einfo->operation & ~XT_CHECKSUM_OP_FILL) { 43 if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
39 pr_info_ratelimited("unsupported CHECKSUM operation %x\n", 44 pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
43 if (!einfo->operation) 48 if (!einfo->operation)
44 return -EINVAL; 49 return -EINVAL;
45 50
51 switch (par->family) {
52 case NFPROTO_IPV4:
53 if (i4->proto == IPPROTO_UDP &&
54 (i4->invflags & XT_INV_PROTO) == 0)
55 return 0;
56 break;
57 case NFPROTO_IPV6:
58 if ((i6->flags & IP6T_F_PROTO) &&
59 i6->proto == IPPROTO_UDP &&
60 (i6->invflags & XT_INV_PROTO) == 0)
61 return 0;
62 break;
63 }
64
65 pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
46 return 0; 66 return 0;
47} 67}
48 68
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index dfbdbb2fc0ed..51d0c257e7a5 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
125static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) 125static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
126{ 126{
127 struct xt_cluster_match_info *info = par->matchinfo; 127 struct xt_cluster_match_info *info = par->matchinfo;
128 int ret;
128 129
129 if (info->total_nodes > XT_CLUSTER_NODES_MAX) { 130 if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
130 pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n", 131 pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
135 pr_info_ratelimited("node mask cannot exceed total number of nodes\n"); 136 pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
136 return -EDOM; 137 return -EDOM;
137 } 138 }
138 return 0; 139
140 ret = nf_ct_netns_get(par->net, par->family);
141 if (ret < 0)
142 pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
143 par->family);
144 return ret;
145}
146
147static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
148{
149 nf_ct_netns_put(par->net, par->family);
139} 150}
140 151
141static struct xt_match xt_cluster_match __read_mostly = { 152static struct xt_match xt_cluster_match __read_mostly = {
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
144 .match = xt_cluster_mt, 155 .match = xt_cluster_mt,
145 .checkentry = xt_cluster_mt_checkentry, 156 .checkentry = xt_cluster_mt_checkentry,
146 .matchsize = sizeof(struct xt_cluster_match_info), 157 .matchsize = sizeof(struct xt_cluster_match_info),
158 .destroy = xt_cluster_mt_destroy,
147 .me = THIS_MODULE, 159 .me = THIS_MODULE,
148}; 160};
149 161
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9b16402f29af..3e7d259e5d8d 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
1057static void *dl_seq_start(struct seq_file *s, loff_t *pos) 1057static void *dl_seq_start(struct seq_file *s, loff_t *pos)
1058 __acquires(htable->lock) 1058 __acquires(htable->lock)
1059{ 1059{
1060 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1060 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1061 unsigned int *bucket; 1061 unsigned int *bucket;
1062 1062
1063 spin_lock_bh(&htable->lock); 1063 spin_lock_bh(&htable->lock);
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
1074 1074
1075static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos) 1075static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
1076{ 1076{
1077 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1077 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1078 unsigned int *bucket = v; 1078 unsigned int *bucket = v;
1079 1079
1080 *pos = ++(*bucket); 1080 *pos = ++(*bucket);
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
1088static void dl_seq_stop(struct seq_file *s, void *v) 1088static void dl_seq_stop(struct seq_file *s, void *v)
1089 __releases(htable->lock) 1089 __releases(htable->lock)
1090{ 1090{
1091 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1091 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1092 unsigned int *bucket = v; 1092 unsigned int *bucket = v;
1093 1093
1094 if (!IS_ERR(bucket)) 1094 if (!IS_ERR(bucket))
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
1130static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, 1130static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
1131 struct seq_file *s) 1131 struct seq_file *s)
1132{ 1132{
1133 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1133 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
1134 1134
1135 spin_lock(&ent->lock); 1135 spin_lock(&ent->lock);
1136 /* recalculate to show accurate numbers */ 1136 /* recalculate to show accurate numbers */
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
1145static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, 1145static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
1146 struct seq_file *s) 1146 struct seq_file *s)
1147{ 1147{
1148 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1148 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
1149 1149
1150 spin_lock(&ent->lock); 1150 spin_lock(&ent->lock);
1151 /* recalculate to show accurate numbers */ 1151 /* recalculate to show accurate numbers */
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
1160static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, 1160static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
1161 struct seq_file *s) 1161 struct seq_file *s)
1162{ 1162{
1163 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private)); 1163 struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
1164 1164
1165 spin_lock(&ent->lock); 1165 spin_lock(&ent->lock);
1166 /* recalculate to show accurate numbers */ 1166 /* recalculate to show accurate numbers */
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
1174 1174
1175static int dl_seq_show_v2(struct seq_file *s, void *v) 1175static int dl_seq_show_v2(struct seq_file *s, void *v)
1176{ 1176{
1177 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1177 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1178 unsigned int *bucket = (unsigned int *)v; 1178 unsigned int *bucket = (unsigned int *)v;
1179 struct dsthash_ent *ent; 1179 struct dsthash_ent *ent;
1180 1180
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
1188 1188
1189static int dl_seq_show_v1(struct seq_file *s, void *v) 1189static int dl_seq_show_v1(struct seq_file *s, void *v)
1190{ 1190{
1191 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1191 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1192 unsigned int *bucket = v; 1192 unsigned int *bucket = v;
1193 struct dsthash_ent *ent; 1193 struct dsthash_ent *ent;
1194 1194
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
1202 1202
1203static int dl_seq_show(struct seq_file *s, void *v) 1203static int dl_seq_show(struct seq_file *s, void *v)
1204{ 1204{
1205 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private)); 1205 struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
1206 unsigned int *bucket = v; 1206 unsigned int *bucket = v;
1207 struct dsthash_ent *ent; 1207 struct dsthash_ent *ent;
1208 1208
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 0472f3472842..ada144e5645b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
56 struct sk_buff *pskb = (struct sk_buff *)skb; 56 struct sk_buff *pskb = (struct sk_buff *)skb;
57 struct sock *sk = skb->sk; 57 struct sock *sk = skb->sk;
58 58
59 if (!net_eq(xt_net(par), sock_net(sk))) 59 if (sk && !net_eq(xt_net(par), sock_net(sk)))
60 sk = NULL; 60 sk = NULL;
61 61
62 if (!sk) 62 if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
117 struct sk_buff *pskb = (struct sk_buff *)skb; 117 struct sk_buff *pskb = (struct sk_buff *)skb;
118 struct sock *sk = skb->sk; 118 struct sock *sk = skb->sk;
119 119
120 if (!net_eq(xt_net(par), sock_net(sk))) 120 if (sk && !net_eq(xt_net(par), sock_net(sk)))
121 sk = NULL; 121 sk = NULL;
122 122
123 if (!sk) 123 if (!sk)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index c070dfc0190a..c92894c3e40a 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
781{ 781{
782 u32 addr_len; 782 u32 addr_len;
783 783
784 if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { 784 if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
785 info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
785 addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); 786 addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
786 if (addr_len != sizeof(struct in_addr) && 787 if (addr_len != sizeof(struct in_addr) &&
787 addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) 788 addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ac8030c4bcf8..19cb2e473ea6 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
209 } 209 }
210 create_info = (struct hci_create_pipe_resp *)skb->data; 210 create_info = (struct hci_create_pipe_resp *)skb->data;
211 211
212 if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
213 status = NFC_HCI_ANY_E_NOK;
214 goto exit;
215 }
216
212 /* Save the new created pipe and bind with local gate, 217 /* Save the new created pipe and bind with local gate,
213 * the description for skb->data[3] is destination gate id 218 * the description for skb->data[3] is destination gate id
214 * but since we received this cmd from host controller, we 219 * but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
232 } 237 }
233 delete_info = (struct hci_delete_pipe_noti *)skb->data; 238 delete_info = (struct hci_delete_pipe_noti *)skb->data;
234 239
240 if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
241 status = NFC_HCI_ANY_E_NOK;
242 goto exit;
243 }
244
235 hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; 245 hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
236 hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; 246 hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
237 break; 247 break;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 86a75105af1a..0aeb34c6389d 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1624,10 +1624,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1624 OVS_NLERR(log, "Failed to allocate conntrack template"); 1624 OVS_NLERR(log, "Failed to allocate conntrack template");
1625 return -ENOMEM; 1625 return -ENOMEM;
1626 } 1626 }
1627
1628 __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
1629 nf_conntrack_get(&ct_info.ct->ct_general);
1630
1631 if (helper) { 1627 if (helper) {
1632 err = ovs_ct_add_helper(&ct_info, helper, key, log); 1628 err = ovs_ct_add_helper(&ct_info, helper, key, log);
1633 if (err) 1629 if (err)
@@ -1639,6 +1635,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1639 if (err) 1635 if (err)
1640 goto err_free_ct; 1636 goto err_free_ct;
1641 1637
1638 __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
1639 nf_conntrack_get(&ct_info.ct->ct_general);
1642 return 0; 1640 return 0;
1643err_free_ct: 1641err_free_ct:
1644 __ovs_ct_free_action(&ct_info); 1642 __ovs_ct_free_action(&ct_info);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5610061e7f2e..75c92a87e7b2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4137,36 +4137,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
4137 .close = packet_mm_close, 4137 .close = packet_mm_close,
4138}; 4138};
4139 4139
4140static void free_pg_vec(struct pgv *pg_vec, unsigned int len) 4140static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4141 unsigned int len)
4141{ 4142{
4142 int i; 4143 int i;
4143 4144
4144 for (i = 0; i < len; i++) { 4145 for (i = 0; i < len; i++) {
4145 if (likely(pg_vec[i].buffer)) { 4146 if (likely(pg_vec[i].buffer)) {
4146 kvfree(pg_vec[i].buffer); 4147 if (is_vmalloc_addr(pg_vec[i].buffer))
4148 vfree(pg_vec[i].buffer);
4149 else
4150 free_pages((unsigned long)pg_vec[i].buffer,
4151 order);
4147 pg_vec[i].buffer = NULL; 4152 pg_vec[i].buffer = NULL;
4148 } 4153 }
4149 } 4154 }
4150 kfree(pg_vec); 4155 kfree(pg_vec);
4151} 4156}
4152 4157
4153static char *alloc_one_pg_vec_page(unsigned long size) 4158static char *alloc_one_pg_vec_page(unsigned long order)
4154{ 4159{
4155 char *buffer; 4160 char *buffer;
4161 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4162 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4156 4163
4157 buffer = kvzalloc(size, GFP_KERNEL); 4164 buffer = (char *) __get_free_pages(gfp_flags, order);
4158 if (buffer) 4165 if (buffer)
4159 return buffer; 4166 return buffer;
4160 4167
4161 buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL); 4168 /* __get_free_pages failed, fall back to vmalloc */
4169 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4170 if (buffer)
4171 return buffer;
4162 4172
4163 return buffer; 4173 /* vmalloc failed, lets dig into swap here */
4174 gfp_flags &= ~__GFP_NORETRY;
4175 buffer = (char *) __get_free_pages(gfp_flags, order);
4176 if (buffer)
4177 return buffer;
4178
4179 /* complete and utter failure */
4180 return NULL;
4164} 4181}
4165 4182
4166static struct pgv *alloc_pg_vec(struct tpacket_req *req) 4183static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4167{ 4184{
4168 unsigned int block_nr = req->tp_block_nr; 4185 unsigned int block_nr = req->tp_block_nr;
4169 unsigned long size = req->tp_block_size;
4170 struct pgv *pg_vec; 4186 struct pgv *pg_vec;
4171 int i; 4187 int i;
4172 4188
@@ -4175,7 +4191,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
4175 goto out; 4191 goto out;
4176 4192
4177 for (i = 0; i < block_nr; i++) { 4193 for (i = 0; i < block_nr; i++) {
4178 pg_vec[i].buffer = alloc_one_pg_vec_page(size); 4194 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4179 if (unlikely(!pg_vec[i].buffer)) 4195 if (unlikely(!pg_vec[i].buffer))
4180 goto out_free_pgvec; 4196 goto out_free_pgvec;
4181 } 4197 }
@@ -4184,7 +4200,7 @@ out:
4184 return pg_vec; 4200 return pg_vec;
4185 4201
4186out_free_pgvec: 4202out_free_pgvec:
4187 free_pg_vec(pg_vec, block_nr); 4203 free_pg_vec(pg_vec, order, block_nr);
4188 pg_vec = NULL; 4204 pg_vec = NULL;
4189 goto out; 4205 goto out;
4190} 4206}
@@ -4194,9 +4210,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4194{ 4210{
4195 struct pgv *pg_vec = NULL; 4211 struct pgv *pg_vec = NULL;
4196 struct packet_sock *po = pkt_sk(sk); 4212 struct packet_sock *po = pkt_sk(sk);
4213 int was_running, order = 0;
4197 struct packet_ring_buffer *rb; 4214 struct packet_ring_buffer *rb;
4198 struct sk_buff_head *rb_queue; 4215 struct sk_buff_head *rb_queue;
4199 int was_running;
4200 __be16 num; 4216 __be16 num;
4201 int err = -EINVAL; 4217 int err = -EINVAL;
4202 /* Added to avoid minimal code churn */ 4218 /* Added to avoid minimal code churn */
@@ -4258,7 +4274,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4258 goto out; 4274 goto out;
4259 4275
4260 err = -ENOMEM; 4276 err = -ENOMEM;
4261 pg_vec = alloc_pg_vec(req); 4277 order = get_order(req->tp_block_size);
4278 pg_vec = alloc_pg_vec(req, order);
4262 if (unlikely(!pg_vec)) 4279 if (unlikely(!pg_vec))
4263 goto out; 4280 goto out;
4264 switch (po->tp_version) { 4281 switch (po->tp_version) {
@@ -4312,6 +4329,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4312 rb->frame_size = req->tp_frame_size; 4329 rb->frame_size = req->tp_frame_size;
4313 spin_unlock_bh(&rb_queue->lock); 4330 spin_unlock_bh(&rb_queue->lock);
4314 4331
4332 swap(rb->pg_vec_order, order);
4315 swap(rb->pg_vec_len, req->tp_block_nr); 4333 swap(rb->pg_vec_len, req->tp_block_nr);
4316 4334
4317 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE; 4335 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4355,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4337 } 4355 }
4338 4356
4339 if (pg_vec) 4357 if (pg_vec)
4340 free_pg_vec(pg_vec, req->tp_block_nr); 4358 free_pg_vec(pg_vec, order, req->tp_block_nr);
4341out: 4359out:
4342 return err; 4360 return err;
4343} 4361}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 8f50036f62f0..3bb7c5fb3bff 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
64 unsigned int frame_size; 64 unsigned int frame_size;
65 unsigned int frame_max; 65 unsigned int frame_max;
66 66
67 unsigned int pg_vec_order;
67 unsigned int pg_vec_pages; 68 unsigned int pg_vec_pages;
68 unsigned int pg_vec_len; 69 unsigned int pg_vec_len;
69 70
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 01b3bd6a3708..b9092111bc45 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,6 +1,6 @@
1 1
2config RDS 2config RDS
3 tristate "The RDS Protocol" 3 tristate "The Reliable Datagram Sockets Protocol"
4 depends on INET 4 depends on INET
5 ---help--- 5 ---help---
6 The RDS (Reliable Datagram Sockets) protocol provides reliable, 6 The RDS (Reliable Datagram Sockets) protocol provides reliable,
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 3ab55784b637..762d2c6788a3 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
76 struct rds_sock *rs; 76 struct rds_sock *rs;
77 77
78 __rds_create_bind_key(key, addr, port, scope_id); 78 __rds_create_bind_key(key, addr, port, scope_id);
79 rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms); 79 rcu_read_lock();
80 rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
80 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) 81 if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
81 rds_sock_addref(rs); 82 rds_sock_addref(rs);
82 else 83 else
83 rs = NULL; 84 rs = NULL;
85 rcu_read_unlock();
84 86
85 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, 87 rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
86 ntohs(port)); 88 ntohs(port));
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
235 goto out; 237 goto out;
236 } 238 }
237 239
240 sock_set_flag(sk, SOCK_RCU_FREE);
238 ret = rds_add_bound(rs, binding_addr, &port, scope_id); 241 ret = rds_add_bound(rs, binding_addr, &port, scope_id);
239 if (ret) 242 if (ret)
240 goto out; 243 goto out;
diff --git a/net/rds/ib.c b/net/rds/ib.c
index c1d97640c0be..eba75c1ba359 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
341 341
342 if (rds_conn_state(conn) == RDS_CONN_UP) { 342 if (rds_conn_state(conn) == RDS_CONN_UP) {
343 struct rds_ib_device *rds_ibdev; 343 struct rds_ib_device *rds_ibdev;
344 struct rdma_dev_addr *dev_addr;
345 344
346 ic = conn->c_transport_data; 345 ic = conn->c_transport_data;
347 dev_addr = &ic->i_cm_id->route.addr.dev_addr; 346 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
348 rdma_addr_get_sgid(dev_addr, 347 (union ib_gid *)&iinfo6->dst_gid);
349 (union ib_gid *)&iinfo6->src_gid);
350 rdma_addr_get_dgid(dev_addr,
351 (union ib_gid *)&iinfo6->dst_gid);
352
353 rds_ibdev = ic->rds_ibdev; 348 rds_ibdev = ic->rds_ibdev;
354 iinfo6->max_send_wr = ic->i_send_ring.w_nr; 349 iinfo6->max_send_wr = ic->i_send_ring.w_nr;
355 iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; 350 iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 73427ff439f9..71ff356ee702 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
443int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); 443int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
444 444
445/* ib_stats.c */ 445/* ib_stats.c */
446DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); 446DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
447#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) 447#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
448#define rds_ib_stats_add(member, count) \ 448#define rds_ib_stats_add(member, count) \
449 rds_stats_add_which(rds_ib_stats, member, count) 449 rds_stats_add_which(rds_ib_stats, member, count)
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2c7b7c352d3e..b9bbcf3d6c63 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -37,7 +37,6 @@
37#include <net/tcp.h> 37#include <net/tcp.h>
38#include <net/net_namespace.h> 38#include <net/net_namespace.h>
39#include <net/netns/generic.h> 39#include <net/netns/generic.h>
40#include <net/tcp.h>
41#include <net/addrconf.h> 40#include <net/addrconf.h>
42 41
43#include "rds.h" 42#include "rds.h"
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 00192a996be0..0f8465852254 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/mod_devicetable.h>
23#include <linux/rfkill.h> 24#include <linux/rfkill.h>
24#include <linux/platform_device.h> 25#include <linux/platform_device.h>
25#include <linux/clk.h> 26#include <linux/clk.h>
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c97558710421..ef9554131434 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
40struct rxrpc_connection; 40struct rxrpc_connection;
41 41
42/* 42/*
43 * Mark applied to socket buffers. 43 * Mark applied to socket buffers in skb->mark. skb->priority is used
44 * to pass supplementary information.
44 */ 45 */
45enum rxrpc_skb_mark { 46enum rxrpc_skb_mark {
46 RXRPC_SKB_MARK_DATA, /* data message */ 47 RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
47 RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */ 48 RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
48 RXRPC_SKB_MARK_BUSY, /* server busy message */
49 RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
50 RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
51 RXRPC_SKB_MARK_NET_ERROR, /* network error message */
52 RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
53 RXRPC_SKB_MARK_NEW_CALL, /* local error message */
54}; 49};
55 50
56/* 51/*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
293 struct hlist_node hash_link; 288 struct hlist_node hash_link;
294 struct rxrpc_local *local; 289 struct rxrpc_local *local;
295 struct hlist_head error_targets; /* targets for net error distribution */ 290 struct hlist_head error_targets; /* targets for net error distribution */
296 struct work_struct error_distributor;
297 struct rb_root service_conns; /* Service connections */ 291 struct rb_root service_conns; /* Service connections */
298 struct list_head keepalive_link; /* Link in net->peer_keepalive[] */ 292 struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
299 time64_t last_tx_at; /* Last time packet sent here */ 293 time64_t last_tx_at; /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
304 unsigned int maxdata; /* data size (MTU - hdrsize) */ 298 unsigned int maxdata; /* data size (MTU - hdrsize) */
305 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ 299 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
306 int debug_id; /* debug ID for printks */ 300 int debug_id; /* debug ID for printks */
307 int error_report; /* Net (+0) or local (+1000000) to distribute */
308#define RXRPC_LOCAL_ERROR_OFFSET 1000000
309 struct sockaddr_rxrpc srx; /* remote address */ 301 struct sockaddr_rxrpc srx; /* remote address */
310 302
311 /* calculated RTT cache */ 303 /* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
463 u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ 455 u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
464}; 456};
465 457
458static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
459{
460 return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
461}
462
463static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
464{
465 return !rxrpc_to_server(sp);
466}
467
466/* 468/*
467 * Flags in call->flags. 469 * Flags in call->flags.
468 */ 470 */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
717int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); 719int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
718void rxrpc_discard_prealloc(struct rxrpc_sock *); 720void rxrpc_discard_prealloc(struct rxrpc_sock *);
719struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *, 721struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
722 struct rxrpc_sock *,
723 struct rxrpc_peer *,
720 struct rxrpc_connection *, 724 struct rxrpc_connection *,
721 struct sk_buff *); 725 struct sk_buff *);
722void rxrpc_accept_incoming_calls(struct rxrpc_local *); 726void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
908 912
909struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); 913struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
910struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, 914struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
911 struct sk_buff *); 915 struct sk_buff *,
916 struct rxrpc_peer **);
912void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *); 917void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
913void rxrpc_disconnect_call(struct rxrpc_call *); 918void rxrpc_disconnect_call(struct rxrpc_call *);
914void rxrpc_kill_connection(struct rxrpc_connection *); 919void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
1031 * peer_event.c 1036 * peer_event.c
1032 */ 1037 */
1033void rxrpc_error_report(struct sock *); 1038void rxrpc_error_report(struct sock *);
1034void rxrpc_peer_error_distributor(struct work_struct *);
1035void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1039void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
1036 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1040 rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
1037void rxrpc_peer_keepalive_worker(struct work_struct *); 1041void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
1044struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *, 1048struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
1045 struct sockaddr_rxrpc *, gfp_t); 1049 struct sockaddr_rxrpc *, gfp_t);
1046struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t); 1050struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
1047struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *, 1051void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
1048 struct rxrpc_peer *);
1049void rxrpc_destroy_all_peers(struct rxrpc_net *); 1052void rxrpc_destroy_all_peers(struct rxrpc_net *);
1050struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *); 1053struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
1051struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *); 1054struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
1052void rxrpc_put_peer(struct rxrpc_peer *); 1055void rxrpc_put_peer(struct rxrpc_peer *);
1053void __rxrpc_queue_peer_error(struct rxrpc_peer *);
1054 1056
1055/* 1057/*
1056 * proc.c 1058 * proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9d1e298b784c..9c7f26d06a52 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
249 */ 249 */
250static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, 250static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
251 struct rxrpc_local *local, 251 struct rxrpc_local *local,
252 struct rxrpc_peer *peer,
252 struct rxrpc_connection *conn, 253 struct rxrpc_connection *conn,
253 struct sk_buff *skb) 254 struct sk_buff *skb)
254{ 255{
255 struct rxrpc_backlog *b = rx->backlog; 256 struct rxrpc_backlog *b = rx->backlog;
256 struct rxrpc_peer *peer, *xpeer;
257 struct rxrpc_call *call; 257 struct rxrpc_call *call;
258 unsigned short call_head, conn_head, peer_head; 258 unsigned short call_head, conn_head, peer_head;
259 unsigned short call_tail, conn_tail, peer_tail; 259 unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
276 return NULL; 276 return NULL;
277 277
278 if (!conn) { 278 if (!conn) {
279 /* No connection. We're going to need a peer to start off 279 if (peer && !rxrpc_get_peer_maybe(peer))
280 * with. If one doesn't yet exist, use a spare from the 280 peer = NULL;
281 * preallocation set. We dump the address into the spare in 281 if (!peer) {
282 * anticipation - and to save on stack space. 282 peer = b->peer_backlog[peer_tail];
283 */ 283 if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
284 xpeer = b->peer_backlog[peer_tail]; 284 return NULL;
285 if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
286 return NULL;
287
288 peer = rxrpc_lookup_incoming_peer(local, xpeer);
289 if (peer == xpeer) {
290 b->peer_backlog[peer_tail] = NULL; 285 b->peer_backlog[peer_tail] = NULL;
291 smp_store_release(&b->peer_backlog_tail, 286 smp_store_release(&b->peer_backlog_tail,
292 (peer_tail + 1) & 287 (peer_tail + 1) &
293 (RXRPC_BACKLOG_MAX - 1)); 288 (RXRPC_BACKLOG_MAX - 1));
289
290 rxrpc_new_incoming_peer(local, peer);
294 } 291 }
295 292
296 /* Now allocate and set up the connection */ 293 /* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
335 * The call is returned with the user access mutex held. 332 * The call is returned with the user access mutex held.
336 */ 333 */
337struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, 334struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
335 struct rxrpc_sock *rx,
336 struct rxrpc_peer *peer,
338 struct rxrpc_connection *conn, 337 struct rxrpc_connection *conn,
339 struct sk_buff *skb) 338 struct sk_buff *skb)
340{ 339{
341 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 340 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
342 struct rxrpc_sock *rx;
343 struct rxrpc_call *call; 341 struct rxrpc_call *call;
344 u16 service_id = sp->hdr.serviceId;
345 342
346 _enter(""); 343 _enter("");
347 344
348 /* Get the socket providing the service */
349 rx = rcu_dereference(local->service);
350 if (rx && (service_id == rx->srx.srx_service ||
351 service_id == rx->second_service))
352 goto found_service;
353
354 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
355 RX_INVALID_OPERATION, EOPNOTSUPP);
356 skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
357 skb->priority = RX_INVALID_OPERATION;
358 _leave(" = NULL [service]");
359 return NULL;
360
361found_service:
362 spin_lock(&rx->incoming_lock); 345 spin_lock(&rx->incoming_lock);
363 if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || 346 if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
364 rx->sk.sk_state == RXRPC_CLOSE) { 347 rx->sk.sk_state == RXRPC_CLOSE) {
365 trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber, 348 trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
366 sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); 349 sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
367 skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; 350 skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
368 skb->priority = RX_INVALID_OPERATION; 351 skb->priority = RX_INVALID_OPERATION;
369 _leave(" = NULL [close]"); 352 _leave(" = NULL [close]");
370 call = NULL; 353 call = NULL;
371 goto out; 354 goto out;
372 } 355 }
373 356
374 call = rxrpc_alloc_incoming_call(rx, local, conn, skb); 357 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
375 if (!call) { 358 if (!call) {
376 skb->mark = RXRPC_SKB_MARK_BUSY; 359 skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
377 _leave(" = NULL [busy]"); 360 _leave(" = NULL [busy]");
378 call = NULL; 361 call = NULL;
379 goto out; 362 goto out;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9486293fef5c..799f75b6900d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
400 rcu_assign_pointer(conn->channels[chan].call, call); 400 rcu_assign_pointer(conn->channels[chan].call, call);
401 401
402 spin_lock(&conn->params.peer->lock); 402 spin_lock(&conn->params.peer->lock);
403 hlist_add_head(&call->error_link, &conn->params.peer->error_targets); 403 hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
404 spin_unlock(&conn->params.peer->lock); 404 spin_unlock(&conn->params.peer->lock);
405 405
406 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id); 406 _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f8f37188a932..8acf74fe24c0 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
710 } 710 }
711 711
712 spin_lock_bh(&call->conn->params.peer->lock); 712 spin_lock_bh(&call->conn->params.peer->lock);
713 hlist_add_head(&call->error_link, 713 hlist_add_head_rcu(&call->error_link,
714 &call->conn->params.peer->error_targets); 714 &call->conn->params.peer->error_targets);
715 spin_unlock_bh(&call->conn->params.peer->lock); 715 spin_unlock_bh(&call->conn->params.peer->lock);
716 716
717out: 717out:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 77440a356b14..885dae829f4a 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
69 * If successful, a pointer to the connection is returned, but no ref is taken. 69 * If successful, a pointer to the connection is returned, but no ref is taken.
70 * NULL is returned if there is no match. 70 * NULL is returned if there is no match.
71 * 71 *
72 * When searching for a service call, if we find a peer but no connection, we
73 * return that through *_peer in case we need to create a new service call.
74 *
72 * The caller must be holding the RCU read lock. 75 * The caller must be holding the RCU read lock.
73 */ 76 */
74struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local, 77struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
75 struct sk_buff *skb) 78 struct sk_buff *skb,
79 struct rxrpc_peer **_peer)
76{ 80{
77 struct rxrpc_connection *conn; 81 struct rxrpc_connection *conn;
78 struct rxrpc_conn_proto k; 82 struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
85 if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0) 89 if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
86 goto not_found; 90 goto not_found;
87 91
88 k.epoch = sp->hdr.epoch;
89 k.cid = sp->hdr.cid & RXRPC_CIDMASK;
90
91 /* We may have to handle mixing IPv4 and IPv6 */ 92 /* We may have to handle mixing IPv4 and IPv6 */
92 if (srx.transport.family != local->srx.transport.family) { 93 if (srx.transport.family != local->srx.transport.family) {
93 pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n", 94 pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
99 k.epoch = sp->hdr.epoch; 100 k.epoch = sp->hdr.epoch;
100 k.cid = sp->hdr.cid & RXRPC_CIDMASK; 101 k.cid = sp->hdr.cid & RXRPC_CIDMASK;
101 102
102 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) { 103 if (rxrpc_to_server(sp)) {
103 /* We need to look up service connections by the full protocol 104 /* We need to look up service connections by the full protocol
104 * parameter set. We look up the peer first as an intermediate 105 * parameter set. We look up the peer first as an intermediate
105 * step and then the connection from the peer's tree. 106 * step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
107 peer = rxrpc_lookup_peer_rcu(local, &srx); 108 peer = rxrpc_lookup_peer_rcu(local, &srx);
108 if (!peer) 109 if (!peer)
109 goto not_found; 110 goto not_found;
111 *_peer = peer;
110 conn = rxrpc_find_service_conn_rcu(peer, skb); 112 conn = rxrpc_find_service_conn_rcu(peer, skb);
111 if (!conn || atomic_read(&conn->usage) == 0) 113 if (!conn || atomic_read(&conn->usage) == 0)
112 goto not_found; 114 goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
214 call->peer->cong_cwnd = call->cong_cwnd; 216 call->peer->cong_cwnd = call->cong_cwnd;
215 217
216 spin_lock_bh(&conn->params.peer->lock); 218 spin_lock_bh(&conn->params.peer->lock);
217 hlist_del_init(&call->error_link); 219 hlist_del_rcu(&call->error_link);
218 spin_unlock_bh(&conn->params.peer->lock); 220 spin_unlock_bh(&conn->params.peer->lock);
219 221
220 if (rxrpc_is_client_call(call)) 222 if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index cfdc199c6351..800f5b8a1baa 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
622 if (!skb) 622 if (!skb)
623 continue; 623 continue;
624 624
625 sent_at = skb->tstamp;
626 smp_rmb(); /* Read timestamp before serial. */
625 sp = rxrpc_skb(skb); 627 sp = rxrpc_skb(skb);
626 if (sp->hdr.serial != orig_serial) 628 if (sp->hdr.serial != orig_serial)
627 continue; 629 continue;
628 smp_rmb();
629 sent_at = skb->tstamp;
630 goto found; 630 goto found;
631 } 631 }
632
632 return; 633 return;
633 634
634found: 635found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
1124{ 1125{
1125 struct rxrpc_connection *conn; 1126 struct rxrpc_connection *conn;
1126 struct rxrpc_channel *chan; 1127 struct rxrpc_channel *chan;
1127 struct rxrpc_call *call; 1128 struct rxrpc_call *call = NULL;
1128 struct rxrpc_skb_priv *sp; 1129 struct rxrpc_skb_priv *sp;
1129 struct rxrpc_local *local = udp_sk->sk_user_data; 1130 struct rxrpc_local *local = udp_sk->sk_user_data;
1131 struct rxrpc_peer *peer = NULL;
1132 struct rxrpc_sock *rx = NULL;
1130 struct sk_buff *skb; 1133 struct sk_buff *skb;
1131 unsigned int channel; 1134 unsigned int channel;
1132 int ret, skew; 1135 int ret, skew = 0;
1133 1136
1134 _enter("%p", udp_sk); 1137 _enter("%p", udp_sk);
1135 1138
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
1143 return; 1146 return;
1144 } 1147 }
1145 1148
1149 if (skb->tstamp == 0)
1150 skb->tstamp = ktime_get_real();
1151
1146 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1152 rxrpc_new_skb(skb, rxrpc_skb_rx_received);
1147 1153
1148 _net("recv skb %p", skb); 1154 _net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
1177 1183
1178 trace_rxrpc_rx_packet(sp); 1184 trace_rxrpc_rx_packet(sp);
1179 1185
1180 _net("Rx RxRPC %s ep=%x call=%x:%x",
1181 sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
1182 sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
1183
1184 if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
1185 !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
1186 _proto("Rx Bad Packet Type %u", sp->hdr.type);
1187 goto bad_message;
1188 }
1189
1190 switch (sp->hdr.type) { 1186 switch (sp->hdr.type) {
1191 case RXRPC_PACKET_TYPE_VERSION: 1187 case RXRPC_PACKET_TYPE_VERSION:
1192 if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) 1188 if (rxrpc_to_client(sp))
1193 goto discard; 1189 goto discard;
1194 rxrpc_post_packet_to_local(local, skb); 1190 rxrpc_post_packet_to_local(local, skb);
1195 goto out; 1191 goto out;
1196 1192
1197 case RXRPC_PACKET_TYPE_BUSY: 1193 case RXRPC_PACKET_TYPE_BUSY:
1198 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) 1194 if (rxrpc_to_server(sp))
1199 goto discard; 1195 goto discard;
1200 /* Fall through */ 1196 /* Fall through */
1197 case RXRPC_PACKET_TYPE_ACK:
1198 case RXRPC_PACKET_TYPE_ACKALL:
1199 if (sp->hdr.callNumber == 0)
1200 goto bad_message;
1201 /* Fall through */
1202 case RXRPC_PACKET_TYPE_ABORT:
1203 break;
1201 1204
1202 case RXRPC_PACKET_TYPE_DATA: 1205 case RXRPC_PACKET_TYPE_DATA:
1203 if (sp->hdr.callNumber == 0) 1206 if (sp->hdr.callNumber == 0 ||
1207 sp->hdr.seq == 0)
1204 goto bad_message; 1208 goto bad_message;
1205 if (sp->hdr.flags & RXRPC_JUMBO_PACKET && 1209 if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
1206 !rxrpc_validate_jumbo(skb)) 1210 !rxrpc_validate_jumbo(skb))
1207 goto bad_message; 1211 goto bad_message;
1208 break; 1212 break;
1209 1213
1214 case RXRPC_PACKET_TYPE_CHALLENGE:
1215 if (rxrpc_to_server(sp))
1216 goto discard;
1217 break;
1218 case RXRPC_PACKET_TYPE_RESPONSE:
1219 if (rxrpc_to_client(sp))
1220 goto discard;
1221 break;
1222
1210 /* Packet types 9-11 should just be ignored. */ 1223 /* Packet types 9-11 should just be ignored. */
1211 case RXRPC_PACKET_TYPE_PARAMS: 1224 case RXRPC_PACKET_TYPE_PARAMS:
1212 case RXRPC_PACKET_TYPE_10: 1225 case RXRPC_PACKET_TYPE_10:
1213 case RXRPC_PACKET_TYPE_11: 1226 case RXRPC_PACKET_TYPE_11:
1214 goto discard; 1227 goto discard;
1228
1229 default:
1230 _proto("Rx Bad Packet Type %u", sp->hdr.type);
1231 goto bad_message;
1215 } 1232 }
1216 1233
1234 if (sp->hdr.serviceId == 0)
1235 goto bad_message;
1236
1217 rcu_read_lock(); 1237 rcu_read_lock();
1218 1238
1219 conn = rxrpc_find_connection_rcu(local, skb); 1239 if (rxrpc_to_server(sp)) {
1240 /* Weed out packets to services we're not offering. Packets
1241 * that would begin a call are explicitly rejected and the rest
1242 * are just discarded.
1243 */
1244 rx = rcu_dereference(local->service);
1245 if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
1246 sp->hdr.serviceId != rx->second_service)) {
1247 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
1248 sp->hdr.seq == 1)
1249 goto unsupported_service;
1250 goto discard_unlock;
1251 }
1252 }
1253
1254 conn = rxrpc_find_connection_rcu(local, skb, &peer);
1220 if (conn) { 1255 if (conn) {
1221 if (sp->hdr.securityIndex != conn->security_ix) 1256 if (sp->hdr.securityIndex != conn->security_ix)
1222 goto wrong_security; 1257 goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
1280 call = rcu_dereference(chan->call); 1315 call = rcu_dereference(chan->call);
1281 1316
1282 if (sp->hdr.callNumber > chan->call_id) { 1317 if (sp->hdr.callNumber > chan->call_id) {
1283 if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) { 1318 if (rxrpc_to_client(sp)) {
1284 rcu_read_unlock(); 1319 rcu_read_unlock();
1285 goto reject_packet; 1320 goto reject_packet;
1286 } 1321 }
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
1297 if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags)) 1332 if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
1298 set_bit(RXRPC_CALL_RX_HEARD, &call->flags); 1333 set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
1299 } 1334 }
1300 } else {
1301 skew = 0;
1302 call = NULL;
1303 } 1335 }
1304 1336
1305 if (!call || atomic_read(&call->usage) == 0) { 1337 if (!call || atomic_read(&call->usage) == 0) {
1306 if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) || 1338 if (rxrpc_to_client(sp) ||
1307 sp->hdr.callNumber == 0 ||
1308 sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 1339 sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
1309 goto bad_message_unlock; 1340 goto bad_message_unlock;
1310 if (sp->hdr.seq != 1) 1341 if (sp->hdr.seq != 1)
1311 goto discard_unlock; 1342 goto discard_unlock;
1312 call = rxrpc_new_incoming_call(local, conn, skb); 1343 call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
1313 if (!call) { 1344 if (!call) {
1314 rcu_read_unlock(); 1345 rcu_read_unlock();
1315 goto reject_packet; 1346 goto reject_packet;
@@ -1340,6 +1371,13 @@ wrong_security:
1340 skb->priority = RXKADINCONSISTENCY; 1371 skb->priority = RXKADINCONSISTENCY;
1341 goto post_abort; 1372 goto post_abort;
1342 1373
1374unsupported_service:
1375 rcu_read_unlock();
1376 trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
1377 RX_INVALID_OPERATION, EOPNOTSUPP);
1378 skb->priority = RX_INVALID_OPERATION;
1379 goto post_abort;
1380
1343reupgrade: 1381reupgrade:
1344 rcu_read_unlock(); 1382 rcu_read_unlock();
1345 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1383 trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ bad_message:
1354protocol_error: 1392protocol_error:
1355 skb->priority = RX_PROTOCOL_ERROR; 1393 skb->priority = RX_PROTOCOL_ERROR;
1356post_abort: 1394post_abort:
1357 skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT; 1395 skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
1358reject_packet: 1396reject_packet:
1359 trace_rxrpc_rx_done(skb->mark, skb->priority); 1397 trace_rxrpc_rx_done(skb->mark, skb->priority);
1360 rxrpc_reject_packet(local, skb); 1398 rxrpc_reject_packet(local, skb);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 777c3ed4cfc0..94d234e9c685 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
135 } 135 }
136 136
137 switch (local->srx.transport.family) { 137 switch (local->srx.transport.family) {
138 case AF_INET: 138 case AF_INET6:
139 /* we want to receive ICMP errors */ 139 /* we want to receive ICMPv6 errors */
140 opt = 1; 140 opt = 1;
141 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR, 141 ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
142 (char *) &opt, sizeof(opt)); 142 (char *) &opt, sizeof(opt));
143 if (ret < 0) { 143 if (ret < 0) {
144 _debug("setsockopt failed"); 144 _debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
146 } 146 }
147 147
148 /* we want to set the don't fragment bit */ 148 /* we want to set the don't fragment bit */
149 opt = IP_PMTUDISC_DO; 149 opt = IPV6_PMTUDISC_DO;
150 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER, 150 ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
151 (char *) &opt, sizeof(opt)); 151 (char *) &opt, sizeof(opt));
152 if (ret < 0) { 152 if (ret < 0) {
153 _debug("setsockopt failed"); 153 _debug("setsockopt failed");
154 goto error; 154 goto error;
155 } 155 }
156 break;
157 156
158 case AF_INET6: 157 /* Fall through and set IPv4 options too otherwise we don't get
158 * errors from IPv4 packets sent through the IPv6 socket.
159 */
160
161 case AF_INET:
159 /* we want to receive ICMP errors */ 162 /* we want to receive ICMP errors */
160 opt = 1; 163 opt = 1;
161 ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR, 164 ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
162 (char *) &opt, sizeof(opt)); 165 (char *) &opt, sizeof(opt));
163 if (ret < 0) { 166 if (ret < 0) {
164 _debug("setsockopt failed"); 167 _debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
166 } 169 }
167 170
168 /* we want to set the don't fragment bit */ 171 /* we want to set the don't fragment bit */
169 opt = IPV6_PMTUDISC_DO; 172 opt = IP_PMTUDISC_DO;
170 ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER, 173 ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
171 (char *) &opt, sizeof(opt)); 174 (char *) &opt, sizeof(opt));
172 if (ret < 0) { 175 if (ret < 0) {
173 _debug("setsockopt failed"); 176 _debug("setsockopt failed");
174 goto error; 177 goto error;
175 } 178 }
179
180 /* We want receive timestamps. */
181 opt = 1;
182 ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
183 (char *)&opt, sizeof(opt));
184 if (ret < 0) {
185 _debug("setsockopt failed");
186 goto error;
187 }
176 break; 188 break;
177 189
178 default: 190 default:
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index ccf5de160444..e8fb8922bca8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
124 struct kvec iov[2]; 124 struct kvec iov[2];
125 rxrpc_serial_t serial; 125 rxrpc_serial_t serial;
126 rxrpc_seq_t hard_ack, top; 126 rxrpc_seq_t hard_ack, top;
127 ktime_t now;
128 size_t len, n; 127 size_t len, n;
129 int ret; 128 int ret;
130 u8 reason; 129 u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
196 /* We need to stick a time in before we send the packet in case 195 /* We need to stick a time in before we send the packet in case
197 * the reply gets back before kernel_sendmsg() completes - but 196 * the reply gets back before kernel_sendmsg() completes - but
198 * asking UDP to send the packet can take a relatively long 197 * asking UDP to send the packet can take a relatively long
199 * time, so we update the time after, on the assumption that 198 * time.
200 * the packet transmission is more likely to happen towards the
201 * end of the kernel_sendmsg() call.
202 */ 199 */
203 call->ping_time = ktime_get_real(); 200 call->ping_time = ktime_get_real();
204 set_bit(RXRPC_CALL_PINGING, &call->flags); 201 set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
206 } 203 }
207 204
208 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 205 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
209 now = ktime_get_real();
210 if (ping)
211 call->ping_time = now;
212 conn->params.peer->last_tx_at = ktime_get_seconds(); 206 conn->params.peer->last_tx_at = ktime_get_seconds();
213 if (ret < 0) 207 if (ret < 0)
214 trace_rxrpc_tx_fail(call->debug_id, serial, ret, 208 trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
363 357
364 /* If our RTT cache needs working on, request an ACK. Also request 358 /* If our RTT cache needs working on, request an ACK. Also request
365 * ACKs if a DATA packet appears to have been lost. 359 * ACKs if a DATA packet appears to have been lost.
360 *
361 * However, we mustn't request an ACK on the last reply packet of a
362 * service call, lest OpenAFS incorrectly send us an ACK with some
363 * soft-ACKs in it and then never follow up with a proper hard ACK.
366 */ 364 */
367 if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && 365 if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
366 rxrpc_to_server(sp)
367 ) &&
368 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || 368 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
369 retrans || 369 retrans ||
370 call->cong_mode == RXRPC_CALL_SLOW_START || 370 call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
390 goto send_fragmentable; 390 goto send_fragmentable;
391 391
392 down_read(&conn->params.local->defrag_sem); 392 down_read(&conn->params.local->defrag_sem);
393
394 sp->hdr.serial = serial;
395 smp_wmb(); /* Set serial before timestamp */
396 skb->tstamp = ktime_get_real();
397
393 /* send the packet by UDP 398 /* send the packet by UDP
394 * - returns -EMSGSIZE if UDP would have to fragment the packet 399 * - returns -EMSGSIZE if UDP would have to fragment the packet
395 * to go out of the interface 400 * to go out of the interface
@@ -413,12 +418,8 @@ done:
413 trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, 418 trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
414 retrans, lost); 419 retrans, lost);
415 if (ret >= 0) { 420 if (ret >= 0) {
416 ktime_t now = ktime_get_real();
417 skb->tstamp = now;
418 smp_wmb();
419 sp->hdr.serial = serial;
420 if (whdr.flags & RXRPC_REQUEST_ACK) { 421 if (whdr.flags & RXRPC_REQUEST_ACK) {
421 call->peer->rtt_last_req = now; 422 call->peer->rtt_last_req = skb->tstamp;
422 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 423 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
423 if (call->peer->rtt_usage > 1) { 424 if (call->peer->rtt_usage > 1) {
424 unsigned long nowj = jiffies, ack_lost_at; 425 unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
457 458
458 down_write(&conn->params.local->defrag_sem); 459 down_write(&conn->params.local->defrag_sem);
459 460
461 sp->hdr.serial = serial;
462 smp_wmb(); /* Set serial before timestamp */
463 skb->tstamp = ktime_get_real();
464
460 switch (conn->params.local->srx.transport.family) { 465 switch (conn->params.local->srx.transport.family) {
461 case AF_INET: 466 case AF_INET:
462 opt = IP_PMTUDISC_DONT; 467 opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
519 struct kvec iov[2]; 524 struct kvec iov[2];
520 size_t size; 525 size_t size;
521 __be32 code; 526 __be32 code;
522 int ret; 527 int ret, ioc;
523 528
524 _enter("%d", local->debug_id); 529 _enter("%d", local->debug_id);
525 530
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
527 iov[0].iov_len = sizeof(whdr); 532 iov[0].iov_len = sizeof(whdr);
528 iov[1].iov_base = &code; 533 iov[1].iov_base = &code;
529 iov[1].iov_len = sizeof(code); 534 iov[1].iov_len = sizeof(code);
530 size = sizeof(whdr) + sizeof(code);
531 535
532 msg.msg_name = &srx.transport; 536 msg.msg_name = &srx.transport;
533 msg.msg_control = NULL; 537 msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
535 msg.msg_flags = 0; 539 msg.msg_flags = 0;
536 540
537 memset(&whdr, 0, sizeof(whdr)); 541 memset(&whdr, 0, sizeof(whdr));
538 whdr.type = RXRPC_PACKET_TYPE_ABORT;
539 542
540 while ((skb = skb_dequeue(&local->reject_queue))) { 543 while ((skb = skb_dequeue(&local->reject_queue))) {
541 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 544 rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
542 sp = rxrpc_skb(skb); 545 sp = rxrpc_skb(skb);
543 546
547 switch (skb->mark) {
548 case RXRPC_SKB_MARK_REJECT_BUSY:
549 whdr.type = RXRPC_PACKET_TYPE_BUSY;
550 size = sizeof(whdr);
551 ioc = 1;
552 break;
553 case RXRPC_SKB_MARK_REJECT_ABORT:
554 whdr.type = RXRPC_PACKET_TYPE_ABORT;
555 code = htonl(skb->priority);
556 size = sizeof(whdr) + sizeof(code);
557 ioc = 2;
558 break;
559 default:
560 rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
561 continue;
562 }
563
544 if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) { 564 if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
545 msg.msg_namelen = srx.transport_len; 565 msg.msg_namelen = srx.transport_len;
546 566
547 code = htonl(skb->priority);
548
549 whdr.epoch = htonl(sp->hdr.epoch); 567 whdr.epoch = htonl(sp->hdr.epoch);
550 whdr.cid = htonl(sp->hdr.cid); 568 whdr.cid = htonl(sp->hdr.cid);
551 whdr.callNumber = htonl(sp->hdr.callNumber); 569 whdr.callNumber = htonl(sp->hdr.callNumber);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 4f9da2f51c69..f3e6fc670da2 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -23,6 +23,8 @@
23#include "ar-internal.h" 23#include "ar-internal.h"
24 24
25static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *); 25static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
26static void rxrpc_distribute_error(struct rxrpc_peer *, int,
27 enum rxrpc_call_completion);
26 28
27/* 29/*
28 * Find the peer associated with an ICMP packet. 30 * Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
194 rcu_read_unlock(); 196 rcu_read_unlock();
195 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 197 rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
196 198
197 /* The ref we obtained is passed off to the work item */
198 __rxrpc_queue_peer_error(peer);
199 _leave(""); 199 _leave("");
200} 200}
201 201
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
205static void rxrpc_store_error(struct rxrpc_peer *peer, 205static void rxrpc_store_error(struct rxrpc_peer *peer,
206 struct sock_exterr_skb *serr) 206 struct sock_exterr_skb *serr)
207{ 207{
208 enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
208 struct sock_extended_err *ee; 209 struct sock_extended_err *ee;
209 int err; 210 int err;
210 211
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
255 case SO_EE_ORIGIN_NONE: 256 case SO_EE_ORIGIN_NONE:
256 case SO_EE_ORIGIN_LOCAL: 257 case SO_EE_ORIGIN_LOCAL:
257 _proto("Rx Received local error { error=%d }", err); 258 _proto("Rx Received local error { error=%d }", err);
258 err += RXRPC_LOCAL_ERROR_OFFSET; 259 compl = RXRPC_CALL_LOCAL_ERROR;
259 break; 260 break;
260 261
261 case SO_EE_ORIGIN_ICMP6: 262 case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
264 break; 265 break;
265 } 266 }
266 267
267 peer->error_report = err; 268 rxrpc_distribute_error(peer, err, compl);
268} 269}
269 270
270/* 271/*
271 * Distribute an error that occurred on a peer 272 * Distribute an error that occurred on a peer.
272 */ 273 */
273void rxrpc_peer_error_distributor(struct work_struct *work) 274static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
275 enum rxrpc_call_completion compl)
274{ 276{
275 struct rxrpc_peer *peer =
276 container_of(work, struct rxrpc_peer, error_distributor);
277 struct rxrpc_call *call; 277 struct rxrpc_call *call;
278 enum rxrpc_call_completion compl;
279 int error;
280
281 _enter("");
282
283 error = READ_ONCE(peer->error_report);
284 if (error < RXRPC_LOCAL_ERROR_OFFSET) {
285 compl = RXRPC_CALL_NETWORK_ERROR;
286 } else {
287 compl = RXRPC_CALL_LOCAL_ERROR;
288 error -= RXRPC_LOCAL_ERROR_OFFSET;
289 }
290 278
291 _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error); 279 hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
292
293 spin_lock_bh(&peer->lock);
294
295 while (!hlist_empty(&peer->error_targets)) {
296 call = hlist_entry(peer->error_targets.first,
297 struct rxrpc_call, error_link);
298 hlist_del_init(&call->error_link);
299 rxrpc_see_call(call); 280 rxrpc_see_call(call);
300 281 if (call->state < RXRPC_CALL_COMPLETE &&
301 if (rxrpc_set_call_completion(call, compl, 0, -error)) 282 rxrpc_set_call_completion(call, compl, 0, -error))
302 rxrpc_notify_socket(call); 283 rxrpc_notify_socket(call);
303 } 284 }
304
305 spin_unlock_bh(&peer->lock);
306
307 rxrpc_put_peer(peer);
308 _leave("");
309} 285}
310 286
311/* 287/*
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..01a9febfa367 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
124 struct rxrpc_net *rxnet = local->rxnet; 124 struct rxrpc_net *rxnet = local->rxnet;
125 125
126 hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) { 126 hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
127 if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) { 127 if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
128 if (atomic_read(&peer->usage) == 0) 128 atomic_read(&peer->usage) > 0)
129 return NULL;
130 return peer; 129 return peer;
131 }
132 } 130 }
133 131
134 return NULL; 132 return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
222 atomic_set(&peer->usage, 1); 220 atomic_set(&peer->usage, 1);
223 peer->local = local; 221 peer->local = local;
224 INIT_HLIST_HEAD(&peer->error_targets); 222 INIT_HLIST_HEAD(&peer->error_targets);
225 INIT_WORK(&peer->error_distributor,
226 &rxrpc_peer_error_distributor);
227 peer->service_conns = RB_ROOT; 223 peer->service_conns = RB_ROOT;
228 seqlock_init(&peer->service_conn_lock); 224 seqlock_init(&peer->service_conn_lock);
229 spin_lock_init(&peer->lock); 225 spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
299} 295}
300 296
301/* 297/*
302 * Set up a new incoming peer. The address is prestored in the preallocated 298 * Set up a new incoming peer. There shouldn't be any other matching peers
303 * peer. 299 * since we've already done a search in the list from the non-reentrant context
300 * (the data_ready handler) that is the only place we can add new peers.
304 */ 301 */
305struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local, 302void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
306 struct rxrpc_peer *prealloc)
307{ 303{
308 struct rxrpc_peer *peer;
309 struct rxrpc_net *rxnet = local->rxnet; 304 struct rxrpc_net *rxnet = local->rxnet;
310 unsigned long hash_key; 305 unsigned long hash_key;
311 306
312 hash_key = rxrpc_peer_hash_key(local, &prealloc->srx); 307 hash_key = rxrpc_peer_hash_key(local, &peer->srx);
313 prealloc->local = local; 308 peer->local = local;
314 rxrpc_init_peer(prealloc, hash_key); 309 rxrpc_init_peer(peer, hash_key);
315 310
316 spin_lock(&rxnet->peer_hash_lock); 311 spin_lock(&rxnet->peer_hash_lock);
317 312 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
318 /* Need to check that we aren't racing with someone else */ 313 list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
319 peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
320 if (peer && !rxrpc_get_peer_maybe(peer))
321 peer = NULL;
322 if (!peer) {
323 peer = prealloc;
324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
325 list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
326 }
327
328 spin_unlock(&rxnet->peer_hash_lock); 314 spin_unlock(&rxnet->peer_hash_lock);
329 return peer;
330} 315}
331 316
332/* 317/*
@@ -416,21 +401,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
416} 401}
417 402
418/* 403/*
419 * Queue a peer record. This passes the caller's ref to the workqueue.
420 */
421void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
422{
423 const void *here = __builtin_return_address(0);
424 int n;
425
426 n = atomic_read(&peer->usage);
427 if (rxrpc_queue_work(&peer->error_distributor))
428 trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
429 else
430 rxrpc_put_peer(peer);
431}
432
433/*
434 * Discard a peer record. 404 * Discard a peer record.
435 */ 405 */
436static void __rxrpc_put_peer(struct rxrpc_peer *peer) 406static void __rxrpc_put_peer(struct rxrpc_peer *peer)
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 93da73bf7098..f9cb83c938f3 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
50#define RXRPC_PACKET_TYPE_10 10 /* Ignored */ 50#define RXRPC_PACKET_TYPE_10 10 /* Ignored */
51#define RXRPC_PACKET_TYPE_11 11 /* Ignored */ 51#define RXRPC_PACKET_TYPE_11 11 /* Ignored */
52#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */ 52#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
53#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
54 53
55 uint8_t flags; /* packet flags */ 54 uint8_t flags; /* packet flags */
56#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */ 55#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
72 71
73} __packed; 72} __packed;
74 73
75#define RXRPC_SUPPORTED_PACKET_TYPES ( \
76 (1 << RXRPC_PACKET_TYPE_DATA) | \
77 (1 << RXRPC_PACKET_TYPE_ACK) | \
78 (1 << RXRPC_PACKET_TYPE_BUSY) | \
79 (1 << RXRPC_PACKET_TYPE_ABORT) | \
80 (1 << RXRPC_PACKET_TYPE_ACKALL) | \
81 (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
82 (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
83 /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
84 (1 << RXRPC_PACKET_TYPE_PARAMS) | \
85 (1 << RXRPC_PACKET_TYPE_10) | \
86 (1 << RXRPC_PACKET_TYPE_11) | \
87 (1 << RXRPC_PACKET_TYPE_VERSION))
88
89/*****************************************************************************/ 74/*****************************************************************************/
90/* 75/*
91 * jumbo packet secondary header 76 * jumbo packet secondary header
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 229d63c99be2..e12f8ef7baa4 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
300} 300}
301EXPORT_SYMBOL(tcf_generic_walker); 301EXPORT_SYMBOL(tcf_generic_walker);
302 302
303static bool __tcf_idr_check(struct tc_action_net *tn, u32 index, 303int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
304 struct tc_action **a, int bind)
305{ 304{
306 struct tcf_idrinfo *idrinfo = tn->idrinfo; 305 struct tcf_idrinfo *idrinfo = tn->idrinfo;
307 struct tc_action *p; 306 struct tc_action *p;
308 307
309 spin_lock(&idrinfo->lock); 308 spin_lock(&idrinfo->lock);
310 p = idr_find(&idrinfo->action_idr, index); 309 p = idr_find(&idrinfo->action_idr, index);
311 if (IS_ERR(p)) { 310 if (IS_ERR(p))
312 p = NULL; 311 p = NULL;
313 } else if (p) { 312 else if (p)
314 refcount_inc(&p->tcfa_refcnt); 313 refcount_inc(&p->tcfa_refcnt);
315 if (bind)
316 atomic_inc(&p->tcfa_bindcnt);
317 }
318 spin_unlock(&idrinfo->lock); 314 spin_unlock(&idrinfo->lock);
319 315
320 if (p) { 316 if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
323 } 319 }
324 return false; 320 return false;
325} 321}
326
327int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
328{
329 return __tcf_idr_check(tn, index, a, 0);
330}
331EXPORT_SYMBOL(tcf_idr_search); 322EXPORT_SYMBOL(tcf_idr_search);
332 323
333bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, 324static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
334 int bind)
335{ 325{
336 return __tcf_idr_check(tn, index, a, bind);
337}
338EXPORT_SYMBOL(tcf_idr_check);
339
340int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
341{
342 struct tcf_idrinfo *idrinfo = tn->idrinfo;
343 struct tc_action *p; 326 struct tc_action *p;
344 int ret = 0; 327 int ret = 0;
345 328
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
370 spin_unlock(&idrinfo->lock); 353 spin_unlock(&idrinfo->lock);
371 return ret; 354 return ret;
372} 355}
373EXPORT_SYMBOL(tcf_idr_delete_index);
374 356
375int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, 357int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
376 struct tc_action **a, const struct tc_action_ops *ops, 358 struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
409 391
410 p->idrinfo = idrinfo; 392 p->idrinfo = idrinfo;
411 p->ops = ops; 393 p->ops = ops;
412 INIT_LIST_HEAD(&p->list);
413 *a = p; 394 *a = p;
414 return 0; 395 return 0;
415err3: 396err3:
@@ -681,19 +662,30 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
681 return ret; 662 return ret;
682} 663}
683 664
665static int tcf_action_destroy_1(struct tc_action *a, int bind)
666{
667 struct tc_action *actions[] = { a, NULL };
668
669 return tcf_action_destroy(actions, bind);
670}
671
684static int tcf_action_put(struct tc_action *p) 672static int tcf_action_put(struct tc_action *p)
685{ 673{
686 return __tcf_action_put(p, false); 674 return __tcf_action_put(p, false);
687} 675}
688 676
677/* Put all actions in this array, skip those NULL's. */
689static void tcf_action_put_many(struct tc_action *actions[]) 678static void tcf_action_put_many(struct tc_action *actions[])
690{ 679{
691 int i; 680 int i;
692 681
693 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { 682 for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
694 struct tc_action *a = actions[i]; 683 struct tc_action *a = actions[i];
695 const struct tc_action_ops *ops = a->ops; 684 const struct tc_action_ops *ops;
696 685
686 if (!a)
687 continue;
688 ops = a->ops;
697 if (tcf_action_put(a)) 689 if (tcf_action_put(a))
698 module_put(ops->owner); 690 module_put(ops->owner);
699 } 691 }
@@ -896,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
896 if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { 888 if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
897 err = tcf_action_goto_chain_init(a, tp); 889 err = tcf_action_goto_chain_init(a, tp);
898 if (err) { 890 if (err) {
899 struct tc_action *actions[] = { a, NULL }; 891 tcf_action_destroy_1(a, bind);
900
901 tcf_action_destroy(actions, bind);
902 NL_SET_ERR_MSG(extack, "Failed to init TC action chain"); 892 NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
903 return ERR_PTR(err); 893 return ERR_PTR(err);
904 } 894 }
905 } 895 }
906 896
907 if (!tcf_action_valid(a->tcfa_action)) { 897 if (!tcf_action_valid(a->tcfa_action)) {
908 NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead"); 898 tcf_action_destroy_1(a, bind);
909 a->tcfa_action = TC_ACT_UNSPEC; 899 NL_SET_ERR_MSG(extack, "Invalid control action value");
900 return ERR_PTR(-EINVAL);
910 } 901 }
911 902
912 return a; 903 return a;
@@ -1175,41 +1166,38 @@ err_out:
1175 return err; 1166 return err;
1176} 1167}
1177 1168
1178static int tcf_action_delete(struct net *net, struct tc_action *actions[], 1169static int tcf_action_delete(struct net *net, struct tc_action *actions[])
1179 int *acts_deleted, struct netlink_ext_ack *extack)
1180{ 1170{
1181 u32 act_index; 1171 int i;
1182 int ret, i;
1183 1172
1184 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) { 1173 for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
1185 struct tc_action *a = actions[i]; 1174 struct tc_action *a = actions[i];
1186 const struct tc_action_ops *ops = a->ops; 1175 const struct tc_action_ops *ops = a->ops;
1187
1188 /* Actions can be deleted concurrently so we must save their 1176 /* Actions can be deleted concurrently so we must save their
1189 * type and id to search again after reference is released. 1177 * type and id to search again after reference is released.
1190 */ 1178 */
1191 act_index = a->tcfa_index; 1179 struct tcf_idrinfo *idrinfo = a->idrinfo;
1180 u32 act_index = a->tcfa_index;
1192 1181
1182 actions[i] = NULL;
1193 if (tcf_action_put(a)) { 1183 if (tcf_action_put(a)) {
1194 /* last reference, action was deleted concurrently */ 1184 /* last reference, action was deleted concurrently */
1195 module_put(ops->owner); 1185 module_put(ops->owner);
1196 } else { 1186 } else {
1187 int ret;
1188
1197 /* now do the delete */ 1189 /* now do the delete */
1198 ret = ops->delete(net, act_index); 1190 ret = tcf_idr_delete_index(idrinfo, act_index);
1199 if (ret < 0) { 1191 if (ret < 0)
1200 *acts_deleted = i + 1;
1201 return ret; 1192 return ret;
1202 }
1203 } 1193 }
1204 } 1194 }
1205 *acts_deleted = i;
1206 return 0; 1195 return 0;
1207} 1196}
1208 1197
1209static int 1198static int
1210tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[], 1199tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1211 int *acts_deleted, u32 portid, size_t attr_size, 1200 u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
1212 struct netlink_ext_ack *extack)
1213{ 1201{
1214 int ret; 1202 int ret;
1215 struct sk_buff *skb; 1203 struct sk_buff *skb;
@@ -1227,7 +1215,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
1227 } 1215 }
1228 1216
1229 /* now do the delete */ 1217 /* now do the delete */
1230 ret = tcf_action_delete(net, actions, acts_deleted, extack); 1218 ret = tcf_action_delete(net, actions);
1231 if (ret < 0) { 1219 if (ret < 0) {
1232 NL_SET_ERR_MSG(extack, "Failed to delete TC action"); 1220 NL_SET_ERR_MSG(extack, "Failed to delete TC action");
1233 kfree_skb(skb); 1221 kfree_skb(skb);
@@ -1249,8 +1237,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1249 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 1237 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
1250 struct tc_action *act; 1238 struct tc_action *act;
1251 size_t attr_size = 0; 1239 size_t attr_size = 0;
1252 struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {}; 1240 struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
1253 int acts_deleted = 0;
1254 1241
1255 ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack); 1242 ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
1256 if (ret < 0) 1243 if (ret < 0)
@@ -1280,14 +1267,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1280 if (event == RTM_GETACTION) 1267 if (event == RTM_GETACTION)
1281 ret = tcf_get_notify(net, portid, n, actions, event, extack); 1268 ret = tcf_get_notify(net, portid, n, actions, event, extack);
1282 else { /* delete */ 1269 else { /* delete */
1283 ret = tcf_del_notify(net, n, actions, &acts_deleted, portid, 1270 ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
1284 attr_size, extack);
1285 if (ret) 1271 if (ret)
1286 goto err; 1272 goto err;
1287 return ret; 1273 return 0;
1288 } 1274 }
1289err: 1275err:
1290 tcf_action_put_many(&actions[acts_deleted]); 1276 tcf_action_put_many(actions);
1291 return ret; 1277 return ret;
1292} 1278}
1293 1279
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d30b23e42436..0c68bc9cf0b4 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
395 return tcf_idr_search(tn, a, index); 395 return tcf_idr_search(tn, a, index);
396} 396}
397 397
398static int tcf_bpf_delete(struct net *net, u32 index)
399{
400 struct tc_action_net *tn = net_generic(net, bpf_net_id);
401
402 return tcf_idr_delete_index(tn, index);
403}
404
405static struct tc_action_ops act_bpf_ops __read_mostly = { 398static struct tc_action_ops act_bpf_ops __read_mostly = {
406 .kind = "bpf", 399 .kind = "bpf",
407 .type = TCA_ACT_BPF, 400 .type = TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
412 .init = tcf_bpf_init, 405 .init = tcf_bpf_init,
413 .walk = tcf_bpf_walker, 406 .walk = tcf_bpf_walker,
414 .lookup = tcf_bpf_search, 407 .lookup = tcf_bpf_search,
415 .delete = tcf_bpf_delete,
416 .size = sizeof(struct tcf_bpf), 408 .size = sizeof(struct tcf_bpf),
417}; 409};
418 410
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 54c0bf54f2ac..6f0f273f1139 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
198 return tcf_idr_search(tn, a, index); 198 return tcf_idr_search(tn, a, index);
199} 199}
200 200
201static int tcf_connmark_delete(struct net *net, u32 index)
202{
203 struct tc_action_net *tn = net_generic(net, connmark_net_id);
204
205 return tcf_idr_delete_index(tn, index);
206}
207
208static struct tc_action_ops act_connmark_ops = { 201static struct tc_action_ops act_connmark_ops = {
209 .kind = "connmark", 202 .kind = "connmark",
210 .type = TCA_ACT_CONNMARK, 203 .type = TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
214 .init = tcf_connmark_init, 207 .init = tcf_connmark_init,
215 .walk = tcf_connmark_walker, 208 .walk = tcf_connmark_walker,
216 .lookup = tcf_connmark_search, 209 .lookup = tcf_connmark_search,
217 .delete = tcf_connmark_delete,
218 .size = sizeof(struct tcf_connmark_info), 210 .size = sizeof(struct tcf_connmark_info),
219}; 211};
220 212
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e698d3fe2080..b8a67ae3105a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
659 return nla_total_size(sizeof(struct tc_csum)); 659 return nla_total_size(sizeof(struct tc_csum));
660} 660}
661 661
662static int tcf_csum_delete(struct net *net, u32 index)
663{
664 struct tc_action_net *tn = net_generic(net, csum_net_id);
665
666 return tcf_idr_delete_index(tn, index);
667}
668
669static struct tc_action_ops act_csum_ops = { 662static struct tc_action_ops act_csum_ops = {
670 .kind = "csum", 663 .kind = "csum",
671 .type = TCA_ACT_CSUM, 664 .type = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
677 .walk = tcf_csum_walker, 670 .walk = tcf_csum_walker,
678 .lookup = tcf_csum_search, 671 .lookup = tcf_csum_search,
679 .get_fill_size = tcf_csum_get_fill_size, 672 .get_fill_size = tcf_csum_get_fill_size,
680 .delete = tcf_csum_delete,
681 .size = sizeof(struct tcf_csum), 673 .size = sizeof(struct tcf_csum),
682}; 674};
683 675
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 6a3f25a8ffb3..cd1d9bd32ef9 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
243 return sz; 243 return sz;
244} 244}
245 245
246static int tcf_gact_delete(struct net *net, u32 index)
247{
248 struct tc_action_net *tn = net_generic(net, gact_net_id);
249
250 return tcf_idr_delete_index(tn, index);
251}
252
253static struct tc_action_ops act_gact_ops = { 246static struct tc_action_ops act_gact_ops = {
254 .kind = "gact", 247 .kind = "gact",
255 .type = TCA_ACT_GACT, 248 .type = TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
261 .walk = tcf_gact_walker, 254 .walk = tcf_gact_walker,
262 .lookup = tcf_gact_search, 255 .lookup = tcf_gact_search,
263 .get_fill_size = tcf_gact_get_fill_size, 256 .get_fill_size = tcf_gact_get_fill_size,
264 .delete = tcf_gact_delete,
265 .size = sizeof(struct tcf_gact), 257 .size = sizeof(struct tcf_gact),
266}; 258};
267 259
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index d1081bdf1bdb..06a3d4801878 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
167{ 167{
168 struct tcf_meta_ops *o; 168 struct tcf_meta_ops *o;
169 169
170 read_lock_bh(&ife_mod_lock); 170 read_lock(&ife_mod_lock);
171 list_for_each_entry(o, &ifeoplist, list) { 171 list_for_each_entry(o, &ifeoplist, list) {
172 if (o->metaid == metaid) { 172 if (o->metaid == metaid) {
173 if (!try_module_get(o->owner)) 173 if (!try_module_get(o->owner))
174 o = NULL; 174 o = NULL;
175 read_unlock_bh(&ife_mod_lock); 175 read_unlock(&ife_mod_lock);
176 return o; 176 return o;
177 } 177 }
178 } 178 }
179 read_unlock_bh(&ife_mod_lock); 179 read_unlock(&ife_mod_lock);
180 180
181 return NULL; 181 return NULL;
182} 182}
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
190 !mops->get || !mops->alloc) 190 !mops->get || !mops->alloc)
191 return -EINVAL; 191 return -EINVAL;
192 192
193 write_lock_bh(&ife_mod_lock); 193 write_lock(&ife_mod_lock);
194 194
195 list_for_each_entry(m, &ifeoplist, list) { 195 list_for_each_entry(m, &ifeoplist, list) {
196 if (m->metaid == mops->metaid || 196 if (m->metaid == mops->metaid ||
197 (strcmp(mops->name, m->name) == 0)) { 197 (strcmp(mops->name, m->name) == 0)) {
198 write_unlock_bh(&ife_mod_lock); 198 write_unlock(&ife_mod_lock);
199 return -EEXIST; 199 return -EEXIST;
200 } 200 }
201 } 201 }
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
204 mops->release = ife_release_meta_gen; 204 mops->release = ife_release_meta_gen;
205 205
206 list_add_tail(&mops->list, &ifeoplist); 206 list_add_tail(&mops->list, &ifeoplist);
207 write_unlock_bh(&ife_mod_lock); 207 write_unlock(&ife_mod_lock);
208 return 0; 208 return 0;
209} 209}
210EXPORT_SYMBOL_GPL(unregister_ife_op); 210EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
214 struct tcf_meta_ops *m; 214 struct tcf_meta_ops *m;
215 int err = -ENOENT; 215 int err = -ENOENT;
216 216
217 write_lock_bh(&ife_mod_lock); 217 write_lock(&ife_mod_lock);
218 list_for_each_entry(m, &ifeoplist, list) { 218 list_for_each_entry(m, &ifeoplist, list) {
219 if (m->metaid == mops->metaid) { 219 if (m->metaid == mops->metaid) {
220 list_del(&mops->list); 220 list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
222 break; 222 break;
223 } 223 }
224 } 224 }
225 write_unlock_bh(&ife_mod_lock); 225 write_unlock(&ife_mod_lock);
226 226
227 return err; 227 return err;
228} 228}
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
265#endif 265#endif
266 266
267/* called when adding new meta information 267/* called when adding new meta information
268 * under ife->tcf_lock for existing action
269*/ 268*/
270static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, 269static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
271 void *val, int len, bool exists,
272 bool rtnl_held)
273{ 270{
274 struct tcf_meta_ops *ops = find_ife_oplist(metaid); 271 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
275 int ret = 0; 272 int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
277 if (!ops) { 274 if (!ops) {
278 ret = -ENOENT; 275 ret = -ENOENT;
279#ifdef CONFIG_MODULES 276#ifdef CONFIG_MODULES
280 if (exists)
281 spin_unlock_bh(&ife->tcf_lock);
282 if (rtnl_held) 277 if (rtnl_held)
283 rtnl_unlock(); 278 rtnl_unlock();
284 request_module("ife-meta-%s", ife_meta_id2name(metaid)); 279 request_module("ife-meta-%s", ife_meta_id2name(metaid));
285 if (rtnl_held) 280 if (rtnl_held)
286 rtnl_lock(); 281 rtnl_lock();
287 if (exists)
288 spin_lock_bh(&ife->tcf_lock);
289 ops = find_ife_oplist(metaid); 282 ops = find_ife_oplist(metaid);
290#endif 283#endif
291 } 284 }
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
302} 295}
303 296
304/* called when adding new meta information 297/* called when adding new meta information
305 * under ife->tcf_lock for existing action
306*/ 298*/
307static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, 299static int __add_metainfo(const struct tcf_meta_ops *ops,
308 int len, bool atomic) 300 struct tcf_ife_info *ife, u32 metaid, void *metaval,
301 int len, bool atomic, bool exists)
309{ 302{
310 struct tcf_meta_info *mi = NULL; 303 struct tcf_meta_info *mi = NULL;
311 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
312 int ret = 0; 304 int ret = 0;
313 305
314 if (!ops)
315 return -ENOENT;
316
317 mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); 306 mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
318 if (!mi) { 307 if (!mi)
319 /*put back what find_ife_oplist took */
320 module_put(ops->owner);
321 return -ENOMEM; 308 return -ENOMEM;
322 }
323 309
324 mi->metaid = metaid; 310 mi->metaid = metaid;
325 mi->ops = ops; 311 mi->ops = ops;
@@ -327,29 +313,61 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
327 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); 313 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
328 if (ret != 0) { 314 if (ret != 0) {
329 kfree(mi); 315 kfree(mi);
330 module_put(ops->owner);
331 return ret; 316 return ret;
332 } 317 }
333 } 318 }
334 319
320 if (exists)
321 spin_lock_bh(&ife->tcf_lock);
335 list_add_tail(&mi->metalist, &ife->metalist); 322 list_add_tail(&mi->metalist, &ife->metalist);
323 if (exists)
324 spin_unlock_bh(&ife->tcf_lock);
325
326 return ret;
327}
328
329static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
330 struct tcf_ife_info *ife, u32 metaid,
331 bool exists)
332{
333 int ret;
334
335 if (!try_module_get(ops->owner))
336 return -ENOENT;
337 ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
338 if (ret)
339 module_put(ops->owner);
340 return ret;
341}
342
343static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
344 int len, bool exists)
345{
346 const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
347 int ret;
336 348
349 if (!ops)
350 return -ENOENT;
351 ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
352 if (ret)
353 /*put back what find_ife_oplist took */
354 module_put(ops->owner);
337 return ret; 355 return ret;
338} 356}
339 357
340static int use_all_metadata(struct tcf_ife_info *ife) 358static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
341{ 359{
342 struct tcf_meta_ops *o; 360 struct tcf_meta_ops *o;
343 int rc = 0; 361 int rc = 0;
344 int installed = 0; 362 int installed = 0;
345 363
346 read_lock_bh(&ife_mod_lock); 364 read_lock(&ife_mod_lock);
347 list_for_each_entry(o, &ifeoplist, list) { 365 list_for_each_entry(o, &ifeoplist, list) {
348 rc = add_metainfo(ife, o->metaid, NULL, 0, true); 366 rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
349 if (rc == 0) 367 if (rc == 0)
350 installed += 1; 368 installed += 1;
351 } 369 }
352 read_unlock_bh(&ife_mod_lock); 370 read_unlock(&ife_mod_lock);
353 371
354 if (installed) 372 if (installed)
355 return 0; 373 return 0;
@@ -396,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
396 struct tcf_meta_info *e, *n; 414 struct tcf_meta_info *e, *n;
397 415
398 list_for_each_entry_safe(e, n, &ife->metalist, metalist) { 416 list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
399 module_put(e->ops->owner);
400 list_del(&e->metalist); 417 list_del(&e->metalist);
401 if (e->metaval) { 418 if (e->metaval) {
402 if (e->ops->release) 419 if (e->ops->release)
@@ -404,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
404 else 421 else
405 kfree(e->metaval); 422 kfree(e->metaval);
406 } 423 }
424 module_put(e->ops->owner);
407 kfree(e); 425 kfree(e);
408 } 426 }
409} 427}
@@ -422,7 +440,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
422 kfree_rcu(p, rcu); 440 kfree_rcu(p, rcu);
423} 441}
424 442
425/* under ife->tcf_lock for existing action */
426static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, 443static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
427 bool exists, bool rtnl_held) 444 bool exists, bool rtnl_held)
428{ 445{
@@ -436,8 +453,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
436 val = nla_data(tb[i]); 453 val = nla_data(tb[i]);
437 len = nla_len(tb[i]); 454 len = nla_len(tb[i]);
438 455
439 rc = load_metaops_and_vet(ife, i, val, len, exists, 456 rc = load_metaops_and_vet(i, val, len, rtnl_held);
440 rtnl_held);
441 if (rc != 0) 457 if (rc != 0)
442 return rc; 458 return rc;
443 459
@@ -540,8 +556,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
540 p->eth_type = ife_type; 556 p->eth_type = ife_type;
541 } 557 }
542 558
543 if (exists)
544 spin_lock_bh(&ife->tcf_lock);
545 559
546 if (ret == ACT_P_CREATED) 560 if (ret == ACT_P_CREATED)
547 INIT_LIST_HEAD(&ife->metalist); 561 INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +565,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
551 NULL, NULL); 565 NULL, NULL);
552 if (err) { 566 if (err) {
553metadata_parse_err: 567metadata_parse_err:
554 if (exists)
555 spin_unlock_bh(&ife->tcf_lock);
556 tcf_idr_release(*a, bind); 568 tcf_idr_release(*a, bind);
557
558 kfree(p); 569 kfree(p);
559 return err; 570 return err;
560 } 571 }
@@ -569,17 +580,16 @@ metadata_parse_err:
569 * as we can. You better have at least one else we are 580 * as we can. You better have at least one else we are
570 * going to bail out 581 * going to bail out
571 */ 582 */
572 err = use_all_metadata(ife); 583 err = use_all_metadata(ife, exists);
573 if (err) { 584 if (err) {
574 if (exists)
575 spin_unlock_bh(&ife->tcf_lock);
576 tcf_idr_release(*a, bind); 585 tcf_idr_release(*a, bind);
577
578 kfree(p); 586 kfree(p);
579 return err; 587 return err;
580 } 588 }
581 } 589 }
582 590
591 if (exists)
592 spin_lock_bh(&ife->tcf_lock);
583 ife->tcf_action = parm->action; 593 ife->tcf_action = parm->action;
584 /* protected by tcf_lock when modifying existing action */ 594 /* protected by tcf_lock when modifying existing action */
585 rcu_swap_protected(ife->params, p, 1); 595 rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +863,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
853 return tcf_idr_search(tn, a, index); 863 return tcf_idr_search(tn, a, index);
854} 864}
855 865
856static int tcf_ife_delete(struct net *net, u32 index)
857{
858 struct tc_action_net *tn = net_generic(net, ife_net_id);
859
860 return tcf_idr_delete_index(tn, index);
861}
862
863static struct tc_action_ops act_ife_ops = { 866static struct tc_action_ops act_ife_ops = {
864 .kind = "ife", 867 .kind = "ife",
865 .type = TCA_ACT_IFE, 868 .type = TCA_ACT_IFE,
@@ -870,7 +873,6 @@ static struct tc_action_ops act_ife_ops = {
870 .init = tcf_ife_init, 873 .init = tcf_ife_init,
871 .walk = tcf_ife_walker, 874 .walk = tcf_ife_walker,
872 .lookup = tcf_ife_search, 875 .lookup = tcf_ife_search,
873 .delete = tcf_ife_delete,
874 .size = sizeof(struct tcf_ife_info), 876 .size = sizeof(struct tcf_ife_info),
875}; 877};
876 878
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 51f235bbeb5b..8525de811616 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
135 } 135 }
136 136
137 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); 137 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
138 if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { 138 if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
139 if (exists) 139 if (exists)
140 tcf_idr_release(*a, bind); 140 tcf_idr_release(*a, bind);
141 else 141 else
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
337 return tcf_idr_search(tn, a, index); 337 return tcf_idr_search(tn, a, index);
338} 338}
339 339
340static int tcf_ipt_delete(struct net *net, u32 index)
341{
342 struct tc_action_net *tn = net_generic(net, ipt_net_id);
343
344 return tcf_idr_delete_index(tn, index);
345}
346
347static struct tc_action_ops act_ipt_ops = { 340static struct tc_action_ops act_ipt_ops = {
348 .kind = "ipt", 341 .kind = "ipt",
349 .type = TCA_ACT_IPT, 342 .type = TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
354 .init = tcf_ipt_init, 347 .init = tcf_ipt_init,
355 .walk = tcf_ipt_walker, 348 .walk = tcf_ipt_walker,
356 .lookup = tcf_ipt_search, 349 .lookup = tcf_ipt_search,
357 .delete = tcf_ipt_delete,
358 .size = sizeof(struct tcf_ipt), 350 .size = sizeof(struct tcf_ipt),
359}; 351};
360 352
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
395 return tcf_idr_search(tn, a, index); 387 return tcf_idr_search(tn, a, index);
396} 388}
397 389
398static int tcf_xt_delete(struct net *net, u32 index)
399{
400 struct tc_action_net *tn = net_generic(net, xt_net_id);
401
402 return tcf_idr_delete_index(tn, index);
403}
404
405static struct tc_action_ops act_xt_ops = { 390static struct tc_action_ops act_xt_ops = {
406 .kind = "xt", 391 .kind = "xt",
407 .type = TCA_ACT_XT, 392 .type = TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
412 .init = tcf_xt_init, 397 .init = tcf_xt_init,
413 .walk = tcf_xt_walker, 398 .walk = tcf_xt_walker,
414 .lookup = tcf_xt_search, 399 .lookup = tcf_xt_search,
415 .delete = tcf_xt_delete,
416 .size = sizeof(struct tcf_ipt), 400 .size = sizeof(struct tcf_ipt),
417}; 401};
418 402
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 38fd20f10f67..8bf66d0a6800 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
395 dev_put(dev); 395 dev_put(dev);
396} 396}
397 397
398static int tcf_mirred_delete(struct net *net, u32 index)
399{
400 struct tc_action_net *tn = net_generic(net, mirred_net_id);
401
402 return tcf_idr_delete_index(tn, index);
403}
404
405static struct tc_action_ops act_mirred_ops = { 398static struct tc_action_ops act_mirred_ops = {
406 .kind = "mirred", 399 .kind = "mirred",
407 .type = TCA_ACT_MIRRED, 400 .type = TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
416 .size = sizeof(struct tcf_mirred), 409 .size = sizeof(struct tcf_mirred),
417 .get_dev = tcf_mirred_get_dev, 410 .get_dev = tcf_mirred_get_dev,
418 .put_dev = tcf_mirred_put_dev, 411 .put_dev = tcf_mirred_put_dev,
419 .delete = tcf_mirred_delete,
420}; 412};
421 413
422static __net_init int mirred_init_net(struct net *net) 414static __net_init int mirred_init_net(struct net *net)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 822e903bfc25..4313aa102440 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
300 return tcf_idr_search(tn, a, index); 300 return tcf_idr_search(tn, a, index);
301} 301}
302 302
303static int tcf_nat_delete(struct net *net, u32 index)
304{
305 struct tc_action_net *tn = net_generic(net, nat_net_id);
306
307 return tcf_idr_delete_index(tn, index);
308}
309
310static struct tc_action_ops act_nat_ops = { 303static struct tc_action_ops act_nat_ops = {
311 .kind = "nat", 304 .kind = "nat",
312 .type = TCA_ACT_NAT, 305 .type = TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
316 .init = tcf_nat_init, 309 .init = tcf_nat_init,
317 .walk = tcf_nat_walker, 310 .walk = tcf_nat_walker,
318 .lookup = tcf_nat_search, 311 .lookup = tcf_nat_search,
319 .delete = tcf_nat_delete,
320 .size = sizeof(struct tcf_nat), 312 .size = sizeof(struct tcf_nat),
321}; 313};
322 314
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 8a7a7cb94e83..ad99a99f11f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
109{ 109{
110 struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX); 110 struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
111 111
112 if (!keys_start)
113 goto nla_failure;
112 for (; n > 0; n--) { 114 for (; n > 0; n--) {
113 struct nlattr *key_start; 115 struct nlattr *key_start;
114 116
115 key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX); 117 key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
118 if (!key_start)
119 goto nla_failure;
116 120
117 if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) || 121 if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
118 nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) { 122 nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
119 nlmsg_trim(skb, keys_start); 123 goto nla_failure;
120 return -EINVAL;
121 }
122 124
123 nla_nest_end(skb, key_start); 125 nla_nest_end(skb, key_start);
124 126
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
128 nla_nest_end(skb, keys_start); 130 nla_nest_end(skb, keys_start);
129 131
130 return 0; 132 return 0;
133nla_failure:
134 nla_nest_cancel(skb, keys_start);
135 return -EINVAL;
131} 136}
132 137
133static int tcf_pedit_init(struct net *net, struct nlattr *nla, 138static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
418 opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; 423 opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
419 424
420 if (p->tcfp_keys_ex) { 425 if (p->tcfp_keys_ex) {
421 tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys); 426 if (tcf_pedit_key_ex_dump(skb,
427 p->tcfp_keys_ex,
428 p->tcfp_nkeys))
429 goto nla_put_failure;
422 430
423 if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) 431 if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
424 goto nla_put_failure; 432 goto nla_put_failure;
@@ -460,13 +468,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
460 return tcf_idr_search(tn, a, index); 468 return tcf_idr_search(tn, a, index);
461} 469}
462 470
463static int tcf_pedit_delete(struct net *net, u32 index)
464{
465 struct tc_action_net *tn = net_generic(net, pedit_net_id);
466
467 return tcf_idr_delete_index(tn, index);
468}
469
470static struct tc_action_ops act_pedit_ops = { 471static struct tc_action_ops act_pedit_ops = {
471 .kind = "pedit", 472 .kind = "pedit",
472 .type = TCA_ACT_PEDIT, 473 .type = TCA_ACT_PEDIT,
@@ -477,7 +478,6 @@ static struct tc_action_ops act_pedit_ops = {
477 .init = tcf_pedit_init, 478 .init = tcf_pedit_init,
478 .walk = tcf_pedit_walker, 479 .walk = tcf_pedit_walker,
479 .lookup = tcf_pedit_search, 480 .lookup = tcf_pedit_search,
480 .delete = tcf_pedit_delete,
481 .size = sizeof(struct tcf_pedit), 481 .size = sizeof(struct tcf_pedit),
482}; 482};
483 483
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 06f0742db593..5d8bfa878477 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
320 return tcf_idr_search(tn, a, index); 320 return tcf_idr_search(tn, a, index);
321} 321}
322 322
323static int tcf_police_delete(struct net *net, u32 index)
324{
325 struct tc_action_net *tn = net_generic(net, police_net_id);
326
327 return tcf_idr_delete_index(tn, index);
328}
329
330MODULE_AUTHOR("Alexey Kuznetsov"); 323MODULE_AUTHOR("Alexey Kuznetsov");
331MODULE_DESCRIPTION("Policing actions"); 324MODULE_DESCRIPTION("Policing actions");
332MODULE_LICENSE("GPL"); 325MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
340 .init = tcf_police_init, 333 .init = tcf_police_init,
341 .walk = tcf_police_walker, 334 .walk = tcf_police_walker,
342 .lookup = tcf_police_search, 335 .lookup = tcf_police_search,
343 .delete = tcf_police_delete,
344 .size = sizeof(struct tcf_police), 336 .size = sizeof(struct tcf_police),
345}; 337};
346 338
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 207b4132d1b0..6b67aa13d2dd 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
69 69
70 if (!exists) { 70 if (!exists) {
71 ret = tcf_idr_create(tn, parm->index, est, a, 71 ret = tcf_idr_create(tn, parm->index, est, a,
72 &act_sample_ops, bind, false); 72 &act_sample_ops, bind, true);
73 if (ret) { 73 if (ret) {
74 tcf_idr_cleanup(tn, parm->index); 74 tcf_idr_cleanup(tn, parm->index);
75 return ret; 75 return ret;
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
232 return tcf_idr_search(tn, a, index); 232 return tcf_idr_search(tn, a, index);
233} 233}
234 234
235static int tcf_sample_delete(struct net *net, u32 index)
236{
237 struct tc_action_net *tn = net_generic(net, sample_net_id);
238
239 return tcf_idr_delete_index(tn, index);
240}
241
242static struct tc_action_ops act_sample_ops = { 235static struct tc_action_ops act_sample_ops = {
243 .kind = "sample", 236 .kind = "sample",
244 .type = TCA_ACT_SAMPLE, 237 .type = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
249 .cleanup = tcf_sample_cleanup, 242 .cleanup = tcf_sample_cleanup,
250 .walk = tcf_sample_walker, 243 .walk = tcf_sample_walker,
251 .lookup = tcf_sample_search, 244 .lookup = tcf_sample_search,
252 .delete = tcf_sample_delete,
253 .size = sizeof(struct tcf_sample), 245 .size = sizeof(struct tcf_sample),
254}; 246};
255 247
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e616523ba3c1..52400d49f81f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
196 return tcf_idr_search(tn, a, index); 196 return tcf_idr_search(tn, a, index);
197} 197}
198 198
199static int tcf_simp_delete(struct net *net, u32 index)
200{
201 struct tc_action_net *tn = net_generic(net, simp_net_id);
202
203 return tcf_idr_delete_index(tn, index);
204}
205
206static struct tc_action_ops act_simp_ops = { 199static struct tc_action_ops act_simp_ops = {
207 .kind = "simple", 200 .kind = "simple",
208 .type = TCA_ACT_SIMP, 201 .type = TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
213 .init = tcf_simp_init, 206 .init = tcf_simp_init,
214 .walk = tcf_simp_walker, 207 .walk = tcf_simp_walker,
215 .lookup = tcf_simp_search, 208 .lookup = tcf_simp_search,
216 .delete = tcf_simp_delete,
217 .size = sizeof(struct tcf_defact), 209 .size = sizeof(struct tcf_defact),
218}; 210};
219 211
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 926d7bc4a89d..73e44ce2a883 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
299 return tcf_idr_search(tn, a, index); 299 return tcf_idr_search(tn, a, index);
300} 300}
301 301
302static int tcf_skbedit_delete(struct net *net, u32 index)
303{
304 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
305
306 return tcf_idr_delete_index(tn, index);
307}
308
309static struct tc_action_ops act_skbedit_ops = { 302static struct tc_action_ops act_skbedit_ops = {
310 .kind = "skbedit", 303 .kind = "skbedit",
311 .type = TCA_ACT_SKBEDIT, 304 .type = TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
316 .cleanup = tcf_skbedit_cleanup, 309 .cleanup = tcf_skbedit_cleanup,
317 .walk = tcf_skbedit_walker, 310 .walk = tcf_skbedit_walker,
318 .lookup = tcf_skbedit_search, 311 .lookup = tcf_skbedit_search,
319 .delete = tcf_skbedit_delete,
320 .size = sizeof(struct tcf_skbedit), 312 .size = sizeof(struct tcf_skbedit),
321}; 313};
322 314
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index d6a1af0c4171..588077fafd6c 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
259 return tcf_idr_search(tn, a, index); 259 return tcf_idr_search(tn, a, index);
260} 260}
261 261
262static int tcf_skbmod_delete(struct net *net, u32 index)
263{
264 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
265
266 return tcf_idr_delete_index(tn, index);
267}
268
269static struct tc_action_ops act_skbmod_ops = { 262static struct tc_action_ops act_skbmod_ops = {
270 .kind = "skbmod", 263 .kind = "skbmod",
271 .type = TCA_ACT_SKBMOD, 264 .type = TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
276 .cleanup = tcf_skbmod_cleanup, 269 .cleanup = tcf_skbmod_cleanup,
277 .walk = tcf_skbmod_walker, 270 .walk = tcf_skbmod_walker,
278 .lookup = tcf_skbmod_search, 271 .lookup = tcf_skbmod_search,
279 .delete = tcf_skbmod_delete,
280 .size = sizeof(struct tcf_skbmod), 272 .size = sizeof(struct tcf_skbmod),
281}; 273};
282 274
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 8f09cf08d8fe..681f6f04e7da 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
317 &metadata->u.tun_info, 317 &metadata->u.tun_info,
318 opts_len, extack); 318 opts_len, extack);
319 if (ret < 0) 319 if (ret < 0)
320 goto err_out; 320 goto release_tun_meta;
321 } 321 }
322 322
323 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX; 323 metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
333 &act_tunnel_key_ops, bind, true); 333 &act_tunnel_key_ops, bind, true);
334 if (ret) { 334 if (ret) {
335 NL_SET_ERR_MSG(extack, "Cannot create TC IDR"); 335 NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
336 goto err_out; 336 goto release_tun_meta;
337 } 337 }
338 338
339 ret = ACT_P_CREATED; 339 ret = ACT_P_CREATED;
340 } else if (!ovr) { 340 } else if (!ovr) {
341 tcf_idr_release(*a, bind);
342 NL_SET_ERR_MSG(extack, "TC IDR already exists"); 341 NL_SET_ERR_MSG(extack, "TC IDR already exists");
343 return -EEXIST; 342 ret = -EEXIST;
343 goto release_tun_meta;
344 } 344 }
345 345
346 t = to_tunnel_key(*a); 346 t = to_tunnel_key(*a);
347 347
348 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); 348 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
349 if (unlikely(!params_new)) { 349 if (unlikely(!params_new)) {
350 tcf_idr_release(*a, bind);
351 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); 350 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
352 return -ENOMEM; 351 ret = -ENOMEM;
352 exists = true;
353 goto release_tun_meta;
353 } 354 }
354 params_new->tcft_action = parm->t_action; 355 params_new->tcft_action = parm->t_action;
355 params_new->tcft_enc_metadata = metadata; 356 params_new->tcft_enc_metadata = metadata;
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
367 368
368 return ret; 369 return ret;
369 370
371release_tun_meta:
372 dst_release(&metadata->dst);
373
370err_out: 374err_out:
371 if (exists) 375 if (exists)
372 tcf_idr_release(*a, bind); 376 tcf_idr_release(*a, bind);
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
408 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, 412 nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
409 opt->type) || 413 opt->type) ||
410 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, 414 nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
411 opt->length * 4, opt + 1)) 415 opt->length * 4, opt + 1)) {
416 nla_nest_cancel(skb, start);
412 return -EMSGSIZE; 417 return -EMSGSIZE;
418 }
413 419
414 len -= sizeof(struct geneve_opt) + opt->length * 4; 420 len -= sizeof(struct geneve_opt) + opt->length * 4;
415 src += sizeof(struct geneve_opt) + opt->length * 4; 421 src += sizeof(struct geneve_opt) + opt->length * 4;
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
423 const struct ip_tunnel_info *info) 429 const struct ip_tunnel_info *info)
424{ 430{
425 struct nlattr *start; 431 struct nlattr *start;
426 int err; 432 int err = -EINVAL;
427 433
428 if (!info->options_len) 434 if (!info->options_len)
429 return 0; 435 return 0;
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
435 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) { 441 if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
436 err = tunnel_key_geneve_opts_dump(skb, info); 442 err = tunnel_key_geneve_opts_dump(skb, info);
437 if (err) 443 if (err)
438 return err; 444 goto err_out;
439 } else { 445 } else {
440 return -EINVAL; 446err_out:
447 nla_nest_cancel(skb, start);
448 return err;
441 } 449 }
442 450
443 nla_nest_end(skb, start); 451 nla_nest_end(skb, start);
@@ -548,13 +556,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
548 return tcf_idr_search(tn, a, index); 556 return tcf_idr_search(tn, a, index);
549} 557}
550 558
551static int tunnel_key_delete(struct net *net, u32 index)
552{
553 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
554
555 return tcf_idr_delete_index(tn, index);
556}
557
558static struct tc_action_ops act_tunnel_key_ops = { 559static struct tc_action_ops act_tunnel_key_ops = {
559 .kind = "tunnel_key", 560 .kind = "tunnel_key",
560 .type = TCA_ACT_TUNNEL_KEY, 561 .type = TCA_ACT_TUNNEL_KEY,
@@ -565,7 +566,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
565 .cleanup = tunnel_key_release, 566 .cleanup = tunnel_key_release,
566 .walk = tunnel_key_walker, 567 .walk = tunnel_key_walker,
567 .lookup = tunnel_key_search, 568 .lookup = tunnel_key_search,
568 .delete = tunnel_key_delete,
569 .size = sizeof(struct tcf_tunnel_key), 569 .size = sizeof(struct tcf_tunnel_key),
570}; 570};
571 571
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 209e70ad2c09..033d273afe50 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
296 return tcf_idr_search(tn, a, index); 296 return tcf_idr_search(tn, a, index);
297} 297}
298 298
299static int tcf_vlan_delete(struct net *net, u32 index)
300{
301 struct tc_action_net *tn = net_generic(net, vlan_net_id);
302
303 return tcf_idr_delete_index(tn, index);
304}
305
306static struct tc_action_ops act_vlan_ops = { 299static struct tc_action_ops act_vlan_ops = {
307 .kind = "vlan", 300 .kind = "vlan",
308 .type = TCA_ACT_VLAN, 301 .type = TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
313 .cleanup = tcf_vlan_cleanup, 306 .cleanup = tcf_vlan_cleanup,
314 .walk = tcf_vlan_walker, 307 .walk = tcf_vlan_walker,
315 .lookup = tcf_vlan_search, 308 .lookup = tcf_vlan_search,
316 .delete = tcf_vlan_delete,
317 .size = sizeof(struct tcf_vlan), 309 .size = sizeof(struct tcf_vlan),
318}; 310};
319 311
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 31bd1439cf60..0a75cb2e5e7b 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1252,7 +1252,7 @@ replay:
1252 } 1252 }
1253 chain = tcf_chain_get(block, chain_index, true); 1253 chain = tcf_chain_get(block, chain_index, true);
1254 if (!chain) { 1254 if (!chain) {
1255 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 1255 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
1256 err = -ENOMEM; 1256 err = -ENOMEM;
1257 goto errout; 1257 goto errout;
1258 } 1258 }
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1399 goto errout; 1399 goto errout;
1400 } 1400 }
1401 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 1401 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
1402 err = -EINVAL; 1402 err = -ENOENT;
1403 goto errout; 1403 goto errout;
1404 } 1404 }
1405 1405
@@ -1902,6 +1902,8 @@ replay:
1902 RTM_NEWCHAIN, false); 1902 RTM_NEWCHAIN, false);
1903 break; 1903 break;
1904 case RTM_DELCHAIN: 1904 case RTM_DELCHAIN:
1905 tfilter_notify_chain(net, skb, block, q, parent, n,
1906 chain, RTM_DELTFILTER);
1905 /* Flush the chain first as the user requested chain removal. */ 1907 /* Flush the chain first as the user requested chain removal. */
1906 tcf_chain_flush(chain); 1908 tcf_chain_flush(chain);
1907 /* In case the chain was successfully deleted, put a reference 1909 /* In case the chain was successfully deleted, put a reference
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index d5d2a6dc3921..f218ccf1e2d9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
914 struct nlattr *opt = tca[TCA_OPTIONS]; 914 struct nlattr *opt = tca[TCA_OPTIONS];
915 struct nlattr *tb[TCA_U32_MAX + 1]; 915 struct nlattr *tb[TCA_U32_MAX + 1];
916 u32 htid, flags = 0; 916 u32 htid, flags = 0;
917 size_t sel_size;
917 int err; 918 int err;
918#ifdef CONFIG_CLS_U32_PERF 919#ifdef CONFIG_CLS_U32_PERF
919 size_t size; 920 size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1076 } 1077 }
1077 1078
1078 s = nla_data(tb[TCA_U32_SEL]); 1079 s = nla_data(tb[TCA_U32_SEL]);
1080 sel_size = struct_size(s, keys, s->nkeys);
1081 if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
1082 err = -EINVAL;
1083 goto erridr;
1084 }
1079 1085
1080 n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 1086 n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
1081 if (n == NULL) { 1087 if (n == NULL) {
1082 err = -ENOBUFS; 1088 err = -ENOBUFS;
1083 goto erridr; 1089 goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1092 } 1098 }
1093#endif 1099#endif
1094 1100
1095 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); 1101 memcpy(&n->sel, s, sel_size);
1096 RCU_INIT_POINTER(n->ht_up, ht); 1102 RCU_INIT_POINTER(n->ht_up, ht);
1097 n->handle = handle; 1103 n->handle = handle;
1098 n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0; 1104 n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 35fc7252187c..c07c30b916d5 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -64,7 +64,6 @@
64#include <linux/vmalloc.h> 64#include <linux/vmalloc.h>
65#include <linux/reciprocal_div.h> 65#include <linux/reciprocal_div.h>
66#include <net/netlink.h> 66#include <net/netlink.h>
67#include <linux/version.h>
68#include <linux/if_vlan.h> 67#include <linux/if_vlan.h>
69#include <net/pkt_sched.h> 68#include <net/pkt_sched.h>
70#include <net/pkt_cls.h> 69#include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
621} 620}
622 621
623static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, 622static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
624 int flow_mode) 623 int flow_mode, u16 flow_override, u16 host_override)
625{ 624{
626 u32 flow_hash = 0, srchost_hash, dsthost_hash; 625 u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
627 u16 reduced_hash, srchost_idx, dsthost_idx; 626 u16 reduced_hash, srchost_idx, dsthost_idx;
628 struct flow_keys keys, host_keys; 627 struct flow_keys keys, host_keys;
629 628
630 if (unlikely(flow_mode == CAKE_FLOW_NONE)) 629 if (unlikely(flow_mode == CAKE_FLOW_NONE))
631 return 0; 630 return 0;
632 631
632 /* If both overrides are set we can skip packet dissection entirely */
633 if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
634 (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
635 goto skip_hash;
636
633 skb_flow_dissect_flow_keys(skb, &keys, 637 skb_flow_dissect_flow_keys(skb, &keys,
634 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); 638 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
635 639
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
676 if (flow_mode & CAKE_FLOW_FLOWS) 680 if (flow_mode & CAKE_FLOW_FLOWS)
677 flow_hash = flow_hash_from_keys(&keys); 681 flow_hash = flow_hash_from_keys(&keys);
678 682
683skip_hash:
684 if (flow_override)
685 flow_hash = flow_override - 1;
686 if (host_override) {
687 dsthost_hash = host_override - 1;
688 srchost_hash = host_override - 1;
689 }
690
679 if (!(flow_mode & CAKE_FLOW_FLOWS)) { 691 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
680 if (flow_mode & CAKE_FLOW_SRC_IP) 692 if (flow_mode & CAKE_FLOW_SRC_IP)
681 flow_hash ^= srchost_hash; 693 flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1571 struct cake_sched_data *q = qdisc_priv(sch); 1583 struct cake_sched_data *q = qdisc_priv(sch);
1572 struct tcf_proto *filter; 1584 struct tcf_proto *filter;
1573 struct tcf_result res; 1585 struct tcf_result res;
1574 u32 flow = 0; 1586 u16 flow = 0, host = 0;
1575 int result; 1587 int result;
1576 1588
1577 filter = rcu_dereference_bh(q->filter_list); 1589 filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1595#endif 1607#endif
1596 if (TC_H_MIN(res.classid) <= CAKE_QUEUES) 1608 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1597 flow = TC_H_MIN(res.classid); 1609 flow = TC_H_MIN(res.classid);
1610 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1611 host = TC_H_MAJ(res.classid) >> 16;
1598 } 1612 }
1599hash: 1613hash:
1600 *t = cake_select_tin(sch, skb); 1614 *t = cake_select_tin(sch, skb);
1601 return flow ?: cake_hash(*t, skb, flow_mode) + 1; 1615 return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1602} 1616}
1603 1617
1604static void cake_reconfigure(struct Qdisc *sch); 1618static void cake_reconfigure(struct Qdisc *sch);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d74d00b29942..42191ed9902b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
1048 if (!ctx->packet || !ctx->packet->has_cookie_echo) 1048 if (!ctx->packet || !ctx->packet->has_cookie_echo)
1049 return; 1049 return;
1050 1050
1051 /* fallthru */ 1051 /* fall through */
1052 case SCTP_STATE_ESTABLISHED: 1052 case SCTP_STATE_ESTABLISHED:
1053 case SCTP_STATE_SHUTDOWN_PENDING: 1053 case SCTP_STATE_SHUTDOWN_PENDING:
1054 case SCTP_STATE_SHUTDOWN_RECEIVED: 1054 case SCTP_STATE_SHUTDOWN_RECEIVED:
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index ef5c9a82d4e8..a644292f9faf 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
215struct sctp_ht_iter { 215struct sctp_ht_iter {
216 struct seq_net_private p; 216 struct seq_net_private p;
217 struct rhashtable_iter hti; 217 struct rhashtable_iter hti;
218 int start_fail;
219}; 218};
220 219
221static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) 220static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
224 223
225 sctp_transport_walk_start(&iter->hti); 224 sctp_transport_walk_start(&iter->hti);
226 225
227 iter->start_fail = 0;
228 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); 226 return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
229} 227}
230 228
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
232{ 230{
233 struct sctp_ht_iter *iter = seq->private; 231 struct sctp_ht_iter *iter = seq->private;
234 232
235 if (iter->start_fail)
236 return;
237 sctp_transport_walk_stop(&iter->hti); 233 sctp_transport_walk_stop(&iter->hti);
238} 234}
239 235
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
264 } 260 }
265 261
266 transport = (struct sctp_transport *)v; 262 transport = (struct sctp_transport *)v;
267 if (!sctp_transport_hold(transport))
268 return 0;
269 assoc = transport->asoc; 263 assoc = transport->asoc;
270 epb = &assoc->base; 264 epb = &assoc->base;
271 sk = epb->sk; 265 sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
322 } 316 }
323 317
324 transport = (struct sctp_transport *)v; 318 transport = (struct sctp_transport *)v;
325 if (!sctp_transport_hold(transport))
326 return 0;
327 assoc = transport->asoc; 319 assoc = transport->asoc;
328 320
329 list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, 321 list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e96b15a66aba..f73e9d38d5ba 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2658 } 2658 }
2659 2659
2660 if (params->spp_flags & SPP_IPV6_FLOWLABEL) { 2660 if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
2661 if (trans && trans->ipaddr.sa.sa_family == AF_INET6) { 2661 if (trans) {
2662 trans->flowlabel = params->spp_ipv6_flowlabel & 2662 if (trans->ipaddr.sa.sa_family == AF_INET6) {
2663 SCTP_FLOWLABEL_VAL_MASK;
2664 trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2665 } else if (asoc) {
2666 list_for_each_entry(trans,
2667 &asoc->peer.transport_addr_list,
2668 transports) {
2669 if (trans->ipaddr.sa.sa_family != AF_INET6)
2670 continue;
2671 trans->flowlabel = params->spp_ipv6_flowlabel & 2663 trans->flowlabel = params->spp_ipv6_flowlabel &
2672 SCTP_FLOWLABEL_VAL_MASK; 2664 SCTP_FLOWLABEL_VAL_MASK;
2673 trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK; 2665 trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2674 } 2666 }
2667 } else if (asoc) {
2668 struct sctp_transport *t;
2669
2670 list_for_each_entry(t, &asoc->peer.transport_addr_list,
2671 transports) {
2672 if (t->ipaddr.sa.sa_family != AF_INET6)
2673 continue;
2674 t->flowlabel = params->spp_ipv6_flowlabel &
2675 SCTP_FLOWLABEL_VAL_MASK;
2676 t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
2677 }
2675 asoc->flowlabel = params->spp_ipv6_flowlabel & 2678 asoc->flowlabel = params->spp_ipv6_flowlabel &
2676 SCTP_FLOWLABEL_VAL_MASK; 2679 SCTP_FLOWLABEL_VAL_MASK;
2677 asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK; 2680 asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2687 trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; 2690 trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
2688 trans->dscp |= SCTP_DSCP_SET_MASK; 2691 trans->dscp |= SCTP_DSCP_SET_MASK;
2689 } else if (asoc) { 2692 } else if (asoc) {
2690 list_for_each_entry(trans, 2693 struct sctp_transport *t;
2691 &asoc->peer.transport_addr_list, 2694
2695 list_for_each_entry(t, &asoc->peer.transport_addr_list,
2692 transports) { 2696 transports) {
2693 trans->dscp = params->spp_dscp & 2697 t->dscp = params->spp_dscp &
2694 SCTP_DSCP_VAL_MASK; 2698 SCTP_DSCP_VAL_MASK;
2695 trans->dscp |= SCTP_DSCP_SET_MASK; 2699 t->dscp |= SCTP_DSCP_SET_MASK;
2696 } 2700 }
2697 asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK; 2701 asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
2698 asoc->dscp |= SCTP_DSCP_SET_MASK; 2702 asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
5005 break; 5009 break;
5006 } 5010 }
5007 5011
5012 if (!sctp_transport_hold(t))
5013 continue;
5014
5008 if (net_eq(sock_net(t->asoc->base.sk), net) && 5015 if (net_eq(sock_net(t->asoc->base.sk), net) &&
5009 t->asoc->peer.primary_path == t) 5016 t->asoc->peer.primary_path == t)
5010 break; 5017 break;
5018
5019 sctp_transport_put(t);
5011 } 5020 }
5012 5021
5013 return t; 5022 return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
5017 struct rhashtable_iter *iter, 5026 struct rhashtable_iter *iter,
5018 int pos) 5027 int pos)
5019{ 5028{
5020 void *obj = SEQ_START_TOKEN; 5029 struct sctp_transport *t;
5021 5030
5022 while (pos && (obj = sctp_transport_get_next(net, iter)) && 5031 if (!pos)
5023 !IS_ERR(obj)) 5032 return SEQ_START_TOKEN;
5024 pos--;
5025 5033
5026 return obj; 5034 while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
5035 if (!--pos)
5036 break;
5037 sctp_transport_put(t);
5038 }
5039
5040 return t;
5027} 5041}
5028 5042
5029int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), 5043int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
5082 5096
5083 tsp = sctp_transport_get_idx(net, &hti, *pos + 1); 5097 tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
5084 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) { 5098 for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
5085 if (!sctp_transport_hold(tsp))
5086 continue;
5087 ret = cb(tsp, p); 5099 ret = cb(tsp, p);
5088 if (ret) 5100 if (ret)
5089 break; 5101 break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 12cac85da994..033696e6f74f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
260bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) 260bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
261{ 261{
262 struct dst_entry *dst = sctp_transport_dst_check(t); 262 struct dst_entry *dst = sctp_transport_dst_check(t);
263 struct sock *sk = t->asoc->base.sk;
263 bool change = true; 264 bool change = true;
264 265
265 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { 266 if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
271 pmtu = SCTP_TRUNC4(pmtu); 272 pmtu = SCTP_TRUNC4(pmtu);
272 273
273 if (dst) { 274 if (dst) {
274 dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); 275 struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
276 union sctp_addr addr;
277
278 pf->af->from_sk(&addr, sk);
279 pf->to_sk_daddr(&t->ipaddr, sk);
280 dst->ops->update_pmtu(dst, sk, NULL, pmtu);
281 pf->to_sk_daddr(&addr, sk);
282
275 dst = sctp_transport_dst_check(t); 283 dst = sctp_transport_dst_check(t);
276 } 284 }
277 285
278 if (!dst) { 286 if (!dst) {
279 t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); 287 t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
280 dst = t->dst; 288 dst = t->dst;
281 } 289 }
282 290
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 2d8a1e15e4f9..015231789ed2 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
742 smc->sk.sk_err = -rc; 742 smc->sk.sk_err = -rc;
743 743
744out: 744out:
745 smc->sk.sk_state_change(&smc->sk); 745 if (smc->sk.sk_err)
746 smc->sk.sk_state_change(&smc->sk);
747 else
748 smc->sk.sk_write_space(&smc->sk);
746 kfree(smc->connect_info); 749 kfree(smc->connect_info);
747 smc->connect_info = NULL; 750 smc->connect_info = NULL;
748 release_sock(&smc->sk); 751 release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
1150} 1153}
1151 1154
1152/* listen worker: finish RDMA setup */ 1155/* listen worker: finish RDMA setup */
1153static void smc_listen_rdma_finish(struct smc_sock *new_smc, 1156static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1154 struct smc_clc_msg_accept_confirm *cclc, 1157 struct smc_clc_msg_accept_confirm *cclc,
1155 int local_contact) 1158 int local_contact)
1156{ 1159{
1157 struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; 1160 struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
1158 int reason_code = 0; 1161 int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
1175 if (reason_code) 1178 if (reason_code)
1176 goto decline; 1179 goto decline;
1177 } 1180 }
1178 return; 1181 return 0;
1179 1182
1180decline: 1183decline:
1181 mutex_unlock(&smc_create_lgr_pending); 1184 mutex_unlock(&smc_create_lgr_pending);
1182 smc_listen_decline(new_smc, reason_code, local_contact); 1185 smc_listen_decline(new_smc, reason_code, local_contact);
1186 return reason_code;
1183} 1187}
1184 1188
1185/* setup for RDMA connection of server */ 1189/* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
1276 } 1280 }
1277 1281
1278 /* finish worker */ 1282 /* finish worker */
1279 if (!ism_supported) 1283 if (!ism_supported) {
1280 smc_listen_rdma_finish(new_smc, &cclc, local_contact); 1284 if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
1285 return;
1286 }
1281 smc_conn_save_peer_info(new_smc, &cclc); 1287 smc_conn_save_peer_info(new_smc, &cclc);
1282 mutex_unlock(&smc_create_lgr_pending); 1288 mutex_unlock(&smc_create_lgr_pending);
1283 smc_listen_out_connected(new_smc); 1289 smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1529 return EPOLLNVAL; 1535 return EPOLLNVAL;
1530 1536
1531 smc = smc_sk(sock->sk); 1537 smc = smc_sk(sock->sk);
1532 if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { 1538 if (smc->use_fallback) {
1533 /* delegate to CLC child sock */ 1539 /* delegate to CLC child sock */
1534 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); 1540 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
1535 sk->sk_err = smc->clcsock->sk->sk_err; 1541 sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1560 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 1566 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1561 if (sk->sk_state == SMC_APPCLOSEWAIT1) 1567 if (sk->sk_state == SMC_APPCLOSEWAIT1)
1562 mask |= EPOLLIN; 1568 mask |= EPOLLIN;
1569 if (smc->conn.urg_state == SMC_URG_VALID)
1570 mask |= EPOLLPRI;
1563 } 1571 }
1564 if (smc->conn.urg_state == SMC_URG_VALID)
1565 mask |= EPOLLPRI;
1566 } 1572 }
1567 1573
1568 return mask; 1574 return mask;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 83aba9ade060..52241d679cc9 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
446 vec[i++].iov_len = sizeof(trl); 446 vec[i++].iov_len = sizeof(trl);
447 /* due to the few bytes needed for clc-handshake this cannot block */ 447 /* due to the few bytes needed for clc-handshake this cannot block */
448 len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); 448 len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
449 if (len < sizeof(pclc)) { 449 if (len < 0) {
450 if (len >= 0) { 450 smc->sk.sk_err = smc->clcsock->sk->sk_err;
451 reason_code = -ENETUNREACH; 451 reason_code = -smc->sk.sk_err;
452 smc->sk.sk_err = -reason_code; 452 } else if (len < (int)sizeof(pclc)) {
453 } else { 453 reason_code = -ENETUNREACH;
454 smc->sk.sk_err = smc->clcsock->sk->sk_err; 454 smc->sk.sk_err = -reason_code;
455 reason_code = -smc->sk.sk_err;
456 }
457 } 455 }
458 456
459 return reason_code; 457 return reason_code;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ac961dfb1ea1..ea2b87f29469 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
100 struct smc_cdc_conn_state_flags *txflags = 100 struct smc_cdc_conn_state_flags *txflags =
101 &smc->conn.local_tx_ctrl.conn_state_flags; 101 &smc->conn.local_tx_ctrl.conn_state_flags;
102 102
103 sk->sk_err = ECONNABORTED; 103 if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
104 if (smc->clcsock && smc->clcsock->sk) { 104 sk->sk_err = ECONNABORTED;
105 smc->clcsock->sk->sk_err = ECONNABORTED; 105 if (smc->clcsock && smc->clcsock->sk) {
106 smc->clcsock->sk->sk_state_change(smc->clcsock->sk); 106 smc->clcsock->sk->sk_err = ECONNABORTED;
107 smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
108 }
107 } 109 }
108 switch (sk->sk_state) { 110 switch (sk->sk_state) {
109 case SMC_INIT:
110 sk->sk_state = SMC_PEERABORTWAIT;
111 break;
112 case SMC_ACTIVE: 111 case SMC_ACTIVE:
113 sk->sk_state = SMC_PEERABORTWAIT; 112 sk->sk_state = SMC_PEERABORTWAIT;
114 release_sock(sk); 113 release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
143 case SMC_PEERFINCLOSEWAIT: 142 case SMC_PEERFINCLOSEWAIT:
144 sock_put(sk); /* passive closing */ 143 sock_put(sk); /* passive closing */
145 break; 144 break;
145 case SMC_INIT:
146 case SMC_PEERABORTWAIT: 146 case SMC_PEERABORTWAIT:
147 case SMC_CLOSED: 147 case SMC_CLOSED:
148 break; 148 break;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 01c6ce042a1c..7cb3e4f07c10 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
461}; 461};
462 462
463/* SMC_PNETID family definition */ 463/* SMC_PNETID family definition */
464static struct genl_family smc_pnet_nl_family = { 464static struct genl_family smc_pnet_nl_family __ro_after_init = {
465 .hdrsize = 0, 465 .hdrsize = 0,
466 .name = SMCR_GENL_FAMILY_NAME, 466 .name = SMCR_GENL_FAMILY_NAME,
467 .version = SMCR_GENL_FAMILY_VERSION, 467 .version = SMCR_GENL_FAMILY_VERSION,
diff --git a/net/socket.c b/net/socket.c
index e6945e318f02..01f3f8f32d6f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
941EXPORT_SYMBOL(dlci_ioctl_set); 941EXPORT_SYMBOL(dlci_ioctl_set);
942 942
943static long sock_do_ioctl(struct net *net, struct socket *sock, 943static long sock_do_ioctl(struct net *net, struct socket *sock,
944 unsigned int cmd, unsigned long arg) 944 unsigned int cmd, unsigned long arg,
945 unsigned int ifreq_size)
945{ 946{
946 int err; 947 int err;
947 void __user *argp = (void __user *)arg; 948 void __user *argp = (void __user *)arg;
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
967 } else { 968 } else {
968 struct ifreq ifr; 969 struct ifreq ifr;
969 bool need_copyout; 970 bool need_copyout;
970 if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) 971 if (copy_from_user(&ifr, argp, ifreq_size))
971 return -EFAULT; 972 return -EFAULT;
972 err = dev_ioctl(net, cmd, &ifr, &need_copyout); 973 err = dev_ioctl(net, cmd, &ifr, &need_copyout);
973 if (!err && need_copyout) 974 if (!err && need_copyout)
974 if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) 975 if (copy_to_user(argp, &ifr, ifreq_size))
975 return -EFAULT; 976 return -EFAULT;
976 } 977 }
977 return err; 978 return err;
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1070 err = open_related_ns(&net->ns, get_net_ns); 1071 err = open_related_ns(&net->ns, get_net_ns);
1071 break; 1072 break;
1072 default: 1073 default:
1073 err = sock_do_ioctl(net, sock, cmd, arg); 1074 err = sock_do_ioctl(net, sock, cmd, arg,
1075 sizeof(struct ifreq));
1074 break; 1076 break;
1075 } 1077 }
1076 return err; 1078 return err;
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
2750 int err; 2752 int err;
2751 2753
2752 set_fs(KERNEL_DS); 2754 set_fs(KERNEL_DS);
2753 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); 2755 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
2756 sizeof(struct compat_ifreq));
2754 set_fs(old_fs); 2757 set_fs(old_fs);
2755 if (!err) 2758 if (!err)
2756 err = compat_put_timeval(&ktv, up); 2759 err = compat_put_timeval(&ktv, up);
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
2766 int err; 2769 int err;
2767 2770
2768 set_fs(KERNEL_DS); 2771 set_fs(KERNEL_DS);
2769 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); 2772 err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
2773 sizeof(struct compat_ifreq));
2770 set_fs(old_fs); 2774 set_fs(old_fs);
2771 if (!err) 2775 if (!err)
2772 err = compat_put_timespec(&kts, up); 2776 err = compat_put_timespec(&kts, up);
@@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
3072 } 3076 }
3073 3077
3074 set_fs(KERNEL_DS); 3078 set_fs(KERNEL_DS);
3075 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); 3079 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
3080 sizeof(struct compat_ifreq));
3076 set_fs(old_fs); 3081 set_fs(old_fs);
3077 3082
3078out: 3083out:
@@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3185 case SIOCBONDSETHWADDR: 3190 case SIOCBONDSETHWADDR:
3186 case SIOCBONDCHANGEACTIVE: 3191 case SIOCBONDCHANGEACTIVE:
3187 case SIOCGIFNAME: 3192 case SIOCGIFNAME:
3188 return sock_do_ioctl(net, sock, cmd, arg); 3193 return sock_do_ioctl(net, sock, cmd, arg,
3194 sizeof(struct compat_ifreq));
3189 } 3195 }
3190 3196
3191 return -ENOIOCTLCMD; 3197 return -ENOIOCTLCMD;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 9ee6cfea56dd..d8026543bf4c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
51 * struct tipc_bc_base - base structure for keeping broadcast send state 51 * struct tipc_bc_base - base structure for keeping broadcast send state
52 * @link: broadcast send link structure 52 * @link: broadcast send link structure
53 * @inputq: data input queue; will only carry SOCK_WAKEUP messages 53 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
54 * @dest: array keeping number of reachable destinations per bearer 54 * @dests: array keeping number of reachable destinations per bearer
55 * @primary_bearer: a bearer having links to all broadcast destinations, if any 55 * @primary_bearer: a bearer having links to all broadcast destinations, if any
56 * @bcast_support: indicates if primary bearer, if any, supports broadcast 56 * @bcast_support: indicates if primary bearer, if any, supports broadcast
57 * @rcast_support: indicates if all peer nodes support replicast 57 * @rcast_support: indicates if all peer nodes support replicast
58 * @rc_ratio: dest count as percentage of cluster size where send method changes 58 * @rc_ratio: dest count as percentage of cluster size where send method changes
59 * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast 59 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
60 */ 60 */
61struct tipc_bc_base { 61struct tipc_bc_base {
62 struct tipc_link *link; 62 struct tipc_link *link;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..645c16052052 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
609 609
610 switch (evt) { 610 switch (evt) {
611 case NETDEV_CHANGE: 611 case NETDEV_CHANGE:
612 if (netif_carrier_ok(dev)) 612 if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
613 test_and_set_bit_lock(0, &b->up);
613 break; 614 break;
614 /* else: fall through */ 615 }
615 case NETDEV_UP: 616 /* fall through */
616 test_and_set_bit_lock(0, &b->up);
617 break;
618 case NETDEV_GOING_DOWN: 617 case NETDEV_GOING_DOWN:
619 clear_bit_unlock(0, &b->up); 618 clear_bit_unlock(0, &b->up);
620 tipc_reset_bearer(net, b); 619 tipc_reset_bearer(net, b);
621 break; 620 break;
621 case NETDEV_UP:
622 test_and_set_bit_lock(0, &b->up);
623 break;
622 case NETDEV_CHANGEMTU: 624 case NETDEV_CHANGEMTU:
623 if (tipc_mtu_bad(dev, 0)) { 625 if (tipc_mtu_bad(dev, 0)) {
624 bearer_disable(net, b); 626 bearer_disable(net, b);
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
index aaabb0b776dd..73137f4aeb68 100644
--- a/net/tipc/diag.c
+++ b/net/tipc/diag.c
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
84 84
85 if (h->nlmsg_flags & NLM_F_DUMP) { 85 if (h->nlmsg_flags & NLM_F_DUMP) {
86 struct netlink_dump_control c = { 86 struct netlink_dump_control c = {
87 .start = tipc_dump_start,
87 .dump = tipc_diag_dump, 88 .dump = tipc_diag_dump,
89 .done = tipc_dump_done,
88 }; 90 };
89 netlink_dump_start(net->diag_nlsk, skb, h, &c); 91 netlink_dump_start(net->diag_nlsk, skb, h, &c);
90 return 0; 92 return 0;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b1f0bee54eac..fb886b525d95 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
410 return l->name; 410 return l->name;
411} 411}
412 412
413u32 tipc_link_state(struct tipc_link *l)
414{
415 return l->state;
416}
417
413/** 418/**
414 * tipc_link_create - create a new link 419 * tipc_link_create - create a new link
415 * @n: pointer to associated node 420 * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
841 l->in_session = false; 846 l->in_session = false;
842 l->session++; 847 l->session++;
843 l->mtu = l->advertised_mtu; 848 l->mtu = l->advertised_mtu;
849 spin_lock_bh(&l->wakeupq.lock);
850 spin_lock_bh(&l->inputq->lock);
851 skb_queue_splice_init(&l->wakeupq, l->inputq);
852 spin_unlock_bh(&l->inputq->lock);
853 spin_unlock_bh(&l->wakeupq.lock);
854
844 __skb_queue_purge(&l->transmq); 855 __skb_queue_purge(&l->transmq);
845 __skb_queue_purge(&l->deferdq); 856 __skb_queue_purge(&l->deferdq);
846 skb_queue_splice_init(&l->wakeupq, l->inputq);
847 __skb_queue_purge(&l->backlogq); 857 __skb_queue_purge(&l->backlogq);
848 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; 858 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
849 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; 859 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1380 __skb_queue_tail(xmitq, skb); 1390 __skb_queue_tail(xmitq, skb);
1381} 1391}
1382 1392
1393void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1394 struct sk_buff_head *xmitq)
1395{
1396 u32 onode = tipc_own_addr(l->net);
1397 struct tipc_msg *hdr, *ihdr;
1398 struct sk_buff_head tnlq;
1399 struct sk_buff *skb;
1400 u32 dnode = l->addr;
1401
1402 skb_queue_head_init(&tnlq);
1403 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1404 INT_H_SIZE, BASIC_H_SIZE,
1405 dnode, onode, 0, 0, 0);
1406 if (!skb) {
1407 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1408 return;
1409 }
1410
1411 hdr = buf_msg(skb);
1412 msg_set_msgcnt(hdr, 1);
1413 msg_set_bearer_id(hdr, l->peer_bearer_id);
1414
1415 ihdr = (struct tipc_msg *)msg_data(hdr);
1416 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1417 BASIC_H_SIZE, dnode);
1418 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1419 __skb_queue_tail(&tnlq, skb);
1420 tipc_link_xmit(l, &tnlq, xmitq);
1421}
1422
1383/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets 1423/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1384 * with contents of the link's transmit and backlog queues. 1424 * with contents of the link's transmit and backlog queues.
1385 */ 1425 */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1476 return false; 1516 return false;
1477 if (session != curr_session) 1517 if (session != curr_session)
1478 return false; 1518 return false;
1519 /* Extra sanity check */
1520 if (!link_is_up(l) && msg_ack(hdr))
1521 return false;
1479 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) 1522 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1480 return true; 1523 return true;
1481 /* Accept only STATE with new sequence number */ 1524 /* Accept only STATE with new sequence number */
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7bc494a33fdf..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
88 struct tipc_link **link); 88 struct tipc_link **link);
89void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, 89void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
90 int mtyp, struct sk_buff_head *xmitq); 90 int mtyp, struct sk_buff_head *xmitq);
91void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
92 struct sk_buff_head *xmitq);
91void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq); 93void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
92int tipc_link_fsm_evt(struct tipc_link *l, int evt); 94int tipc_link_fsm_evt(struct tipc_link *l, int evt);
93bool tipc_link_is_up(struct tipc_link *l); 95bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
107u16 tipc_link_acked(struct tipc_link *l); 109u16 tipc_link_acked(struct tipc_link *l);
108u32 tipc_link_id(struct tipc_link *l); 110u32 tipc_link_id(struct tipc_link *l);
109char *tipc_link_name(struct tipc_link *l); 111char *tipc_link_name(struct tipc_link *l);
112u32 tipc_link_state(struct tipc_link *l);
110char tipc_link_plane(struct tipc_link *l); 113char tipc_link_plane(struct tipc_link *l);
111int tipc_link_prio(struct tipc_link *l); 114int tipc_link_prio(struct tipc_link *l);
112int tipc_link_window(struct tipc_link *l); 115int tipc_link_window(struct tipc_link *l);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 88f027b502f6..66d5b2c5987a 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
980 980
981struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port) 981struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
982{ 982{
983 u64 value = (u64)node << 32 | port;
984 struct tipc_dest *dst; 983 struct tipc_dest *dst;
985 984
986 list_for_each_entry(dst, l, list) { 985 list_for_each_entry(dst, l, list) {
987 if (dst->value != value) 986 if (dst->node == node && dst->port == port)
988 continue; 987 return dst;
989 return dst;
990 } 988 }
991 return NULL; 989 return NULL;
992} 990}
993 991
994bool tipc_dest_push(struct list_head *l, u32 node, u32 port) 992bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
995{ 993{
996 u64 value = (u64)node << 32 | port;
997 struct tipc_dest *dst; 994 struct tipc_dest *dst;
998 995
999 if (tipc_dest_find(l, node, port)) 996 if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
1002 dst = kmalloc(sizeof(*dst), GFP_ATOMIC); 999 dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
1003 if (unlikely(!dst)) 1000 if (unlikely(!dst))
1004 return false; 1001 return false;
1005 dst->value = value; 1002 dst->node = node;
1003 dst->port = port;
1006 list_add(&dst->list, l); 1004 list_add(&dst->list, l);
1007 return true; 1005 return true;
1008} 1006}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 0febba41da86..892bd750b85f 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
133 133
134struct tipc_dest { 134struct tipc_dest {
135 struct list_head list; 135 struct list_head list;
136 union { 136 u32 port;
137 struct { 137 u32 node;
138 u32 port;
139 u32 node;
140 };
141 u64 value;
142 };
143}; 138};
144 139
145struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); 140struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6ff2254088f6..99ee419210ba 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
167 }, 167 },
168 { 168 {
169 .cmd = TIPC_NL_SOCK_GET, 169 .cmd = TIPC_NL_SOCK_GET,
170 .start = tipc_dump_start,
170 .dumpit = tipc_nl_sk_dump, 171 .dumpit = tipc_nl_sk_dump,
172 .done = tipc_dump_done,
171 .policy = tipc_nl_policy, 173 .policy = tipc_nl_policy,
172 }, 174 },
173 { 175 {
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index a2f76743c73a..6376467e78f8 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
185 return -ENOMEM; 185 return -ENOMEM;
186 186
187 buf->sk = msg->dst_sk; 187 buf->sk = msg->dst_sk;
188 if (__tipc_dump_start(&cb, msg->net)) {
189 kfree_skb(buf);
190 return -ENOMEM;
191 }
188 192
189 do { 193 do {
190 int rem; 194 int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
216 err = 0; 220 err = 0;
217 221
218err_out: 222err_out:
223 tipc_dump_done(&cb);
219 kfree_skb(buf); 224 kfree_skb(buf);
220 225
221 if (err == -EMSGSIZE) { 226 if (err == -EMSGSIZE) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 68014f1b6976..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -111,6 +111,7 @@ struct tipc_node {
111 int action_flags; 111 int action_flags;
112 struct list_head list; 112 struct list_head list;
113 int state; 113 int state;
114 bool failover_sent;
114 u16 sync_point; 115 u16 sync_point;
115 int link_cnt; 116 int link_cnt;
116 u16 working_links; 117 u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
680 *slot0 = bearer_id; 681 *slot0 = bearer_id;
681 *slot1 = bearer_id; 682 *slot1 = bearer_id;
682 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT); 683 tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
684 n->failover_sent = false;
683 n->action_flags |= TIPC_NOTIFY_NODE_UP; 685 n->action_flags |= TIPC_NOTIFY_NODE_UP;
684 tipc_link_set_active(nl, true); 686 tipc_link_set_active(nl, true);
685 tipc_bcast_add_peer(n->net, nl, xmitq); 687 tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
911 bool reset = true; 913 bool reset = true;
912 char *if_name; 914 char *if_name;
913 unsigned long intv; 915 unsigned long intv;
916 u16 session;
914 917
915 *dupl_addr = false; 918 *dupl_addr = false;
916 *respond = false; 919 *respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
997 goto exit; 1000 goto exit;
998 1001
999 if_name = strchr(b->name, ':') + 1; 1002 if_name = strchr(b->name, ':') + 1;
1003 get_random_bytes(&session, sizeof(u16));
1000 if (!tipc_link_create(net, if_name, b->identity, b->tolerance, 1004 if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
1001 b->net_plane, b->mtu, b->priority, 1005 b->net_plane, b->mtu, b->priority,
1002 b->window, mod(tipc_net(net)->random), 1006 b->window, session,
1003 tipc_own_addr(net), addr, peer_id, 1007 tipc_own_addr(net), addr, peer_id,
1004 n->capabilities, 1008 n->capabilities,
1005 tipc_bc_sndlink(n->net), n->bc_entry.link, 1009 tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1615 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl), 1619 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1616 tipc_link_inputq(l)); 1620 tipc_link_inputq(l));
1617 } 1621 }
1622 /* If parallel link was already down, and this happened before
1623 * the tunnel link came up, FAILOVER was never sent. Ensure that
1624 * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
1625 */
1626 if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
1627 tipc_link_create_dummy_tnl_msg(l, xmitq);
1628 n->failover_sent = true;
1629 }
1618 /* If pkts arrive out of order, use lowest calculated syncpt */ 1630 /* If pkts arrive out of order, use lowest calculated syncpt */
1619 if (less(syncpt, n->sync_point)) 1631 if (less(syncpt, n->sync_point))
1620 n->sync_point = syncpt; 1632 n->sync_point = syncpt;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c1e93c9515bc..b6f99b021d09 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
576 sk_stop_timer(sk, &sk->sk_timer); 576 sk_stop_timer(sk, &sk->sk_timer);
577 tipc_sk_remove(tsk); 577 tipc_sk_remove(tsk);
578 578
579 sock_orphan(sk);
579 /* Reject any messages that accumulated in backlog queue */ 580 /* Reject any messages that accumulated in backlog queue */
580 release_sock(sk); 581 release_sock(sk);
581 tipc_dest_list_purge(&tsk->cong_links); 582 tipc_dest_list_purge(&tsk->cong_links);
@@ -1418,8 +1419,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1418 /* Handle implicit connection setup */ 1419 /* Handle implicit connection setup */
1419 if (unlikely(dest)) { 1420 if (unlikely(dest)) {
1420 rc = __tipc_sendmsg(sock, m, dlen); 1421 rc = __tipc_sendmsg(sock, m, dlen);
1421 if (dlen && (dlen == rc)) 1422 if (dlen && dlen == rc) {
1423 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1422 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr)); 1424 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1425 }
1423 return rc; 1426 return rc;
1424 } 1427 }
1425 1428
@@ -2672,6 +2675,8 @@ void tipc_sk_reinit(struct net *net)
2672 2675
2673 rhashtable_walk_stop(&iter); 2676 rhashtable_walk_stop(&iter);
2674 } while (tsk == ERR_PTR(-EAGAIN)); 2677 } while (tsk == ERR_PTR(-EAGAIN));
2678
2679 rhashtable_walk_exit(&iter);
2675} 2680}
2676 2681
2677static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) 2682static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3232,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3227 struct netlink_callback *cb, 3232 struct netlink_callback *cb,
3228 struct tipc_sock *tsk)) 3233 struct tipc_sock *tsk))
3229{ 3234{
3230 struct net *net = sock_net(skb->sk); 3235 struct rhashtable_iter *iter = (void *)cb->args[4];
3231 struct tipc_net *tn = tipc_net(net);
3232 const struct bucket_table *tbl;
3233 u32 prev_portid = cb->args[1];
3234 u32 tbl_id = cb->args[0];
3235 struct rhash_head *pos;
3236 struct tipc_sock *tsk; 3236 struct tipc_sock *tsk;
3237 int err; 3237 int err;
3238 3238
3239 rcu_read_lock(); 3239 rhashtable_walk_start(iter);
3240 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); 3240 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3241 for (; tbl_id < tbl->size; tbl_id++) { 3241 if (IS_ERR(tsk)) {
3242 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { 3242 err = PTR_ERR(tsk);
3243 spin_lock_bh(&tsk->sk.sk_lock.slock); 3243 if (err == -EAGAIN) {
3244 if (prev_portid && prev_portid != tsk->portid) { 3244 err = 0;
3245 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3246 continue; 3245 continue;
3247 } 3246 }
3247 break;
3248 }
3248 3249
3249 err = skb_handler(skb, cb, tsk); 3250 sock_hold(&tsk->sk);
3250 if (err) { 3251 rhashtable_walk_stop(iter);
3251 prev_portid = tsk->portid; 3252 lock_sock(&tsk->sk);
3252 spin_unlock_bh(&tsk->sk.sk_lock.slock); 3253 err = skb_handler(skb, cb, tsk);
3253 goto out; 3254 if (err) {
3254 } 3255 release_sock(&tsk->sk);
3255 3256 sock_put(&tsk->sk);
3256 prev_portid = 0; 3257 goto out;
3257 spin_unlock_bh(&tsk->sk.sk_lock.slock);
3258 } 3258 }
3259 release_sock(&tsk->sk);
3260 rhashtable_walk_start(iter);
3261 sock_put(&tsk->sk);
3259 } 3262 }
3263 rhashtable_walk_stop(iter);
3260out: 3264out:
3261 rcu_read_unlock();
3262 cb->args[0] = tbl_id;
3263 cb->args[1] = prev_portid;
3264
3265 return skb->len; 3265 return skb->len;
3266} 3266}
3267EXPORT_SYMBOL(tipc_nl_sk_walk); 3267EXPORT_SYMBOL(tipc_nl_sk_walk);
3268 3268
3269int tipc_dump_start(struct netlink_callback *cb)
3270{
3271 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3272}
3273EXPORT_SYMBOL(tipc_dump_start);
3274
3275int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3276{
3277 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3278 struct rhashtable_iter *iter = (void *)cb->args[4];
3279 struct tipc_net *tn = tipc_net(net);
3280
3281 if (!iter) {
3282 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3283 if (!iter)
3284 return -ENOMEM;
3285
3286 cb->args[4] = (long)iter;
3287 }
3288
3289 rhashtable_walk_enter(&tn->sk_rht, iter);
3290 return 0;
3291}
3292
3293int tipc_dump_done(struct netlink_callback *cb)
3294{
3295 struct rhashtable_iter *hti = (void *)cb->args[4];
3296
3297 rhashtable_walk_exit(hti);
3298 kfree(hti);
3299 return 0;
3300}
3301EXPORT_SYMBOL(tipc_dump_done);
3302
3269int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb, 3303int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3270 struct tipc_sock *tsk, u32 sk_filter_state, 3304 struct tipc_sock *tsk, u32 sk_filter_state,
3271 u64 (*tipc_diag_gen_cookie)(struct sock *sk)) 3305 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index aff9b2ae5a1f..5e575f205afe 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
68 int (*skb_handler)(struct sk_buff *skb, 68 int (*skb_handler)(struct sk_buff *skb,
69 struct netlink_callback *cb, 69 struct netlink_callback *cb,
70 struct tipc_sock *tsk)); 70 struct tipc_sock *tsk));
71int tipc_dump_start(struct netlink_callback *cb);
72int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
73int tipc_dump_done(struct netlink_callback *cb);
71#endif 74#endif
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index c8e34ef22c30..2627b5d812e9 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
313 conn_put(con); 313 conn_put(con);
314} 314}
315 315
316/* tipc_conn_queue_evt() - interrupt level call from a subscription instance 316/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
317 * The queued work is launched into tipc_send_work()->tipc_send_to_sock() 317 * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
318 */ 318 */
319void tipc_topsrv_queue_evt(struct net *net, int conid, 319void tipc_topsrv_queue_evt(struct net *net, int conid,
320 u32 event, struct tipc_event *evt) 320 u32 event, struct tipc_event *evt)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 292742e50bfa..961b07d4d41c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
686 goto free_marker_record; 686 goto free_marker_record;
687 } 687 }
688 688
689 crypto_info = &ctx->crypto_send; 689 crypto_info = &ctx->crypto_send.info;
690 switch (crypto_info->cipher_type) { 690 switch (crypto_info->cipher_type) {
691 case TLS_CIPHER_AES_GCM_128: 691 case TLS_CIPHER_AES_GCM_128:
692 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; 692 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
780 780
781 ctx->priv_ctx_tx = offload_ctx; 781 ctx->priv_ctx_tx = offload_ctx;
782 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX, 782 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
783 &ctx->crypto_send, 783 &ctx->crypto_send.info,
784 tcp_sk(sk)->write_seq); 784 tcp_sk(sk)->write_seq);
785 if (rc) 785 if (rc)
786 goto release_netdev; 786 goto release_netdev;
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
862 goto release_ctx; 862 goto release_ctx;
863 863
864 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX, 864 rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
865 &ctx->crypto_recv, 865 &ctx->crypto_recv.info,
866 tcp_sk(sk)->copied_seq); 866 tcp_sk(sk)->copied_seq);
867 if (rc) { 867 if (rc) {
868 pr_err_ratelimited("%s: The netdev has refused to offload this socket\n", 868 pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 6102169239d1..450a6dbc5a88 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
320 goto free_req; 320 goto free_req;
321 321
322 iv = buf; 322 iv = buf;
323 memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt, 323 memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
324 TLS_CIPHER_AES_GCM_128_SALT_SIZE); 324 TLS_CIPHER_AES_GCM_128_SALT_SIZE);
325 aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE + 325 aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
326 TLS_CIPHER_AES_GCM_128_IV_SIZE; 326 TLS_CIPHER_AES_GCM_128_IV_SIZE;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 93c0c225ab34..523622dc74f8 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
213{ 213{
214 struct tls_context *ctx = tls_get_ctx(sk); 214 struct tls_context *ctx = tls_get_ctx(sk);
215 215
216 /* We are already sending pages, ignore notification */ 216 /* If in_tcp_sendpages call lower protocol write space handler
217 if (ctx->in_tcp_sendpages) 217 * to ensure we wake up any waiting operations there. For example
218 * if do_tcp_sendpages where to call sk_wait_event.
219 */
220 if (ctx->in_tcp_sendpages) {
221 ctx->sk_write_space(sk);
218 return; 222 return;
223 }
219 224
220 if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { 225 if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
221 gfp_t sk_allocation = sk->sk_allocation; 226 gfp_t sk_allocation = sk->sk_allocation;
@@ -236,6 +241,16 @@ static void tls_write_space(struct sock *sk)
236 ctx->sk_write_space(sk); 241 ctx->sk_write_space(sk);
237} 242}
238 243
244static void tls_ctx_free(struct tls_context *ctx)
245{
246 if (!ctx)
247 return;
248
249 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
250 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
251 kfree(ctx);
252}
253
239static void tls_sk_proto_close(struct sock *sk, long timeout) 254static void tls_sk_proto_close(struct sock *sk, long timeout)
240{ 255{
241 struct tls_context *ctx = tls_get_ctx(sk); 256 struct tls_context *ctx = tls_get_ctx(sk);
@@ -289,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
289#else 304#else
290 { 305 {
291#endif 306#endif
292 kfree(ctx); 307 tls_ctx_free(ctx);
293 ctx = NULL; 308 ctx = NULL;
294 } 309 }
295 310
@@ -300,7 +315,7 @@ skip_tx_cleanup:
300 * for sk->sk_prot->unhash [tls_hw_unhash] 315 * for sk->sk_prot->unhash [tls_hw_unhash]
301 */ 316 */
302 if (free_ctx) 317 if (free_ctx)
303 kfree(ctx); 318 tls_ctx_free(ctx);
304} 319}
305 320
306static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval, 321static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
@@ -325,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
325 } 340 }
326 341
327 /* get user crypto info */ 342 /* get user crypto info */
328 crypto_info = &ctx->crypto_send; 343 crypto_info = &ctx->crypto_send.info;
329 344
330 if (!TLS_CRYPTO_INFO_READY(crypto_info)) { 345 if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
331 rc = -EBUSY; 346 rc = -EBUSY;
@@ -412,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
412 } 427 }
413 428
414 if (tx) 429 if (tx)
415 crypto_info = &ctx->crypto_send; 430 crypto_info = &ctx->crypto_send.info;
416 else 431 else
417 crypto_info = &ctx->crypto_recv; 432 crypto_info = &ctx->crypto_recv.info;
418 433
419 /* Currently we don't support set crypto info more than one time */ 434 /* Currently we don't support set crypto info more than one time */
420 if (TLS_CRYPTO_INFO_READY(crypto_info)) { 435 if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -494,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
494 goto out; 509 goto out;
495 510
496err_crypto_info: 511err_crypto_info:
497 memset(crypto_info, 0, sizeof(*crypto_info)); 512 memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
498out: 513out:
499 return rc; 514 return rc;
500} 515}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 52fbe727d7c1..b9c6ecfbcfea 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
125 &ctx->sg_encrypted_num_elem, 125 &ctx->sg_encrypted_num_elem,
126 &ctx->sg_encrypted_size, 0); 126 &ctx->sg_encrypted_size, 0);
127 127
128 if (rc == -ENOSPC)
129 ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
130
128 return rc; 131 return rc;
129} 132}
130 133
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
138 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size, 141 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
139 tls_ctx->pending_open_record_frags); 142 tls_ctx->pending_open_record_frags);
140 143
144 if (rc == -ENOSPC)
145 ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
146
141 return rc; 147 return rc;
142} 148}
143 149
@@ -925,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk,
925 if (control != TLS_RECORD_TYPE_DATA) 931 if (control != TLS_RECORD_TYPE_DATA)
926 goto recv_end; 932 goto recv_end;
927 } 933 }
934 } else {
935 /* MSG_PEEK right now cannot look beyond current skb
936 * from strparser, meaning we cannot advance skb here
937 * and thus unpause strparser since we'd loose original
938 * one.
939 */
940 break;
928 } 941 }
942
929 /* If we have a new message from strparser, continue now. */ 943 /* If we have a new message from strparser, continue now. */
930 if (copied >= target && !ctx->recv_pkt) 944 if (copied >= target && !ctx->recv_pkt)
931 break; 945 break;
@@ -1049,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
1049 goto read_failure; 1063 goto read_failure;
1050 } 1064 }
1051 1065
1052 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) || 1066 if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
1053 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) { 1067 header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
1054 ret = -EINVAL; 1068 ret = -EINVAL;
1055 goto read_failure; 1069 goto read_failure;
1056 } 1070 }
@@ -1130,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
1130 1144
1131int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) 1145int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1132{ 1146{
1133 char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
1134 struct tls_crypto_info *crypto_info; 1147 struct tls_crypto_info *crypto_info;
1135 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info; 1148 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
1136 struct tls_sw_context_tx *sw_ctx_tx = NULL; 1149 struct tls_sw_context_tx *sw_ctx_tx = NULL;
@@ -1175,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1175 1188
1176 if (tx) { 1189 if (tx) {
1177 crypto_init_wait(&sw_ctx_tx->async_wait); 1190 crypto_init_wait(&sw_ctx_tx->async_wait);
1178 crypto_info = &ctx->crypto_send; 1191 crypto_info = &ctx->crypto_send.info;
1179 cctx = &ctx->tx; 1192 cctx = &ctx->tx;
1180 aead = &sw_ctx_tx->aead_send; 1193 aead = &sw_ctx_tx->aead_send;
1181 } else { 1194 } else {
1182 crypto_init_wait(&sw_ctx_rx->async_wait); 1195 crypto_init_wait(&sw_ctx_rx->async_wait);
1183 crypto_info = &ctx->crypto_recv; 1196 crypto_info = &ctx->crypto_recv.info;
1184 cctx = &ctx->rx; 1197 cctx = &ctx->rx;
1185 aead = &sw_ctx_rx->aead_recv; 1198 aead = &sw_ctx_rx->aead_recv;
1186 } 1199 }
@@ -1259,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
1259 1272
1260 ctx->push_pending_record = tls_sw_push_pending_record; 1273 ctx->push_pending_record = tls_sw_push_pending_record;
1261 1274
1262 memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); 1275 rc = crypto_aead_setkey(*aead, gcm_128_info->key,
1263
1264 rc = crypto_aead_setkey(*aead, keyval,
1265 TLS_CIPHER_AES_GCM_128_KEY_SIZE); 1276 TLS_CIPHER_AES_GCM_128_KEY_SIZE);
1266 if (rc) 1277 if (rc)
1267 goto free_aead; 1278 goto free_aead;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5fb9b7dd9831..176edfefcbaa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
669 goto nla_put_failure; 669 goto nla_put_failure;
670 670
671 if (nla_put_u16(msg, NL80211_WMMR_CW_MIN, 671 if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
672 rule->wmm_rule->client[j].cw_min) || 672 rule->wmm_rule.client[j].cw_min) ||
673 nla_put_u16(msg, NL80211_WMMR_CW_MAX, 673 nla_put_u16(msg, NL80211_WMMR_CW_MAX,
674 rule->wmm_rule->client[j].cw_max) || 674 rule->wmm_rule.client[j].cw_max) ||
675 nla_put_u8(msg, NL80211_WMMR_AIFSN, 675 nla_put_u8(msg, NL80211_WMMR_AIFSN,
676 rule->wmm_rule->client[j].aifsn) || 676 rule->wmm_rule.client[j].aifsn) ||
677 nla_put_u8(msg, NL80211_WMMR_TXOP, 677 nla_put_u16(msg, NL80211_WMMR_TXOP,
678 rule->wmm_rule->client[j].cot)) 678 rule->wmm_rule.client[j].cot))
679 goto nla_put_failure; 679 goto nla_put_failure;
680 680
681 nla_nest_end(msg, nl_wmm_rule); 681 nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
766 766
767 if (large) { 767 if (large) {
768 const struct ieee80211_reg_rule *rule = 768 const struct ieee80211_reg_rule *rule =
769 freq_reg_info(wiphy, chan->center_freq); 769 freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
770 770
771 if (!IS_ERR(rule) && rule->wmm_rule) { 771 if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
772 if (nl80211_msg_put_wmm_rules(msg, rule)) 772 if (nl80211_msg_put_wmm_rules(msg, rule))
773 goto nla_put_failure; 773 goto nla_put_failure;
774 } 774 }
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
3756 return false; 3756 return false;
3757 3757
3758 /* check availability */ 3758 /* check availability */
3759 ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
3759 if (sband->ht_cap.mcs.rx_mask[ridx] & rbit) 3760 if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
3760 mcs[ridx] |= rbit; 3761 mcs[ridx] |= rbit;
3761 else 3762 else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
10230 struct wireless_dev *wdev = dev->ieee80211_ptr; 10231 struct wireless_dev *wdev = dev->ieee80211_ptr;
10231 s32 last, low, high; 10232 s32 last, low, high;
10232 u32 hyst; 10233 u32 hyst;
10233 int i, n; 10234 int i, n, low_index;
10234 int err; 10235 int err;
10235 10236
10236 /* RSSI reporting disabled? */ 10237 /* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
10267 if (last < wdev->cqm_config->rssi_thresholds[i]) 10268 if (last < wdev->cqm_config->rssi_thresholds[i])
10268 break; 10269 break;
10269 10270
10270 low = i > 0 ? 10271 low_index = i - 1;
10271 (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN; 10272 if (low_index >= 0) {
10272 high = i < n ? 10273 low_index = array_index_nospec(low_index, n);
10273 (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX; 10274 low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
10275 } else {
10276 low = S32_MIN;
10277 }
10278 if (i < n) {
10279 i = array_index_nospec(i, n);
10280 high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
10281 } else {
10282 high = S32_MAX;
10283 }
10274 10284
10275 return rdev_set_cqm_rssi_range_config(rdev, dev, low, high); 10285 return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
10276} 10286}
@@ -12205,6 +12215,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
12205 return -EOPNOTSUPP; 12215 return -EOPNOTSUPP;
12206 12216
12207 if (!info->attrs[NL80211_ATTR_MDID] || 12217 if (!info->attrs[NL80211_ATTR_MDID] ||
12218 !info->attrs[NL80211_ATTR_IE] ||
12208 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 12219 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
12209 return -EINVAL; 12220 return -EINVAL;
12210 12221
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4fc66a117b7d..765dedb12361 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
425reg_copy_regd(const struct ieee80211_regdomain *src_regd) 425reg_copy_regd(const struct ieee80211_regdomain *src_regd)
426{ 426{
427 struct ieee80211_regdomain *regd; 427 struct ieee80211_regdomain *regd;
428 int size_of_regd, size_of_wmms; 428 int size_of_regd;
429 unsigned int i; 429 unsigned int i;
430 struct ieee80211_wmm_rule *d_wmm, *s_wmm;
431 430
432 size_of_regd = 431 size_of_regd =
433 sizeof(struct ieee80211_regdomain) + 432 sizeof(struct ieee80211_regdomain) +
434 src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule); 433 src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
435 size_of_wmms = src_regd->n_wmm_rules *
436 sizeof(struct ieee80211_wmm_rule);
437 434
438 regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); 435 regd = kzalloc(size_of_regd, GFP_KERNEL);
439 if (!regd) 436 if (!regd)
440 return ERR_PTR(-ENOMEM); 437 return ERR_PTR(-ENOMEM);
441 438
442 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); 439 memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
443 440
444 d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); 441 for (i = 0; i < src_regd->n_reg_rules; i++)
445 s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
446 memcpy(d_wmm, s_wmm, size_of_wmms);
447
448 for (i = 0; i < src_regd->n_reg_rules; i++) {
449 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i], 442 memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
450 sizeof(struct ieee80211_reg_rule)); 443 sizeof(struct ieee80211_reg_rule));
451 if (!src_regd->reg_rules[i].wmm_rule)
452 continue;
453 444
454 regd->reg_rules[i].wmm_rule = d_wmm +
455 (src_regd->reg_rules[i].wmm_rule - s_wmm) /
456 sizeof(struct ieee80211_wmm_rule);
457 }
458 return regd; 445 return regd;
459} 446}
460 447
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
860 return true; 847 return true;
861} 848}
862 849
863static void set_wmm_rule(struct ieee80211_wmm_rule *rule, 850static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
864 struct fwdb_wmm_rule *wmm) 851 struct fwdb_wmm_rule *wmm)
865{ 852{
853 struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
866 unsigned int i; 854 unsigned int i;
867 855
868 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 856 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
876 rule->ap[i].aifsn = wmm->ap[i].aifsn; 864 rule->ap[i].aifsn = wmm->ap[i].aifsn;
877 rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); 865 rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
878 } 866 }
867
868 rrule->has_wmm = true;
879} 869}
880 870
881static int __regdb_query_wmm(const struct fwdb_header *db, 871static int __regdb_query_wmm(const struct fwdb_header *db,
882 const struct fwdb_country *country, int freq, 872 const struct fwdb_country *country, int freq,
883 u32 *dbptr, struct ieee80211_wmm_rule *rule) 873 struct ieee80211_reg_rule *rule)
884{ 874{
885 unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; 875 unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
886 struct fwdb_collection *coll = (void *)((u8 *)db + ptr); 876 struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
901 wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; 891 wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
902 wmm = (void *)((u8 *)db + wmm_ptr); 892 wmm = (void *)((u8 *)db + wmm_ptr);
903 set_wmm_rule(rule, wmm); 893 set_wmm_rule(rule, wmm);
904 if (dbptr)
905 *dbptr = wmm_ptr;
906 return 0; 894 return 0;
907 } 895 }
908 } 896 }
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
910 return -ENODATA; 898 return -ENODATA;
911} 899}
912 900
913int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr, 901int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
914 struct ieee80211_wmm_rule *rule)
915{ 902{
916 const struct fwdb_header *hdr = regdb; 903 const struct fwdb_header *hdr = regdb;
917 const struct fwdb_country *country; 904 const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
925 country = &hdr->country[0]; 912 country = &hdr->country[0];
926 while (country->coll_ptr) { 913 while (country->coll_ptr) {
927 if (alpha2_equal(alpha2, country->alpha2)) 914 if (alpha2_equal(alpha2, country->alpha2))
928 return __regdb_query_wmm(regdb, country, freq, dbptr, 915 return __regdb_query_wmm(regdb, country, freq, rule);
929 rule);
930 916
931 country++; 917 country++;
932 } 918 }
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
935} 921}
936EXPORT_SYMBOL(reg_query_regdb_wmm); 922EXPORT_SYMBOL(reg_query_regdb_wmm);
937 923
938struct wmm_ptrs {
939 struct ieee80211_wmm_rule *rule;
940 u32 ptr;
941};
942
943static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
944 u32 wmm_ptr, int n_wmms)
945{
946 int i;
947
948 for (i = 0; i < n_wmms; i++) {
949 if (wmm_ptrs[i].ptr == wmm_ptr)
950 return wmm_ptrs[i].rule;
951 }
952 return NULL;
953}
954
955static int regdb_query_country(const struct fwdb_header *db, 924static int regdb_query_country(const struct fwdb_header *db,
956 const struct fwdb_country *country) 925 const struct fwdb_country *country)
957{ 926{
958 unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; 927 unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
959 struct fwdb_collection *coll = (void *)((u8 *)db + ptr); 928 struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
960 struct ieee80211_regdomain *regdom; 929 struct ieee80211_regdomain *regdom;
961 struct ieee80211_regdomain *tmp_rd; 930 unsigned int size_of_regd, i;
962 unsigned int size_of_regd, i, n_wmms = 0;
963 struct wmm_ptrs *wmm_ptrs;
964 931
965 size_of_regd = sizeof(struct ieee80211_regdomain) + 932 size_of_regd = sizeof(struct ieee80211_regdomain) +
966 coll->n_rules * sizeof(struct ieee80211_reg_rule); 933 coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
969 if (!regdom) 936 if (!regdom)
970 return -ENOMEM; 937 return -ENOMEM;
971 938
972 wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
973 if (!wmm_ptrs) {
974 kfree(regdom);
975 return -ENOMEM;
976 }
977
978 regdom->n_reg_rules = coll->n_rules; 939 regdom->n_reg_rules = coll->n_rules;
979 regdom->alpha2[0] = country->alpha2[0]; 940 regdom->alpha2[0] = country->alpha2[0];
980 regdom->alpha2[1] = country->alpha2[1]; 941 regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
1013 1000 * be16_to_cpu(rule->cac_timeout); 974 1000 * be16_to_cpu(rule->cac_timeout);
1014 if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { 975 if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
1015 u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; 976 u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
1016 struct ieee80211_wmm_rule *wmm_pos = 977 struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
1017 find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
1018 struct fwdb_wmm_rule *wmm;
1019 struct ieee80211_wmm_rule *wmm_rule;
1020 978
1021 if (wmm_pos) { 979 set_wmm_rule(rrule, wmm);
1022 rrule->wmm_rule = wmm_pos;
1023 continue;
1024 }
1025 wmm = (void *)((u8 *)db + wmm_ptr);
1026 tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
1027 sizeof(struct ieee80211_wmm_rule),
1028 GFP_KERNEL);
1029
1030 if (!tmp_rd) {
1031 kfree(regdom);
1032 kfree(wmm_ptrs);
1033 return -ENOMEM;
1034 }
1035 regdom = tmp_rd;
1036
1037 wmm_rule = (struct ieee80211_wmm_rule *)
1038 ((u8 *)regdom + size_of_regd + n_wmms *
1039 sizeof(struct ieee80211_wmm_rule));
1040
1041 set_wmm_rule(wmm_rule, wmm);
1042 wmm_ptrs[n_wmms].ptr = wmm_ptr;
1043 wmm_ptrs[n_wmms++].rule = wmm_rule;
1044 } 980 }
1045 } 981 }
1046 kfree(wmm_ptrs);
1047 982
1048 return reg_schedule_apply(regdom); 983 return reg_schedule_apply(regdom);
1049} 984}
@@ -2932,6 +2867,7 @@ static int regulatory_hint_core(const char *alpha2)
2932 request->alpha2[0] = alpha2[0]; 2867 request->alpha2[0] = alpha2[0];
2933 request->alpha2[1] = alpha2[1]; 2868 request->alpha2[1] = alpha2[1];
2934 request->initiator = NL80211_REGDOM_SET_BY_CORE; 2869 request->initiator = NL80211_REGDOM_SET_BY_CORE;
2870 request->wiphy_idx = WIPHY_IDX_INVALID;
2935 2871
2936 queue_regulatory_request(request); 2872 queue_regulatory_request(request);
2937 2873
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d36c3eb7b931..d0e7472dd9fd 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
1058 return NULL; 1058 return NULL;
1059} 1059}
1060 1060
1061/*
1062 * Update RX channel information based on the available frame payload
1063 * information. This is mainly for the 2.4 GHz band where frames can be received
1064 * from neighboring channels and the Beacon frames use the DSSS Parameter Set
1065 * element to indicate the current (transmitting) channel, but this might also
1066 * be needed on other bands if RX frequency does not match with the actual
1067 * operating channel of a BSS.
1068 */
1061static struct ieee80211_channel * 1069static struct ieee80211_channel *
1062cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, 1070cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
1063 struct ieee80211_channel *channel) 1071 struct ieee80211_channel *channel,
1072 enum nl80211_bss_scan_width scan_width)
1064{ 1073{
1065 const u8 *tmp; 1074 const u8 *tmp;
1066 u32 freq; 1075 u32 freq;
1067 int channel_number = -1; 1076 int channel_number = -1;
1077 struct ieee80211_channel *alt_channel;
1068 1078
1069 tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen); 1079 tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
1070 if (tmp && tmp[1] == 1) { 1080 if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
1078 } 1088 }
1079 } 1089 }
1080 1090
1081 if (channel_number < 0) 1091 if (channel_number < 0) {
1092 /* No channel information in frame payload */
1082 return channel; 1093 return channel;
1094 }
1083 1095
1084 freq = ieee80211_channel_to_frequency(channel_number, channel->band); 1096 freq = ieee80211_channel_to_frequency(channel_number, channel->band);
1085 channel = ieee80211_get_channel(wiphy, freq); 1097 alt_channel = ieee80211_get_channel(wiphy, freq);
1086 if (!channel) 1098 if (!alt_channel) {
1087 return NULL; 1099 if (channel->band == NL80211_BAND_2GHZ) {
1088 if (channel->flags & IEEE80211_CHAN_DISABLED) 1100 /*
1101 * Better not allow unexpected channels when that could
1102 * be going beyond the 1-11 range (e.g., discovering
1103 * BSS on channel 12 when radio is configured for
1104 * channel 11.
1105 */
1106 return NULL;
1107 }
1108
1109 /* No match for the payload channel number - ignore it */
1110 return channel;
1111 }
1112
1113 if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
1114 scan_width == NL80211_BSS_CHAN_WIDTH_5) {
1115 /*
1116 * Ignore channel number in 5 and 10 MHz channels where there
1117 * may not be an n:1 or 1:n mapping between frequencies and
1118 * channel numbers.
1119 */
1120 return channel;
1121 }
1122
1123 /*
1124 * Use the channel determined through the payload channel number
1125 * instead of the RX channel reported by the driver.
1126 */
1127 if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
1089 return NULL; 1128 return NULL;
1090 return channel; 1129 return alt_channel;
1091} 1130}
1092 1131
1093/* Returned bss is reference counted and must be cleaned up appropriately. */ 1132/* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
1112 (data->signal < 0 || data->signal > 100))) 1151 (data->signal < 0 || data->signal > 100)))
1113 return NULL; 1152 return NULL;
1114 1153
1115 channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan); 1154 channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
1155 data->scan_width);
1116 if (!channel) 1156 if (!channel)
1117 return NULL; 1157 return NULL;
1118 1158
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
1210 return NULL; 1250 return NULL;
1211 1251
1212 channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable, 1252 channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
1213 ielen, data->chan); 1253 ielen, data->chan, data->scan_width);
1214 if (!channel) 1254 if (!channel)
1215 return NULL; 1255 return NULL;
1216 1256
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e0825a019e9f..959ed3acd240 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
1456 u8 *op_class) 1456 u8 *op_class)
1457{ 1457{
1458 u8 vht_opclass; 1458 u8 vht_opclass;
1459 u16 freq = chandef->center_freq1; 1459 u32 freq = chandef->center_freq1;
1460 1460
1461 if (freq >= 2412 && freq <= 2472) { 1461 if (freq >= 2412 && freq <= 2472) {
1462 if (chandef->width > NL80211_CHAN_WIDTH_40) 1462 if (chandef->width > NL80211_CHAN_WIDTH_40)
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 911ca6d3cb5a..bfe2dbea480b 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
74 return 0; 74 return 0;
75 75
76 if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit) 76 if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
77 return force_zc ? -ENOTSUPP : 0; /* fail or fallback */ 77 return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
78 78
79 bpf.command = XDP_QUERY_XSK_UMEM; 79 bpf.command = XDP_QUERY_XSK_UMEM;
80 80
81 rtnl_lock(); 81 rtnl_lock();
82 err = xdp_umem_query(dev, queue_id); 82 err = xdp_umem_query(dev, queue_id);
83 if (err) { 83 if (err) {
84 err = err < 0 ? -ENOTSUPP : -EBUSY; 84 err = err < 0 ? -EOPNOTSUPP : -EBUSY;
85 goto err_rtnl_unlock; 85 goto err_rtnl_unlock;
86 } 86 }
87 87
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b89c9c7f8c5c..be3520e429c9 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -458,6 +458,7 @@ resume:
458 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 458 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
459 goto drop; 459 goto drop;
460 } 460 }
461 crypto_done = false;
461 } while (!err); 462 } while (!err);
462 463
463 err = xfrm_rcv_cb(skb, family, x->type->proto, 0); 464 err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 45ba07ab3e4f..261995d37ced 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
100 spin_unlock_bh(&x->lock); 100 spin_unlock_bh(&x->lock);
101 101
102 skb_dst_force(skb); 102 skb_dst_force(skb);
103 if (!skb_dst(skb)) {
104 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
105 goto error_nolock;
106 }
103 107
104 if (xfrm_offload(skb)) { 108 if (xfrm_offload(skb)) {
105 x->type_offload->encap(x, skb); 109 x->type_offload->encap(x, skb);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 3110c3fbee20..f094d4b3520d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2491 } 2491 }
2492 2492
2493 skb_dst_force(skb); 2493 skb_dst_force(skb);
2494 if (!skb_dst(skb)) {
2495 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2496 return 0;
2497 }
2494 2498
2495 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); 2499 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2496 if (IS_ERR(dst)) { 2500 if (IS_ERR(dst)) {
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8b8185..df7ca2dabc48 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
151 err = -EINVAL; 151 err = -EINVAL;
152 switch (p->family) { 152 switch (p->family) {
153 case AF_INET: 153 case AF_INET:
154 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
155 goto out;
156
154 break; 157 break;
155 158
156 case AF_INET6: 159 case AF_INET6:
157#if IS_ENABLED(CONFIG_IPV6) 160#if IS_ENABLED(CONFIG_IPV6)
161 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
162 goto out;
163
158 break; 164 break;
159#else 165#else
160 err = -EAFNOSUPPORT; 166 err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
1396 1402
1397 switch (p->sel.family) { 1403 switch (p->sel.family) {
1398 case AF_INET: 1404 case AF_INET:
1405 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
1406 return -EINVAL;
1407
1399 break; 1408 break;
1400 1409
1401 case AF_INET6: 1410 case AF_INET6:
1402#if IS_ENABLED(CONFIG_IPV6) 1411#if IS_ENABLED(CONFIG_IPV6)
1412 if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
1413 return -EINVAL;
1414
1403 break; 1415 break;
1404#else 1416#else
1405 return -EAFNOSUPPORT; 1417 return -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
1480 (ut[i].family != prev_family)) 1492 (ut[i].family != prev_family))
1481 return -EINVAL; 1493 return -EINVAL;
1482 1494
1495 if (ut[i].mode >= XFRM_MODE_MAX)
1496 return -EINVAL;
1497
1483 prev_family = ut[i].family; 1498 prev_family = ut[i].family;
1484 1499
1485 switch (ut[i].family) { 1500 switch (ut[i].family) {
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index c75413d05a63..ce53639a864a 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
153# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 153# Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
154cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) 154cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
155 155
156# cc-if-fullversion
157# Usage: EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
158cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
159
160# cc-ldoption 156# cc-ldoption
161# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 157# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
162cc-ldoption = $(call try-run,\ 158cc-ldoption = $(call try-run,\
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 1c48572223d1..5a2d1c9578a0 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -246,8 +246,6 @@ objtool_args += --no-fp
246endif 246endif
247ifdef CONFIG_GCOV_KERNEL 247ifdef CONFIG_GCOV_KERNEL
248objtool_args += --no-unreachable 248objtool_args += --no-unreachable
249else
250objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
251endif 249endif
252ifdef CONFIG_RETPOLINE 250ifdef CONFIG_RETPOLINE
253ifneq ($(RETPOLINE_CFLAGS),) 251ifneq ($(RETPOLINE_CFLAGS),)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 5219280bf7ff..161b0224d6ae 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -380,6 +380,7 @@ our $Attribute = qr{
380 __noclone| 380 __noclone|
381 __deprecated| 381 __deprecated|
382 __read_mostly| 382 __read_mostly|
383 __ro_after_init|
383 __kprobes| 384 __kprobes|
384 $InitAttribute| 385 $InitAttribute|
385 ____cacheline_aligned| 386 ____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
3311 # known declaration macros 3312 # known declaration macros
3312 $sline =~ /^\+\s+$declaration_macros/ || 3313 $sline =~ /^\+\s+$declaration_macros/ ||
3313 # start of struct or union or enum 3314 # start of struct or union or enum
3314 $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ || 3315 $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
3315 # start or end of block or continuation of declaration 3316 # start or end of block or continuation of declaration
3316 $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ || 3317 $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
3317 # bitfield continuation 3318 # bitfield continuation
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 999d585eaa73..e083bcae343f 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -11,13 +11,14 @@ DEPMOD=$1
11KERNELRELEASE=$2 11KERNELRELEASE=$2
12 12
13if ! test -r System.map ; then 13if ! test -r System.map ; then
14 echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
14 exit 0 15 exit 0
15fi 16fi
16 17
17if [ -z $(command -v $DEPMOD) ]; then 18if [ -z $(command -v $DEPMOD) ]; then
18 echo "'make modules_install' requires $DEPMOD. Please install it." >&2 19 echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
19 echo "This is probably in the kmod package." >&2 20 echo "This is probably in the kmod package." >&2
20 exit 1 21 exit 0
21fi 22fi
22 23
23# older versions of depmod require the version string to start with three 24# older versions of depmod require the version string to start with three
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 4a7bd2192073..67ed9f6ccdf8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
221 221
222# check if necessary packages are available, and configure build flags 222# check if necessary packages are available, and configure build flags
223define filechk_conf_cfg 223define filechk_conf_cfg
224 $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
225 $(CONFIG_SHELL) $< 224 $(CONFIG_SHELL) $<
226endef 225endef
227 226
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644
index 7a1c40bfb58c..000000000000
--- a/scripts/kconfig/check-pkgconfig.sh
+++ /dev/null
@@ -1,8 +0,0 @@
1#!/bin/sh
2# SPDX-License-Identifier: GPL-2.0
3# Check for pkg-config presence
4
5if [ -z $(command -v pkg-config) ]; then
6 echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
7 exit 1
8fi
diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh
index 533b3d8f8f08..480ecd8b9f41 100755
--- a/scripts/kconfig/gconf-cfg.sh
+++ b/scripts/kconfig/gconf-cfg.sh
@@ -3,6 +3,13 @@
3 3
4PKG="gtk+-2.0 gmodule-2.0 libglade-2.0" 4PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
5 5
6if [ -z "$(command -v pkg-config)" ]; then
7 echo >&2 "*"
8 echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
9 echo >&2 "*"
10 exit 1
11fi
12
6if ! pkg-config --exists $PKG; then 13if ! pkg-config --exists $PKG; then
7 echo >&2 "*" 14 echo >&2 "*"
8 echo >&2 "* Unable to find the GTK+ installation. Please make sure that" 15 echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index e6f9facd0077..c812872d7f9d 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -4,20 +4,23 @@
4PKG="ncursesw" 4PKG="ncursesw"
5PKG2="ncurses" 5PKG2="ncurses"
6 6
7if pkg-config --exists $PKG; then 7if [ -n "$(command -v pkg-config)" ]; then
8 echo cflags=\"$(pkg-config --cflags $PKG)\" 8 if pkg-config --exists $PKG; then
9 echo libs=\"$(pkg-config --libs $PKG)\" 9 echo cflags=\"$(pkg-config --cflags $PKG)\"
10 exit 0 10 echo libs=\"$(pkg-config --libs $PKG)\"
11fi 11 exit 0
12 fi
12 13
13if pkg-config --exists $PKG2; then 14 if pkg-config --exists $PKG2; then
14 echo cflags=\"$(pkg-config --cflags $PKG2)\" 15 echo cflags=\"$(pkg-config --cflags $PKG2)\"
15 echo libs=\"$(pkg-config --libs $PKG2)\" 16 echo libs=\"$(pkg-config --libs $PKG2)\"
16 exit 0 17 exit 0
18 fi
17fi 19fi
18 20
19# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses 21# Check the default paths in case pkg-config is not installed.
20# by pkg-config. 22# (Even if it is installed, some distributions such as openSUSE cannot
23# find ncurses by pkg-config.)
21if [ -f /usr/include/ncursesw/ncurses.h ]; then 24if [ -f /usr/include/ncursesw/ncurses.h ]; then
22 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" 25 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
23 echo libs=\"-lncursesw\" 26 echo libs=\"-lncursesw\"
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 83b5836615fb..143c05fec161 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
490 switch (prop->type) { 490 switch (prop->type) {
491 case P_MENU: 491 case P_MENU:
492 child_count++; 492 child_count++;
493 prompt = prompt;
494 if (single_menu_mode) { 493 if (single_menu_mode) {
495 item_make("%s%*c%s", 494 item_make("%s%*c%s",
496 menu->data ? "-->" : "++>", 495 menu->data ? "-->" : "++>",
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh
index 42f5ac73548e..001559ef0a60 100644
--- a/scripts/kconfig/nconf-cfg.sh
+++ b/scripts/kconfig/nconf-cfg.sh
@@ -4,20 +4,23 @@
4PKG="ncursesw menuw panelw" 4PKG="ncursesw menuw panelw"
5PKG2="ncurses menu panel" 5PKG2="ncurses menu panel"
6 6
7if pkg-config --exists $PKG; then 7if [ -n "$(command -v pkg-config)" ]; then
8 echo cflags=\"$(pkg-config --cflags $PKG)\" 8 if pkg-config --exists $PKG; then
9 echo libs=\"$(pkg-config --libs $PKG)\" 9 echo cflags=\"$(pkg-config --cflags $PKG)\"
10 exit 0 10 echo libs=\"$(pkg-config --libs $PKG)\"
11fi 11 exit 0
12 fi
12 13
13if pkg-config --exists $PKG2; then 14 if pkg-config --exists $PKG2; then
14 echo cflags=\"$(pkg-config --cflags $PKG2)\" 15 echo cflags=\"$(pkg-config --cflags $PKG2)\"
15 echo libs=\"$(pkg-config --libs $PKG2)\" 16 echo libs=\"$(pkg-config --libs $PKG2)\"
16 exit 0 17 exit 0
18 fi
17fi 19fi
18 20
19# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses 21# Check the default paths in case pkg-config is not installed.
20# by pkg-config. 22# (Even if it is installed, some distributions such as openSUSE cannot
23# find ncurses by pkg-config.)
21if [ -f /usr/include/ncursesw/ncurses.h ]; then 24if [ -f /usr/include/ncursesw/ncurses.h ]; then
22 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\" 25 echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
23 echo libs=\"-lncursesw -lmenuw -lpanelw\" 26 echo libs=\"-lncursesw -lmenuw -lpanelw\"
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 0862e1562536..02ccc0ae1031 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -4,6 +4,13 @@
4PKG="Qt5Core Qt5Gui Qt5Widgets" 4PKG="Qt5Core Qt5Gui Qt5Widgets"
5PKG2="QtCore QtGui" 5PKG2="QtCore QtGui"
6 6
7if [ -z "$(command -v pkg-config)" ]; then
8 echo >&2 "*"
9 echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
10 echo >&2 "*"
11 exit 1
12fi
13
7if pkg-config --exists $PKG; then 14if pkg-config --exists $PKG; then
8 echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\" 15 echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
9 echo libs=\"$(pkg-config --libs $PKG)\" 16 echo libs=\"$(pkg-config --libs $PKG)\"
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index fe06e77c15eb..f599031260d5 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
389 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$"; 389 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
390 $type = ".quad"; 390 $type = ".quad";
391 $alignment = 2; 391 $alignment = 2;
392} elsif ($arch eq "nds32") {
393 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
394 $alignment = 2;
392} else { 395} else {
393 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; 396 die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
394} 397}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 71f39410691b..79f7dd57d571 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
74 fi 74 fi
75 75
76 # Check for uncommitted changes 76 # Check for uncommitted changes
77 if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then 77 if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
78 printf '%s' -dirty 78 printf '%s' -dirty
79 fi 79 fi
80 80
diff --git a/scripts/subarch.include b/scripts/subarch.include
new file mode 100644
index 000000000000..650682821126
--- /dev/null
+++ b/scripts/subarch.include
@@ -0,0 +1,13 @@
1# SUBARCH tells the usermode build what the underlying arch is. That is set
2# first, and if a usermode build is happening, the "ARCH=um" on the command
3# line overrides the setting of ARCH below. If a native build is happening,
4# then ARCH is assigned, getting whatever value it gets normally, and
5# SUBARCH is subsequently ignored.
6
7SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
8 -e s/sun4u/sparc64/ \
9 -e s/arm.*/arm/ -e s/sa110/arm/ \
10 -e s/s390x/s390/ -e s/parisc64/parisc/ \
11 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
12 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
13 -e s/riscv.*/riscv/)
diff --git a/security/Kconfig b/security/Kconfig
index 27d8b2688f75..d9aa521b5206 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
57config PAGE_TABLE_ISOLATION 57config PAGE_TABLE_ISOLATION
58 bool "Remove the kernel mapping in user mode" 58 bool "Remove the kernel mapping in user mode"
59 default y 59 default y
60 depends on X86 && !UML 60 depends on (X86_64 || X86_PAE) && !UML
61 help 61 help
62 This feature reduces the number of hardware side channels by 62 This feature reduces the number of hardware side channels by
63 ensuring that the majority of kernel addresses are not mapped 63 ensuring that the majority of kernel addresses are not mapped
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index f2f22d00db18..4ccec1bcf6f5 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
79 struct aa_label *label = aa_secid_to_label(secid); 79 struct aa_label *label = aa_secid_to_label(secid);
80 int len; 80 int len;
81 81
82 AA_BUG(!secdata);
83 AA_BUG(!seclen); 82 AA_BUG(!seclen);
84 83
85 if (!label) 84 if (!label)
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 69517e18ef07..08d5662039e3 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
129 runtime->avail = 0; 129 runtime->avail = 0;
130 else 130 else
131 runtime->avail = runtime->buffer_size; 131 runtime->avail = runtime->buffer_size;
132 runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL); 132 runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
133 if (!runtime->buffer) { 133 if (!runtime->buffer) {
134 kfree(runtime); 134 kfree(runtime);
135 return -ENOMEM; 135 return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
655 if (params->avail_min < 1 || params->avail_min > params->buffer_size) 655 if (params->avail_min < 1 || params->avail_min > params->buffer_size)
656 return -EINVAL; 656 return -EINVAL;
657 if (params->buffer_size != runtime->buffer_size) { 657 if (params->buffer_size != runtime->buffer_size) {
658 newbuf = kvmalloc(params->buffer_size, GFP_KERNEL); 658 newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
659 if (!newbuf) 659 if (!newbuf)
660 return -ENOMEM; 660 return -ENOMEM;
661 spin_lock_irq(&runtime->lock); 661 spin_lock_irq(&runtime->lock);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 730ea91d9be8..93676354f87f 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
263error: 263error:
264 mutex_unlock(&devices_mutex); 264 mutex_unlock(&devices_mutex);
265 snd_bebob_stream_destroy_duplex(bebob); 265 snd_bebob_stream_destroy_duplex(bebob);
266 kfree(bebob->maudio_special_quirk);
267 bebob->maudio_special_quirk = NULL;
266 snd_card_free(bebob->card); 268 snd_card_free(bebob->card);
267 dev_info(&bebob->unit->device, 269 dev_info(&bebob->unit->device,
268 "Sound card registration failed: %d\n", err); 270 "Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index bd55620c6a47..c266997ad299 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
96 struct fw_device *device = fw_parent_device(unit); 96 struct fw_device *device = fw_parent_device(unit);
97 int err, rcode; 97 int err, rcode;
98 u64 date; 98 u64 date;
99 __le32 cues[3] = { 99 __le32 *cues;
100 cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
101 cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
102 cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
103 };
104 100
105 /* check date of software used to build */ 101 /* check date of software used to build */
106 err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE, 102 err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
107 &date, sizeof(u64)); 103 &date, sizeof(u64));
108 if (err < 0) 104 if (err < 0)
109 goto end; 105 return err;
110 /* 106 /*
111 * firmware version 5058 or later has date later than "20070401", but 107 * firmware version 5058 or later has date later than "20070401", but
112 * 'date' is not null-terminated. 108 * 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
114 if (date < 0x3230303730343031LL) { 110 if (date < 0x3230303730343031LL) {
115 dev_err(&unit->device, 111 dev_err(&unit->device,
116 "Use firmware version 5058 or later\n"); 112 "Use firmware version 5058 or later\n");
117 err = -ENOSYS; 113 return -ENXIO;
118 goto end;
119 } 114 }
120 115
116 cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
117 if (!cues)
118 return -ENOMEM;
119
120 cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
121 cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
122 cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
123
121 rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST, 124 rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
122 device->node_id, device->generation, 125 device->node_id, device->generation,
123 device->max_speed, BEBOB_ADDR_REG_REQ, 126 device->max_speed, BEBOB_ADDR_REG_REQ,
124 cues, sizeof(cues)); 127 cues, 3 * sizeof(*cues));
128 kfree(cues);
125 if (rcode != RCODE_COMPLETE) { 129 if (rcode != RCODE_COMPLETE) {
126 dev_err(&unit->device, 130 dev_err(&unit->device,
127 "Failed to send a cue to load firmware\n"); 131 "Failed to send a cue to load firmware\n");
128 err = -EIO; 132 err = -EIO;
129 } 133 }
130end: 134
131 return err; 135 return err;
132} 136}
133 137
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
290 bebob->midi_output_ports = 2; 294 bebob->midi_output_ports = 2;
291 } 295 }
292end: 296end:
293 if (err < 0) {
294 kfree(params);
295 bebob->maudio_special_quirk = NULL;
296 }
297 mutex_unlock(&bebob->mutex); 297 mutex_unlock(&bebob->mutex);
298 return err; 298 return err;
299} 299}
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d23f31a..ef689997d6a5 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
49 fw_unit_put(dg00x->unit); 49 fw_unit_put(dg00x->unit);
50 50
51 mutex_destroy(&dg00x->mutex); 51 mutex_destroy(&dg00x->mutex);
52 kfree(dg00x);
52} 53}
53 54
54static void dg00x_card_free(struct snd_card *card) 55static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index ad7a0a32557d..64c3cb0fb926 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
146{ 146{
147 __le32 *reg; 147 __le32 *reg;
148 int i; 148 int i;
149 int err;
149 150
150 reg = kcalloc(18, sizeof(__le32), GFP_KERNEL); 151 reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
151 if (reg == NULL) 152 if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
163 reg[i] = cpu_to_le32(0x00000001); 164 reg[i] = cpu_to_le32(0x00000001);
164 } 165 }
165 166
166 return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST, 167 err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
167 FF400_FETCH_PCM_FRAMES, reg, 168 FF400_FETCH_PCM_FRAMES, reg,
168 sizeof(__le32) * 18, 0); 169 sizeof(__le32) * 18, 0);
170 kfree(reg);
171 return err;
169} 172}
170 173
171static void ff400_dump_sync_status(struct snd_ff *ff, 174static void ff400_dump_sync_status(struct snd_ff *ff,
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613d3da0..f2d073365cf6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@ error:
301 snd_efw_transaction_remove_instance(efw); 301 snd_efw_transaction_remove_instance(efw);
302 snd_efw_stream_destroy_duplex(efw); 302 snd_efw_stream_destroy_duplex(efw);
303 snd_card_free(efw->card); 303 snd_card_free(efw->card);
304 kfree(efw->resp_buf);
305 efw->resp_buf = NULL;
304 dev_info(&efw->unit->device, 306 dev_info(&efw->unit->device,
305 "Sound card registration failed: %d\n", err); 307 "Sound card registration failed: %d\n", err);
306} 308}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1e5b2c802635..2ea8be6c8584 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
130 130
131 kfree(oxfw->spec); 131 kfree(oxfw->spec);
132 mutex_destroy(&oxfw->mutex); 132 mutex_destroy(&oxfw->mutex);
133 kfree(oxfw);
133} 134}
134 135
135/* 136/*
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
207static void do_registration(struct work_struct *work) 208static void do_registration(struct work_struct *work)
208{ 209{
209 struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); 210 struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
211 int i;
210 int err; 212 int err;
211 213
212 if (oxfw->registered) 214 if (oxfw->registered)
@@ -269,7 +271,15 @@ error:
269 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); 271 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
270 if (oxfw->has_output) 272 if (oxfw->has_output)
271 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); 273 snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
274 for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
275 kfree(oxfw->tx_stream_formats[i]);
276 oxfw->tx_stream_formats[i] = NULL;
277 kfree(oxfw->rx_stream_formats[i]);
278 oxfw->rx_stream_formats[i] = NULL;
279 }
272 snd_card_free(oxfw->card); 280 snd_card_free(oxfw->card);
281 kfree(oxfw->spec);
282 oxfw->spec = NULL;
273 dev_info(&oxfw->unit->device, 283 dev_info(&oxfw->unit->device,
274 "Sound card registration failed: %d\n", err); 284 "Sound card registration failed: %d\n", err);
275} 285}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 44ad41fb7374..d3fdc463a884 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
93 fw_unit_put(tscm->unit); 93 fw_unit_put(tscm->unit);
94 94
95 mutex_destroy(&tscm->mutex); 95 mutex_destroy(&tscm->mutex);
96 kfree(tscm);
96} 97}
97 98
98static void tscm_card_free(struct snd_card *card) 99static void tscm_card_free(struct snd_card *card)
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 1bd27576db98..a835558ddbc9 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
146 */ 146 */
147void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream) 147void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
148{ 148{
149 snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN); 149 snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
150 AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
150} 151}
151EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start); 152EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
152 153
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
171 172
172 snd_hdac_ext_link_stream_clear(stream); 173 snd_hdac_ext_link_stream_clear(stream);
173 174
174 snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST); 175 snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
176 AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
175 udelay(3); 177 udelay(3);
176 timeout = 50; 178 timeout = 50;
177 do { 179 do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
242void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link, 244void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
243 int stream) 245 int stream)
244{ 246{
245 snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream)); 247 snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
246} 248}
247EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id); 249EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
248 250
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
415 bool enable, int index) 417 bool enable, int index)
416{ 418{
417 u32 mask = 0; 419 u32 mask = 0;
418 u32 register_mask = 0;
419 420
420 if (!bus->spbcap) { 421 if (!bus->spbcap) {
421 dev_err(bus->dev, "Address of SPB capability is NULL\n"); 422 dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
424 425
425 mask |= (1 << index); 426 mask |= (1 << index);
426 427
427 register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
428
429 mask |= register_mask;
430
431 if (enable) 428 if (enable)
432 snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask); 429 snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
433 else 430 else
434 snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0); 431 snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
435} 432}
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
503 bool enable, int index) 500 bool enable, int index)
504{ 501{
505 u32 mask = 0; 502 u32 mask = 0;
506 u32 register_mask = 0;
507 503
508 if (!bus->drsmcap) { 504 if (!bus->drsmcap) {
509 dev_err(bus->dev, "Address of DRSM capability is NULL\n"); 505 dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
512 508
513 mask |= (1 << index); 509 mask |= (1 << index);
514 510
515 register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
516
517 mask |= register_mask;
518
519 if (enable) 511 if (enable)
520 snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask); 512 snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
521 else 513 else
522 snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0); 514 snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
523} 515}
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 560ec0986e1a..74244d8e2909 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
40 */ 40 */
41void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus) 41void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
42{ 42{
43 WARN_ON_ONCE(!bus->rb.area);
44
43 spin_lock_irq(&bus->reg_lock); 45 spin_lock_irq(&bus->reg_lock);
44 /* CORB set up */ 46 /* CORB set up */
45 bus->corb.addr = bus->rb.addr; 47 bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
383EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset); 385EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
384 386
385/* reset codec link */ 387/* reset codec link */
386static int azx_reset(struct hdac_bus *bus, bool full_reset) 388int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
387{ 389{
388 if (!full_reset) 390 if (!full_reset)
389 goto skip_reset; 391 goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
408 skip_reset: 410 skip_reset:
409 /* check to see if controller is ready */ 411 /* check to see if controller is ready */
410 if (!snd_hdac_chip_readb(bus, GCTL)) { 412 if (!snd_hdac_chip_readb(bus, GCTL)) {
411 dev_dbg(bus->dev, "azx_reset: controller not ready!\n"); 413 dev_dbg(bus->dev, "controller not ready!\n");
412 return -EBUSY; 414 return -EBUSY;
413 } 415 }
414 416
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
423 425
424 return 0; 426 return 0;
425} 427}
428EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
426 429
427/* enable interrupts */ 430/* enable interrupts */
428static void azx_int_enable(struct hdac_bus *bus) 431static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
477 return false; 480 return false;
478 481
479 /* reset controller */ 482 /* reset controller */
480 azx_reset(bus, full_reset); 483 snd_hdac_bus_reset_link(bus, full_reset);
481 484
482 /* initialize interrupts */ 485 /* clear interrupts */
483 azx_int_clear(bus); 486 azx_int_clear(bus);
484 azx_int_enable(bus);
485 487
486 /* initialize the codec command I/O */ 488 /* initialize the codec command I/O */
487 snd_hdac_bus_init_cmd_io(bus); 489 snd_hdac_bus_init_cmd_io(bus);
488 490
491 /* enable interrupts after CORB/RIRB buffers are initialized above */
492 azx_int_enable(bus);
493
489 /* program the position buffer */ 494 /* program the position buffer */
490 if (bus->use_posbuf && bus->posbuf.addr) { 495 if (bus->use_posbuf && bus->posbuf.addr) {
491 snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr); 496 snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 90713741c2dc..6ebe817801ea 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
2540 emu->support_tlv = 1; 2540 emu->support_tlv = 1;
2541 return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp); 2541 return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
2542 case SNDRV_EMU10K1_IOCTL_INFO: 2542 case SNDRV_EMU10K1_IOCTL_INFO:
2543 info = kmalloc(sizeof(*info), GFP_KERNEL); 2543 info = kzalloc(sizeof(*info), GFP_KERNEL);
2544 if (!info) 2544 if (!info)
2545 return -ENOMEM; 2545 return -ENOMEM;
2546 snd_emu10k1_fx8010_info(emu, info); 2546 snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 0a5085537034..26d348b47867 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
3935 3935
3936 list_for_each_codec(codec, bus) { 3936 list_for_each_codec(codec, bus) {
3937 /* FIXME: maybe a better way needed for forced reset */ 3937 /* FIXME: maybe a better way needed for forced reset */
3938 cancel_delayed_work_sync(&codec->jackpoll_work); 3938 if (current_work() != &codec->jackpoll_work.work)
3939 cancel_delayed_work_sync(&codec->jackpoll_work);
3939#ifdef CONFIG_PM 3940#ifdef CONFIG_PM
3940 if (hda_codec_is_power_on(codec)) { 3941 if (hda_codec_is_power_on(codec)) {
3941 hda_call_codec_suspend(codec); 3942 hda_call_codec_suspend(codec);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1b2ce304152a..aa4c672dbaf7 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -365,8 +365,10 @@ enum {
365 */ 365 */
366#ifdef SUPPORT_VGA_SWITCHEROO 366#ifdef SUPPORT_VGA_SWITCHEROO
367#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo) 367#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
368#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link)
368#else 369#else
369#define use_vga_switcheroo(chip) 0 370#define use_vga_switcheroo(chip) 0
371#define needs_eld_notify_link(chip) false
370#endif 372#endif
371 373
372#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ 374#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
453#endif 455#endif
454 456
455static int azx_acquire_irq(struct azx *chip, int do_disconnect); 457static int azx_acquire_irq(struct azx *chip, int do_disconnect);
458static void set_default_power_save(struct azx *chip);
456 459
457/* 460/*
458 * initialize the PCI registers 461 * initialize the PCI registers
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev)
1201 azx_bus(chip)->codec_powered || !chip->running) 1204 azx_bus(chip)->codec_powered || !chip->running)
1202 return -EBUSY; 1205 return -EBUSY;
1203 1206
1207 /* ELD notification gets broken when HD-audio bus is off */
1208 if (needs_eld_notify_link(hda))
1209 return -EBUSY;
1210
1204 return 0; 1211 return 0;
1205} 1212}
1206 1213
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
1298 return true; 1305 return true;
1299} 1306}
1300 1307
1308/*
1309 * The discrete GPU cannot power down unless the HDA controller runtime
1310 * suspends, so activate runtime PM on codecs even if power_save == 0.
1311 */
1312static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
1313{
1314 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
1315 struct hda_codec *codec;
1316
1317 if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
1318 list_for_each_codec(codec, &chip->bus)
1319 codec->auto_runtime_pm = 1;
1320 /* reset the power save setup */
1321 if (chip->running)
1322 set_default_power_save(chip);
1323 }
1324}
1325
1326static void azx_vs_gpu_bound(struct pci_dev *pci,
1327 enum vga_switcheroo_client_id client_id)
1328{
1329 struct snd_card *card = pci_get_drvdata(pci);
1330 struct azx *chip = card->private_data;
1331 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
1332
1333 if (client_id == VGA_SWITCHEROO_DIS)
1334 hda->need_eld_notify_link = 0;
1335 setup_vga_switcheroo_runtime_pm(chip);
1336}
1337
1301static void init_vga_switcheroo(struct azx *chip) 1338static void init_vga_switcheroo(struct azx *chip)
1302{ 1339{
1303 struct hda_intel *hda = container_of(chip, struct hda_intel, chip); 1340 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip)
1306 dev_info(chip->card->dev, 1343 dev_info(chip->card->dev,
1307 "Handle vga_switcheroo audio client\n"); 1344 "Handle vga_switcheroo audio client\n");
1308 hda->use_vga_switcheroo = 1; 1345 hda->use_vga_switcheroo = 1;
1346 hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
1309 chip->driver_caps |= AZX_DCAPS_PM_RUNTIME; 1347 chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
1310 pci_dev_put(p); 1348 pci_dev_put(p);
1311 } 1349 }
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip)
1314static const struct vga_switcheroo_client_ops azx_vs_ops = { 1352static const struct vga_switcheroo_client_ops azx_vs_ops = {
1315 .set_gpu_state = azx_vs_set_state, 1353 .set_gpu_state = azx_vs_set_state,
1316 .can_switch = azx_vs_can_switch, 1354 .can_switch = azx_vs_can_switch,
1355 .gpu_bound = azx_vs_gpu_bound,
1317}; 1356};
1318 1357
1319static int register_vga_switcheroo(struct azx *chip) 1358static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip)
1339#define init_vga_switcheroo(chip) /* NOP */ 1378#define init_vga_switcheroo(chip) /* NOP */
1340#define register_vga_switcheroo(chip) 0 1379#define register_vga_switcheroo(chip) 0
1341#define check_hdmi_disabled(pci) false 1380#define check_hdmi_disabled(pci) false
1381#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */
1342#endif /* SUPPORT_VGA_SWITCHER */ 1382#endif /* SUPPORT_VGA_SWITCHER */
1343 1383
1344/* 1384/*
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip)
1352 1392
1353 if (azx_has_pm_runtime(chip) && chip->running) 1393 if (azx_has_pm_runtime(chip) && chip->running)
1354 pm_runtime_get_noresume(&pci->dev); 1394 pm_runtime_get_noresume(&pci->dev);
1395 chip->running = 0;
1355 1396
1356 azx_del_card_list(chip); 1397 azx_del_card_list(chip);
1357 1398
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2230}; 2271};
2231#endif /* CONFIG_PM */ 2272#endif /* CONFIG_PM */
2232 2273
2274static void set_default_power_save(struct azx *chip)
2275{
2276 int val = power_save;
2277
2278#ifdef CONFIG_PM
2279 if (pm_blacklist) {
2280 const struct snd_pci_quirk *q;
2281
2282 q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
2283 if (q && val) {
2284 dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
2285 q->subvendor, q->subdevice);
2286 val = 0;
2287 }
2288 }
2289#endif /* CONFIG_PM */
2290 snd_hda_set_power_save(&chip->bus, val * 1000);
2291}
2292
2233/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ 2293/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
2234static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { 2294static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
2235 [AZX_DRIVER_NVIDIA] = 8, 2295 [AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip)
2241 struct hda_intel *hda = container_of(chip, struct hda_intel, chip); 2301 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
2242 struct hdac_bus *bus = azx_bus(chip); 2302 struct hdac_bus *bus = azx_bus(chip);
2243 struct pci_dev *pci = chip->pci; 2303 struct pci_dev *pci = chip->pci;
2244 struct hda_codec *codec;
2245 int dev = chip->dev_index; 2304 int dev = chip->dev_index;
2246 int val;
2247 int err; 2305 int err;
2248 2306
2249 hda->probe_continued = 1; 2307 hda->probe_continued = 1;
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip)
2322 if (err < 0) 2380 if (err < 0)
2323 goto out_free; 2381 goto out_free;
2324 2382
2383 setup_vga_switcheroo_runtime_pm(chip);
2384
2325 chip->running = 1; 2385 chip->running = 1;
2326 azx_add_card_list(chip); 2386 azx_add_card_list(chip);
2327 2387
2328 val = power_save; 2388 set_default_power_save(chip);
2329#ifdef CONFIG_PM
2330 if (pm_blacklist) {
2331 const struct snd_pci_quirk *q;
2332
2333 q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
2334 if (q && val) {
2335 dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
2336 q->subvendor, q->subdevice);
2337 val = 0;
2338 }
2339 }
2340#endif /* CONFIG_PM */
2341 /*
2342 * The discrete GPU cannot power down unless the HDA controller runtime
2343 * suspends, so activate runtime PM on codecs even if power_save == 0.
2344 */
2345 if (use_vga_switcheroo(hda))
2346 list_for_each_codec(codec, &chip->bus)
2347 codec->auto_runtime_pm = 1;
2348 2389
2349 snd_hda_set_power_save(&chip->bus, val * 1000);
2350 if (azx_has_pm_runtime(chip)) 2390 if (azx_has_pm_runtime(chip))
2351 pm_runtime_put_autosuspend(&pci->dev); 2391 pm_runtime_put_autosuspend(&pci->dev);
2352 2392
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index e3a3d318d2e5..f59719e06b91 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -37,6 +37,7 @@ struct hda_intel {
37 37
38 /* vga_switcheroo setup */ 38 /* vga_switcheroo setup */
39 unsigned int use_vga_switcheroo:1; 39 unsigned int use_vga_switcheroo:1;
40 unsigned int need_eld_notify_link:1;
40 unsigned int vga_switcheroo_registered:1; 41 unsigned int vga_switcheroo_registered:1;
41 unsigned int init_failed:1; /* delayed init failed */ 42 unsigned int init_failed:1; /* delayed init failed */
42 43
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index e359938e3d7e..77b265bd0505 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/iopoll.h>
19#include <linux/sizes.h> 20#include <linux/sizes.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
184 acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data); 185 acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
185} 186}
186 187
188static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
189{
190 u32 dma_ctrl;
191 int ret;
192
193 /* clear the reset bit */
194 dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
195 dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
196 acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
197 /* check the reset bit before programming configuration registers */
198 ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
199 dma_ctrl,
200 !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
201 100, ACP_DMA_RESET_TIME);
202 if (ret < 0)
203 pr_err("Failed to clear reset of channel : %d\n", ch_num);
204}
205
187/* 206/*
188 * Initialize the DMA descriptor information for transfer between 207 * Initialize the DMA descriptor information for transfer between
189 * system memory <-> ACP SRAM 208 * system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
236 config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, 255 config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
237 &dmadscr[i]); 256 &dmadscr[i]);
238 } 257 }
258 pre_config_reset(acp_mmio, ch);
239 config_acp_dma_channel(acp_mmio, ch, 259 config_acp_dma_channel(acp_mmio, ch,
240 dma_dscr_idx - 1, 260 dma_dscr_idx - 1,
241 NUM_DSCRS_PER_CHANNEL, 261 NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
275 config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx, 295 config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
276 &dmadscr[i]); 296 &dmadscr[i]);
277 } 297 }
298 pre_config_reset(acp_mmio, ch);
278 /* Configure the DMA channel with the above descriptore */ 299 /* Configure the DMA channel with the above descriptore */
279 config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1, 300 config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
280 NUM_DSCRS_PER_CHANNEL, 301 NUM_DSCRS_PER_CHANNEL,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 275677de669f..407554175282 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
157 SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 157 SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
158 3, 1, 0), 158 3, 1, 0),
159 SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), 159 SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
160 SOC_SINGLE("MMTLR Data Switch", 0, 160 SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
161 1, 1, 0), 161 0, 1, 0),
162 SOC_ENUM("Mono Channel Select", spdif_mono_select_enum), 162 SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
163 SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24), 163 SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
164}; 164};
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 92b7125ea169..1093f766d0d2 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
520{ 520{
521 switch (reg) { 521 switch (reg) {
522 case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3: 522 case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
523 case MAX98373_R203E_AMP_PATH_GAIN:
523 case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK: 524 case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
524 case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK: 525 case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
525 case MAX98373_R20B6_BDE_CUR_STATE_READBACK: 526 case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component)
729 /* Software Reset */ 730 /* Software Reset */
730 regmap_write(max98373->regmap, 731 regmap_write(max98373->regmap,
731 MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); 732 MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
733 usleep_range(10000, 11000);
732 734
733 /* IV default slot configuration */ 735 /* IV default slot configuration */
734 regmap_write(max98373->regmap, 736 regmap_write(max98373->regmap,
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev)
817 819
818 regmap_write(max98373->regmap, 820 regmap_write(max98373->regmap,
819 MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET); 821 MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
822 usleep_range(10000, 11000);
820 regcache_cache_only(max98373->regmap, false); 823 regcache_cache_only(max98373->regmap, false);
821 regcache_sync(max98373->regmap); 824 regcache_sync(max98373->regmap);
822 return 0; 825 return 0;
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index dca82dd6e3bf..32fe76c3134a 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
64 {RT5514_ANA_CTRL_LDO10, 0x00028604}, 64 {RT5514_ANA_CTRL_LDO10, 0x00028604},
65 {RT5514_ANA_CTRL_ADCFED, 0x00000800}, 65 {RT5514_ANA_CTRL_ADCFED, 0x00000800},
66 {RT5514_ASRC_IN_CTRL1, 0x00000003}, 66 {RT5514_ASRC_IN_CTRL1, 0x00000003},
67 {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, 67 {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
68 {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, 68 {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
69}; 69};
70 70
71static const struct reg_default rt5514_reg[] = { 71static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
92 {RT5514_ASRC_IN_CTRL1, 0x00000003}, 92 {RT5514_ASRC_IN_CTRL1, 0x00000003},
93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, 93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, 94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
95 {RT5514_DOWNFILTER0_CTRL3, 0x10000352}, 95 {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
96 {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f}, 96 {RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
97 {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f}, 97 {RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
98 {RT5514_DOWNFILTER1_CTRL3, 0x10000352}, 98 {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
99 {RT5514_ANA_CTRL_LDO10, 0x00028604}, 99 {RT5514_ANA_CTRL_LDO10, 0x00028604},
100 {RT5514_ANA_CTRL_LDO18_16, 0x02000345}, 100 {RT5514_ANA_CTRL_LDO18_16, 0x02000345},
101 {RT5514_ANA_CTRL_ADC12, 0x0000a2a8}, 101 {RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 640d400ca013..afe7d5b19313 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
750} 750}
751 751
752static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0); 752static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
753static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0); 753static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
754static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0); 754static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
755static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0); 755static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
756 756
757/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */ 757/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
1114 1114
1115 /* DAC Digital Volume */ 1115 /* DAC Digital Volume */
1116 SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL, 1116 SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
1117 RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv), 1117 RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
1118 1118
1119 /* IN Boost Volume */ 1119 /* IN Boost Volume */
1120 SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL, 1120 SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
1124 SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL, 1124 SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
1125 RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1), 1125 RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
1126 SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL, 1126 SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
1127 RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv), 1127 RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
1128 1128
1129 /* ADC Boost Volume Control */ 1129 /* ADC Boost Volume Control */
1130 SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST, 1130 SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680ac78e4..6df158669420 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
117 struct sigmadsp_control *ctrl, void *data) 117 struct sigmadsp_control *ctrl, void *data)
118{ 118{
119 /* safeload loads up to 20 bytes in a atomic operation */ 119 /* safeload loads up to 20 bytes in a atomic operation */
120 if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops && 120 if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
121 sigmadsp->ops->safeload)
122 return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data, 121 return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
123 ctrl->num_bytes); 122 ctrl->num_bytes);
124 else 123 else
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 14999b999fd3..0d6145549a98 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
424 TAS6424_FAULT_PVDD_UV | 424 TAS6424_FAULT_PVDD_UV |
425 TAS6424_FAULT_VBAT_UV; 425 TAS6424_FAULT_VBAT_UV;
426 426
427 if (reg) 427 if (!reg) {
428 tas6424->last_fault1 = reg;
428 goto check_global_fault2_reg; 429 goto check_global_fault2_reg;
430 }
429 431
430 /* 432 /*
431 * Only flag errors once for a given occurrence. This is needed as 433 * Only flag errors once for a given occurrence. This is needed as
@@ -461,8 +463,10 @@ check_global_fault2_reg:
461 TAS6424_FAULT_OTSD_CH3 | 463 TAS6424_FAULT_OTSD_CH3 |
462 TAS6424_FAULT_OTSD_CH4; 464 TAS6424_FAULT_OTSD_CH4;
463 465
464 if (!reg) 466 if (!reg) {
467 tas6424->last_fault2 = reg;
465 goto check_warn_reg; 468 goto check_warn_reg;
469 }
466 470
467 if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD)) 471 if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
468 dev_crit(dev, "experienced a global overtemp shutdown\n"); 472 dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +501,10 @@ check_warn_reg:
497 TAS6424_WARN_VDD_OTW_CH3 | 501 TAS6424_WARN_VDD_OTW_CH3 |
498 TAS6424_WARN_VDD_OTW_CH4; 502 TAS6424_WARN_VDD_OTW_CH4;
499 503
500 if (!reg) 504 if (!reg) {
505 tas6424->last_warn = reg;
501 goto out; 506 goto out;
507 }
502 508
503 if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV)) 509 if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
504 dev_warn(dev, "experienced a VDD under voltage condition\n"); 510 dev_warn(dev, "experienced a VDD under voltage condition\n");
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c2c5ba..79541960f45d 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/i2c.h> 15#include <linux/i2c.h>
16#include <linux/acpi.h>
16 17
17#include "wm8804.h" 18#include "wm8804.h"
18 19
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
40}; 41};
41MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id); 42MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
42 43
44#if defined(CONFIG_OF)
43static const struct of_device_id wm8804_of_match[] = { 45static const struct of_device_id wm8804_of_match[] = {
44 { .compatible = "wlf,wm8804", }, 46 { .compatible = "wlf,wm8804", },
45 { } 47 { }
46}; 48};
47MODULE_DEVICE_TABLE(of, wm8804_of_match); 49MODULE_DEVICE_TABLE(of, wm8804_of_match);
50#endif
51
52#ifdef CONFIG_ACPI
53static const struct acpi_device_id wm8804_acpi_match[] = {
54 { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
55 { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
56 { },
57};
58MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
59#endif
48 60
49static struct i2c_driver wm8804_i2c_driver = { 61static struct i2c_driver wm8804_i2c_driver = {
50 .driver = { 62 .driver = {
51 .name = "wm8804", 63 .name = "wm8804",
52 .pm = &wm8804_pm, 64 .pm = &wm8804_pm,
53 .of_match_table = wm8804_of_match, 65 .of_match_table = of_match_ptr(wm8804_of_match),
66 .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
54 }, 67 },
55 .probe = wm8804_i2c_probe, 68 .probe = wm8804_i2c_probe,
56 .remove = wm8804_i2c_remove, 69 .remove = wm8804_i2c_remove,
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 953d94d50586..ade34c26ad2f 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
719 719
720static struct platform_driver wm9712_component_driver = { 720static struct platform_driver wm9712_component_driver = {
721 .driver = { 721 .driver = {
722 .name = "wm9712-component", 722 .name = "wm9712-codec",
723 }, 723 },
724 724
725 .probe = wm9712_probe, 725 .probe = wm9712_probe,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index d32844f94d74..b6dc524830b2 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
575 BYT_RT5640_MONO_SPEAKER | 575 BYT_RT5640_MONO_SPEAKER |
576 BYT_RT5640_MCLK_EN), 576 BYT_RT5640_MCLK_EN),
577 }, 577 },
578 { /* Linx Linx7 tablet */
579 .matches = {
580 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
581 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
582 },
583 .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
584 BYT_RT5640_MONO_SPEAKER |
585 BYT_RT5640_JD_NOT_INV |
586 BYT_RT5640_SSP0_AIF1 |
587 BYT_RT5640_MCLK_EN),
588 },
578 { /* MSI S100 tablet */ 589 { /* MSI S100 tablet */
579 .matches = { 590 .matches = {
580 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), 591 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
602 BYT_RT5640_SSP0_AIF1 | 613 BYT_RT5640_SSP0_AIF1 |
603 BYT_RT5640_MCLK_EN), 614 BYT_RT5640_MCLK_EN),
604 }, 615 },
616 { /* Onda v975w */
617 .matches = {
618 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
619 DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
620 /* The above are too generic, also match BIOS info */
621 DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
622 DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
623 },
624 .driver_data = (void *)(BYT_RT5640_IN1_MAP |
625 BYT_RT5640_JD_SRC_JD2_IN4N |
626 BYT_RT5640_OVCD_TH_2000UA |
627 BYT_RT5640_OVCD_SF_0P75 |
628 BYT_RT5640_DIFF_MIC |
629 BYT_RT5640_MCLK_EN),
630 },
605 { /* Pipo W4 */ 631 { /* Pipo W4 */
606 .matches = { 632 .matches = {
607 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), 633 DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index dce649485649..1d17be0f78a0 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus)
834 return -ENXIO; 834 return -ENXIO;
835 } 835 }
836 836
837 skl_init_chip(bus, true); 837 snd_hdac_bus_reset_link(bus, true);
838 838
839 snd_hdac_bus_parse_capabilities(bus); 839 snd_hdac_bus_parse_capabilities(bus);
840 840
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index dc94c5c53788..c6b51571be94 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
960{ 960{
961 int i; 961 int i;
962 962
963 for (i = 0; i < MAX_SESSIONS; i++) 963 for (i = 0; i < MAX_SESSIONS; i++) {
964 routing_data->sessions[i].port_id = -1; 964 routing_data->sessions[i].port_id = -1;
965 routing_data->sessions[i].fedai_id = -1;
966 }
965 967
966 return 0; 968 return 0;
967} 969}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 3a3064dda57f..051f96405346 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
462 goto rsnd_adg_get_clkout_end; 462 goto rsnd_adg_get_clkout_end;
463 463
464 req_size = prop->length / sizeof(u32); 464 req_size = prop->length / sizeof(u32);
465 if (req_size > REQ_SIZE) {
466 dev_err(dev,
467 "too many clock-frequency, use top %d\n", REQ_SIZE);
468 req_size = REQ_SIZE;
469 }
465 470
466 of_property_read_u32_array(np, "clock-frequency", req_rate, req_size); 471 of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
467 req_48kHz_rate = 0; 472 req_48kHz_rate = 0;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f8425d8b44d2..d23c2bbff0cf 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status,
478 (func_call && (mod)->ops->fn) ? #fn : ""); \ 478 (func_call && (mod)->ops->fn) ? #fn : ""); \
479 if (func_call && (mod)->ops->fn) \ 479 if (func_call && (mod)->ops->fn) \
480 tmp = (mod)->ops->fn(mod, io, param); \ 480 tmp = (mod)->ops->fn(mod, io, param); \
481 if (tmp) \ 481 if (tmp && (tmp != -EPROBE_DEFER)) \
482 dev_err(dev, "%s[%d] : %s error %d\n", \ 482 dev_err(dev, "%s[%d] : %s error %d\n", \
483 rsnd_mod_name(mod), rsnd_mod_id(mod), \ 483 rsnd_mod_name(mod), rsnd_mod_id(mod), \
484 #fn, tmp); \ 484 #fn, tmp); \
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
958 rsnd_dai_stream_quit(io); 958 rsnd_dai_stream_quit(io);
959} 959}
960 960
961static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
962 struct snd_soc_dai *dai)
963{
964 struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
965 struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
966 struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
967
968 return rsnd_dai_call(prepare, io, priv);
969}
970
961static const struct snd_soc_dai_ops rsnd_soc_dai_ops = { 971static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
962 .startup = rsnd_soc_dai_startup, 972 .startup = rsnd_soc_dai_startup,
963 .shutdown = rsnd_soc_dai_shutdown, 973 .shutdown = rsnd_soc_dai_shutdown,
964 .trigger = rsnd_soc_dai_trigger, 974 .trigger = rsnd_soc_dai_trigger,
965 .set_fmt = rsnd_soc_dai_set_fmt, 975 .set_fmt = rsnd_soc_dai_set_fmt,
966 .set_tdm_slot = rsnd_soc_set_dai_tdm_slot, 976 .set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
977 .prepare = rsnd_soc_dai_prepare,
967}; 978};
968 979
969void rsnd_parse_connect_common(struct rsnd_dai *rdai, 980void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1550,6 +1561,14 @@ exit_snd_probe:
1550 rsnd_dai_call(remove, &rdai->capture, priv); 1561 rsnd_dai_call(remove, &rdai->capture, priv);
1551 } 1562 }
1552 1563
1564 /*
1565 * adg is very special mod which can't use rsnd_dai_call(remove),
1566 * and it registers ADG clock on probe.
1567 * It should be unregister if probe failed.
1568 * Mainly it is assuming -EPROBE_DEFER case
1569 */
1570 rsnd_adg_remove(priv);
1571
1553 return ret; 1572 return ret;
1554} 1573}
1555 1574
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index fe63ef8600d0..d65ea7bc4dac 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
241 /* try to get DMAEngine channel */ 241 /* try to get DMAEngine channel */
242 chan = rsnd_dmaen_request_channel(io, mod_from, mod_to); 242 chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
243 if (IS_ERR_OR_NULL(chan)) { 243 if (IS_ERR_OR_NULL(chan)) {
244 /* Let's follow when -EPROBE_DEFER case */
245 if (PTR_ERR(chan) == -EPROBE_DEFER)
246 return PTR_ERR(chan);
247
244 /* 248 /*
245 * DMA failed. try to PIO mode 249 * DMA failed. try to PIO mode
246 * see 250 * see
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 96d93330b1e1..8f7a0abfa751 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -280,6 +280,9 @@ struct rsnd_mod_ops {
280 int (*nolock_stop)(struct rsnd_mod *mod, 280 int (*nolock_stop)(struct rsnd_mod *mod,
281 struct rsnd_dai_stream *io, 281 struct rsnd_dai_stream *io,
282 struct rsnd_priv *priv); 282 struct rsnd_priv *priv);
283 int (*prepare)(struct rsnd_mod *mod,
284 struct rsnd_dai_stream *io,
285 struct rsnd_priv *priv);
283}; 286};
284 287
285struct rsnd_dai_stream; 288struct rsnd_dai_stream;
@@ -309,6 +312,7 @@ struct rsnd_mod {
309 * H 0: fallback 312 * H 0: fallback
310 * H 0: hw_params 313 * H 0: hw_params
311 * H 0: pointer 314 * H 0: pointer
315 * H 0: prepare
312 */ 316 */
313#define __rsnd_mod_shift_nolock_start 0 317#define __rsnd_mod_shift_nolock_start 0
314#define __rsnd_mod_shift_nolock_stop 0 318#define __rsnd_mod_shift_nolock_stop 0
@@ -323,6 +327,7 @@ struct rsnd_mod {
323#define __rsnd_mod_shift_fallback 28 /* always called */ 327#define __rsnd_mod_shift_fallback 28 /* always called */
324#define __rsnd_mod_shift_hw_params 28 /* always called */ 328#define __rsnd_mod_shift_hw_params 28 /* always called */
325#define __rsnd_mod_shift_pointer 28 /* always called */ 329#define __rsnd_mod_shift_pointer 28 /* always called */
330#define __rsnd_mod_shift_prepare 28 /* always called */
326 331
327#define __rsnd_mod_add_probe 0 332#define __rsnd_mod_add_probe 0
328#define __rsnd_mod_add_remove 0 333#define __rsnd_mod_add_remove 0
@@ -337,6 +342,7 @@ struct rsnd_mod {
337#define __rsnd_mod_add_fallback 0 342#define __rsnd_mod_add_fallback 0
338#define __rsnd_mod_add_hw_params 0 343#define __rsnd_mod_add_hw_params 0
339#define __rsnd_mod_add_pointer 0 344#define __rsnd_mod_add_pointer 0
345#define __rsnd_mod_add_prepare 0
340 346
341#define __rsnd_mod_call_probe 0 347#define __rsnd_mod_call_probe 0
342#define __rsnd_mod_call_remove 0 348#define __rsnd_mod_call_remove 0
@@ -351,6 +357,7 @@ struct rsnd_mod {
351#define __rsnd_mod_call_pointer 0 357#define __rsnd_mod_call_pointer 0
352#define __rsnd_mod_call_nolock_start 0 358#define __rsnd_mod_call_nolock_start 0
353#define __rsnd_mod_call_nolock_stop 1 359#define __rsnd_mod_call_nolock_stop 1
360#define __rsnd_mod_call_prepare 0
354 361
355#define rsnd_mod_to_priv(mod) ((mod)->priv) 362#define rsnd_mod_to_priv(mod) ((mod)->priv)
356#define rsnd_mod_name(mod) ((mod)->ops->name) 363#define rsnd_mod_name(mod) ((mod)->ops->name)
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 8304e4ec9242..3f880ec66459 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
283 if (rsnd_ssi_is_multi_slave(mod, io)) 283 if (rsnd_ssi_is_multi_slave(mod, io))
284 return 0; 284 return 0;
285 285
286 if (ssi->usrcnt > 1) { 286 if (ssi->rate) {
287 if (ssi->rate != rate) { 287 if (ssi->rate != rate) {
288 dev_err(dev, "SSI parent/child should use same rate\n"); 288 dev_err(dev, "SSI parent/child should use same rate\n");
289 return -EINVAL; 289 return -EINVAL;
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
434 struct rsnd_priv *priv) 434 struct rsnd_priv *priv)
435{ 435{
436 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 436 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
437 int ret;
438 437
439 if (!rsnd_ssi_is_run_mods(mod, io)) 438 if (!rsnd_ssi_is_run_mods(mod, io))
440 return 0; 439 return 0;
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
443 442
444 rsnd_mod_power_on(mod); 443 rsnd_mod_power_on(mod);
445 444
446 ret = rsnd_ssi_master_clk_start(mod, io);
447 if (ret < 0)
448 return ret;
449
450 rsnd_ssi_config_init(mod, io); 445 rsnd_ssi_config_init(mod, io);
451 446
452 rsnd_ssi_register_setup(mod); 447 rsnd_ssi_register_setup(mod);
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
852 return 0; 847 return 0;
853} 848}
854 849
850static int rsnd_ssi_prepare(struct rsnd_mod *mod,
851 struct rsnd_dai_stream *io,
852 struct rsnd_priv *priv)
853{
854 return rsnd_ssi_master_clk_start(mod, io);
855}
856
855static struct rsnd_mod_ops rsnd_ssi_pio_ops = { 857static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
856 .name = SSI_NAME, 858 .name = SSI_NAME,
857 .probe = rsnd_ssi_common_probe, 859 .probe = rsnd_ssi_common_probe,
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
864 .pointer = rsnd_ssi_pio_pointer, 866 .pointer = rsnd_ssi_pio_pointer,
865 .pcm_new = rsnd_ssi_pcm_new, 867 .pcm_new = rsnd_ssi_pcm_new,
866 .hw_params = rsnd_ssi_hw_params, 868 .hw_params = rsnd_ssi_hw_params,
869 .prepare = rsnd_ssi_prepare,
867}; 870};
868 871
869static int rsnd_ssi_dma_probe(struct rsnd_mod *mod, 872static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
940 .pcm_new = rsnd_ssi_pcm_new, 943 .pcm_new = rsnd_ssi_pcm_new,
941 .fallback = rsnd_ssi_fallback, 944 .fallback = rsnd_ssi_fallback,
942 .hw_params = rsnd_ssi_hw_params, 945 .hw_params = rsnd_ssi_hw_params,
946 .prepare = rsnd_ssi_prepare,
943}; 947};
944 948
945int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod) 949int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9cfe10d8040c..473eefe8658e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
1447 sink = codec_dai->playback_widget; 1447 sink = codec_dai->playback_widget;
1448 source = cpu_dai->capture_widget; 1448 source = cpu_dai->capture_widget;
1449 if (sink && source) { 1449 if (sink && source) {
1450 ret = snd_soc_dapm_new_pcm(card, dai_link->params, 1450 ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
1451 dai_link->num_params, 1451 dai_link->num_params,
1452 source, sink); 1452 source, sink);
1453 if (ret != 0) { 1453 if (ret != 0) {
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
1460 sink = cpu_dai->playback_widget; 1460 sink = cpu_dai->playback_widget;
1461 source = codec_dai->capture_widget; 1461 source = codec_dai->capture_widget;
1462 if (sink && source) { 1462 if (sink && source) {
1463 ret = snd_soc_dapm_new_pcm(card, dai_link->params, 1463 ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
1464 dai_link->num_params, 1464 dai_link->num_params,
1465 source, sink); 1465 source, sink);
1466 if (ret != 0) { 1466 if (ret != 0) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e96793050c9..461d951917c0 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3652{ 3652{
3653 struct snd_soc_dapm_path *source_p, *sink_p; 3653 struct snd_soc_dapm_path *source_p, *sink_p;
3654 struct snd_soc_dai *source, *sink; 3654 struct snd_soc_dai *source, *sink;
3655 struct snd_soc_pcm_runtime *rtd = w->priv;
3655 const struct snd_soc_pcm_stream *config = w->params + w->params_select; 3656 const struct snd_soc_pcm_stream *config = w->params + w->params_select;
3656 struct snd_pcm_substream substream; 3657 struct snd_pcm_substream substream;
3657 struct snd_pcm_hw_params *params = NULL; 3658 struct snd_pcm_hw_params *params = NULL;
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3711 goto out; 3712 goto out;
3712 } 3713 }
3713 substream.runtime = runtime; 3714 substream.runtime = runtime;
3715 substream.private_data = rtd;
3714 3716
3715 switch (event) { 3717 switch (event) {
3716 case SND_SOC_DAPM_PRE_PMU: 3718 case SND_SOC_DAPM_PRE_PMU:
@@ -3895,6 +3897,7 @@ outfree_w_param:
3895} 3897}
3896 3898
3897int snd_soc_dapm_new_pcm(struct snd_soc_card *card, 3899int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
3900 struct snd_soc_pcm_runtime *rtd,
3898 const struct snd_soc_pcm_stream *params, 3901 const struct snd_soc_pcm_stream *params,
3899 unsigned int num_params, 3902 unsigned int num_params,
3900 struct snd_soc_dapm_widget *source, 3903 struct snd_soc_dapm_widget *source,
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
3963 3966
3964 w->params = params; 3967 w->params = params;
3965 w->num_params = num_params; 3968 w->num_params = num_params;
3969 w->priv = rtd;
3966 3970
3967 ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL); 3971 ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
3968 if (ret) 3972 if (ret)
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 16e006f708ca..4602464ebdfb 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
27#define __KVM_HAVE_GUEST_DEBUG 27#define __KVM_HAVE_GUEST_DEBUG
28#define __KVM_HAVE_IRQ_LINE 28#define __KVM_HAVE_IRQ_LINE
29#define __KVM_HAVE_READONLY_MEM 29#define __KVM_HAVE_READONLY_MEM
30#define __KVM_HAVE_VCPU_EVENTS
30 31
31#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 32#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
32 33
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
125struct kvm_arch_memory_slot { 126struct kvm_arch_memory_slot {
126}; 127};
127 128
129/* for KVM_GET/SET_VCPU_EVENTS */
130struct kvm_vcpu_events {
131 struct {
132 __u8 serror_pending;
133 __u8 serror_has_esr;
134 /* Align it to 8 bytes */
135 __u8 pad[6];
136 __u64 serror_esr;
137 } exception;
138 __u32 reserved[12];
139};
140
128/* If you need to interpret the index values, here is the key: */ 141/* If you need to interpret the index values, here is the key: */
129#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 142#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
130#define KVM_REG_ARM_COPROC_SHIFT 16 143#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 4e76630dd655..97c3478ee6e7 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,7 @@
39#define __KVM_HAVE_GUEST_DEBUG 39#define __KVM_HAVE_GUEST_DEBUG
40#define __KVM_HAVE_IRQ_LINE 40#define __KVM_HAVE_IRQ_LINE
41#define __KVM_HAVE_READONLY_MEM 41#define __KVM_HAVE_READONLY_MEM
42#define __KVM_HAVE_VCPU_EVENTS
42 43
43#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 44#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
44 45
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
154struct kvm_arch_memory_slot { 155struct kvm_arch_memory_slot {
155}; 156};
156 157
158/* for KVM_GET/SET_VCPU_EVENTS */
159struct kvm_vcpu_events {
160 struct {
161 __u8 serror_pending;
162 __u8 serror_has_esr;
163 /* Align it to 8 bytes */
164 __u8 pad[6];
165 __u64 serror_esr;
166 } exception;
167 __u32 reserved[12];
168};
169
157/* If you need to interpret the index values, here is the key: */ 170/* If you need to interpret the index values, here is the key: */
158#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 171#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
159#define KVM_REG_ARM_COPROC_SHIFT 16 172#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 4cdaa55fabfe..9a50f02b9894 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -4,7 +4,7 @@
4/* 4/*
5 * KVM s390 specific structures and definitions 5 * KVM s390 specific structures and definitions
6 * 6 *
7 * Copyright IBM Corp. 2008 7 * Copyright IBM Corp. 2008, 2018
8 * 8 *
9 * Author(s): Carsten Otte <cotte@de.ibm.com> 9 * Author(s): Carsten Otte <cotte@de.ibm.com>
10 * Christian Borntraeger <borntraeger@de.ibm.com> 10 * Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
225#define KVM_SYNC_FPRS (1UL << 8) 225#define KVM_SYNC_FPRS (1UL << 8)
226#define KVM_SYNC_GSCB (1UL << 9) 226#define KVM_SYNC_GSCB (1UL << 9)
227#define KVM_SYNC_BPBC (1UL << 10) 227#define KVM_SYNC_BPBC (1UL << 10)
228#define KVM_SYNC_ETOKEN (1UL << 11)
228/* length and alignment of the sdnx as a power of two */ 229/* length and alignment of the sdnx as a power of two */
229#define SDNXC 8 230#define SDNXC 8
230#define SDNXL (1UL << SDNXC) 231#define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
258 struct { 259 struct {
259 __u64 reserved1[2]; 260 __u64 reserved1[2];
260 __u64 gscb[4]; 261 __u64 gscb[4];
262 __u64 etoken;
263 __u64 etoken_extension;
261 }; 264 };
262 }; 265 };
263}; 266};
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index c535c2fdea13..86299efa804a 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -378,4 +378,41 @@ struct kvm_sync_regs {
378#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) 378#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
379#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) 379#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
380 380
381#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
382#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
383
384#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
385#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
386
387struct kvm_vmx_nested_state {
388 __u64 vmxon_pa;
389 __u64 vmcs_pa;
390
391 struct {
392 __u16 flags;
393 } smm;
394};
395
396/* for KVM_CAP_NESTED_STATE */
397struct kvm_nested_state {
398 /* KVM_STATE_* flags */
399 __u16 flags;
400
401 /* 0 for VMX, 1 for SVM. */
402 __u16 format;
403
404 /* 128 for SVM, 128 + VMCS size for VMX. */
405 __u32 size;
406
407 union {
408 /* VMXON, VMCS */
409 struct kvm_vmx_nested_state vmx;
410
411 /* Pad the header to 128 bytes. */
412 __u8 pad[120];
413 };
414
415 __u8 data[0];
416};
417
381#endif /* _ASM_X86_KVM_H */ 418#endif /* _ASM_X86_KVM_H */
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b2ec20e562bd..b455930a3eaf 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
68 [BPF_MAP_TYPE_DEVMAP] = "devmap", 68 [BPF_MAP_TYPE_DEVMAP] = "devmap",
69 [BPF_MAP_TYPE_SOCKMAP] = "sockmap", 69 [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
70 [BPF_MAP_TYPE_CPUMAP] = "cpumap", 70 [BPF_MAP_TYPE_CPUMAP] = "cpumap",
71 [BPF_MAP_TYPE_XSKMAP] = "xskmap",
71 [BPF_MAP_TYPE_SOCKHASH] = "sockhash", 72 [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
72 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage", 73 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
73}; 74};
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index 1832100d1b27..6d41323be291 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
194 } 194 }
195 195
196 while (argc) { 196 while (argc) {
197 if (argc < 2) 197 if (argc < 2) {
198 BAD_ARG(); 198 BAD_ARG();
199 goto err_close_map;
200 }
199 201
200 if (is_prefix(*argv, "cpu")) { 202 if (is_prefix(*argv, "cpu")) {
201 char *endptr; 203 char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
221 NEXT_ARG(); 223 NEXT_ARG();
222 } else { 224 } else {
223 BAD_ARG(); 225 BAD_ARG();
226 goto err_close_map;
224 } 227 }
225 228
226 do_all = false; 229 do_all = false;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index dbf6e8bd98ba..bbb2a8ef367c 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
286 * Found a match; just move the remaining 286 * Found a match; just move the remaining
287 * entries up. 287 * entries up.
288 */ 288 */
289 if (i == num_records) { 289 if (i == (num_records - 1)) {
290 kvp_file_info[pool].num_records--; 290 kvp_file_info[pool].num_records--;
291 kvp_update_file(pool); 291 kvp_update_file(pool);
292 return 0; 292 return 0;
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 6b0c36a58fcb..e56997288f2b 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -30,9 +30,12 @@ struct task_struct {
30 struct held_lock held_locks[MAX_LOCK_DEPTH]; 30 struct held_lock held_locks[MAX_LOCK_DEPTH];
31 gfp_t lockdep_reclaim_gfp; 31 gfp_t lockdep_reclaim_gfp;
32 int pid; 32 int pid;
33 int state;
33 char comm[17]; 34 char comm[17];
34}; 35};
35 36
37#define TASK_RUNNING 0
38
36extern struct task_struct *__curr(void); 39extern struct task_struct *__curr(void);
37 40
38#define current (__curr()) 41#define current (__curr())
diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/include/linux/nmi.h
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
index 664ced8cb1b0..e907ba6f15e5 100644
--- a/tools/include/tools/libc_compat.h
+++ b/tools/include/tools/libc_compat.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
2/* Copyright (C) 2018 Netronome Systems, Inc. */ 2/* Copyright (C) 2018 Netronome Systems, Inc. */
3 3
4#ifndef __TOOLS_LIBC_COMPAT_H 4#ifndef __TOOLS_LIBC_COMPAT_H
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
734__SYSCALL(__NR_statx, sys_statx) 734__SYSCALL(__NR_statx, sys_statx)
735#define __NR_io_pgetevents 292 735#define __NR_io_pgetevents 292
736__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 736__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
737#define __NR_rseq 293
738__SYSCALL(__NR_rseq, sys_rseq)
737 739
738#undef __NR_syscalls 740#undef __NR_syscalls
739#define __NR_syscalls 293 741#define __NR_syscalls 294
740 742
741/* 743/*
742 * 32 bit systems traditionally used different 744 * 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
687 */ 687 */
688#define DRM_CLIENT_CAP_ASPECT_RATIO 4 688#define DRM_CLIENT_CAP_ASPECT_RATIO 4
689 689
690/**
691 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
692 *
693 * If set to 1, the DRM core will expose special connectors to be used for
694 * writing back to memory the scene setup in the commit. Depends on client
695 * also supporting DRM_CLIENT_CAP_ATOMIC
696 */
697#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
698
690/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 699/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
691struct drm_set_client_cap { 700struct drm_set_client_cap {
692 __u64 capability; 701 __u64 capability;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
164 IFLA_CARRIER_UP_COUNT, 164 IFLA_CARRIER_UP_COUNT,
165 IFLA_CARRIER_DOWN_COUNT, 165 IFLA_CARRIER_DOWN_COUNT,
166 IFLA_NEW_IFINDEX, 166 IFLA_NEW_IFINDEX,
167 IFLA_MIN_MTU,
168 IFLA_MAX_MTU,
167 __IFLA_MAX 169 __IFLA_MAX
168}; 170};
169 171
@@ -334,6 +336,7 @@ enum {
334 IFLA_BRPORT_GROUP_FWD_MASK, 336 IFLA_BRPORT_GROUP_FWD_MASK,
335 IFLA_BRPORT_NEIGH_SUPPRESS, 337 IFLA_BRPORT_NEIGH_SUPPRESS,
336 IFLA_BRPORT_ISOLATED, 338 IFLA_BRPORT_ISOLATED,
339 IFLA_BRPORT_BACKUP_PORT,
337 __IFLA_BRPORT_MAX 340 __IFLA_BRPORT_MAX
338}; 341};
339#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) 342#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
459 462
460#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1) 463#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
461 464
465/* XFRM section */
466enum {
467 IFLA_XFRM_UNSPEC,
468 IFLA_XFRM_LINK,
469 IFLA_XFRM_IF_ID,
470 __IFLA_XFRM_MAX
471};
472
473#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
474
462enum macsec_validation_type { 475enum macsec_validation_type {
463 MACSEC_VALIDATE_DISABLED = 0, 476 MACSEC_VALIDATE_DISABLED = 0,
464 MACSEC_VALIDATE_CHECK = 1, 477 MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
920 XDP_ATTACHED_DRV, 933 XDP_ATTACHED_DRV,
921 XDP_ATTACHED_SKB, 934 XDP_ATTACHED_SKB,
922 XDP_ATTACHED_HW, 935 XDP_ATTACHED_HW,
936 XDP_ATTACHED_MULTI,
923}; 937};
924 938
925enum { 939enum {
@@ -928,6 +942,9 @@ enum {
928 IFLA_XDP_ATTACHED, 942 IFLA_XDP_ATTACHED,
929 IFLA_XDP_FLAGS, 943 IFLA_XDP_FLAGS,
930 IFLA_XDP_PROG_ID, 944 IFLA_XDP_PROG_ID,
945 IFLA_XDP_DRV_PROG_ID,
946 IFLA_XDP_SKB_PROG_ID,
947 IFLA_XDP_HW_PROG_ID,
931 __IFLA_XDP_MAX, 948 __IFLA_XDP_MAX,
932}; 949};
933 950
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index b6270a3b38e9..07548de5c988 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt {
949#define KVM_CAP_GET_MSR_FEATURES 153 949#define KVM_CAP_GET_MSR_FEATURES 153
950#define KVM_CAP_HYPERV_EVENTFD 154 950#define KVM_CAP_HYPERV_EVENTFD 154
951#define KVM_CAP_HYPERV_TLBFLUSH 155 951#define KVM_CAP_HYPERV_TLBFLUSH 155
952#define KVM_CAP_S390_HPAGE_1M 156
953#define KVM_CAP_NESTED_STATE 157
954#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
952 955
953#ifdef KVM_CAP_IRQ_ROUTING 956#ifdef KVM_CAP_IRQ_ROUTING
954 957
@@ -1391,6 +1394,9 @@ struct kvm_enc_region {
1391/* Available with KVM_CAP_HYPERV_EVENTFD */ 1394/* Available with KVM_CAP_HYPERV_EVENTFD */
1392#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) 1395#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
1393 1396
1397/* Available with KVM_CAP_NESTED_STATE */
1398#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
1399#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
1394 1400
1395/* Secure Encrypted Virtualization command */ 1401/* Secure Encrypted Virtualization command */
1396enum sev_cmd_id { 1402enum sev_cmd_id {
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index eeb787b1c53c..f35eb72739c0 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
144 144
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
146 146
147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, 147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
148}; 148};
149 149
150/* 150/*
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index c51f8e5cc608..84c3de89696a 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
65}; 65};
66 66
67#define VHOST_IOTLB_MSG 0x1 67#define VHOST_IOTLB_MSG 0x1
68#define VHOST_IOTLB_MSG_V2 0x2
68 69
69struct vhost_msg { 70struct vhost_msg {
70 int type; 71 int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
74 }; 75 };
75}; 76};
76 77
78struct vhost_msg_v2 {
79 __u32 type;
80 __u32 reserved;
81 union {
82 struct vhost_iotlb_msg iotlb;
83 __u8 padding[64];
84 };
85};
86
77struct vhost_memory_region { 87struct vhost_memory_region {
78 __u64 guest_phys_addr; 88 __u64 guest_phys_addr;
79 __u64 memory_size; /* bytes */ 89 __u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
160#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \ 170#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
161 struct vhost_vring_state) 171 struct vhost_vring_state)
162 172
173/* Set or get vhost backend capability */
174
175/* Use message type V2 */
176#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
177
178#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
179#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
180
163/* VHOST_NET specific defines */ 181/* VHOST_NET specific defines */
164 182
165/* Attach virtio net ring to a raw socket, or tap device. 183/* Attach virtio net ring to a raw socket, or tap device.
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 56c4b3f8a01b..439b8a27488d 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
759 if len(vms) == 0: 759 if len(vms) == 0:
760 self.do_read = False 760 self.do_read = False
761 761
762 self.paths = filter(lambda x: "{}-".format(pid) in x, vms) 762 self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
763 763
764 else: 764 else:
765 self.paths = [] 765 self.paths = []
766 self.do_read = True 766 self.do_read = True
767 self.reset() 767
768 def _verify_paths(self):
769 """Remove invalid paths"""
770 for path in self.paths:
771 if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
772 self.paths.remove(path)
773 continue
768 774
769 def read(self, reset=0, by_guest=0): 775 def read(self, reset=0, by_guest=0):
770 """Returns a dict with format:'file name / field -> current value'. 776 """Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
780 # If no debugfs filtering support is available, then don't read. 786 # If no debugfs filtering support is available, then don't read.
781 if not self.do_read: 787 if not self.do_read:
782 return results 788 return results
789 self._verify_paths()
783 790
784 paths = self.paths 791 paths = self.paths
785 if self._pid == 0: 792 if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
1096 pid = self.stats.pid_filter 1103 pid = self.stats.pid_filter
1097 self.screen.erase() 1104 self.screen.erase()
1098 gname = self.get_gname_from_pid(pid) 1105 gname = self.get_gname_from_pid(pid)
1106 self._gname = gname
1099 if gname: 1107 if gname:
1100 gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...' 1108 gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
1101 if len(gname) > MAX_GUEST_NAME_LEN 1109 if len(gname) > MAX_GUEST_NAME_LEN
1102 else gname)) 1110 else gname))
1103 if pid > 0: 1111 if pid > 0:
1104 self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}' 1112 self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
1105 .format(pid, gname), curses.A_BOLD)
1106 else: 1113 else:
1107 self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD) 1114 self._headline = 'kvm statistics - summary'
1115 self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
1108 if self.stats.fields_filter: 1116 if self.stats.fields_filter:
1109 regex = self.stats.fields_filter 1117 regex = self.stats.fields_filter
1110 if len(regex) > MAX_REGEX_LEN: 1118 if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
1162 1170
1163 return sorted_items 1171 return sorted_items
1164 1172
1173 if not self._is_running_guest(self.stats.pid_filter):
1174 if self._gname:
1175 try: # ...to identify the guest by name in case it's back
1176 pids = self.get_pid_from_gname(self._gname)
1177 if len(pids) == 1:
1178 self._refresh_header(pids[0])
1179 self._update_pid(pids[0])
1180 return
1181 except:
1182 pass
1183 self._display_guest_dead()
1184 # leave final data on screen
1185 return
1165 row = 3 1186 row = 3
1166 self.screen.move(row, 0) 1187 self.screen.move(row, 0)
1167 self.screen.clrtobot() 1188 self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
1184 # print events 1205 # print events
1185 tavg = 0 1206 tavg = 0
1186 tcur = 0 1207 tcur = 0
1208 guest_removed = False
1187 for key, values in get_sorted_events(self, stats): 1209 for key, values in get_sorted_events(self, stats):
1188 if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0): 1210 if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
1189 break 1211 break
@@ -1191,7 +1213,10 @@ class Tui(object):
1191 key = self.get_gname_from_pid(key) 1213 key = self.get_gname_from_pid(key)
1192 if not key: 1214 if not key:
1193 continue 1215 continue
1194 cur = int(round(values.delta / sleeptime)) if values.delta else '' 1216 cur = int(round(values.delta / sleeptime)) if values.delta else 0
1217 if cur < 0:
1218 guest_removed = True
1219 continue
1195 if key[0] != ' ': 1220 if key[0] != ' ':
1196 if values.delta: 1221 if values.delta:
1197 tcur += values.delta 1222 tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
1204 values.value * 100 / float(ltotal), cur)) 1229 values.value * 100 / float(ltotal), cur))
1205 row += 1 1230 row += 1
1206 if row == 3: 1231 if row == 3:
1207 self.screen.addstr(4, 1, 'No matching events reported yet') 1232 if guest_removed:
1233 self.screen.addstr(4, 1, 'Guest removed, updating...')
1234 else:
1235 self.screen.addstr(4, 1, 'No matching events reported yet')
1208 if row > 4: 1236 if row > 4:
1209 tavg = int(round(tcur / sleeptime)) if tcur > 0 else '' 1237 tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
1210 self.screen.addstr(row, 1, '%-40s %10d %8s' % 1238 self.screen.addstr(row, 1, '%-40s %10d %8s' %
1211 ('Total', total, tavg), curses.A_BOLD) 1239 ('Total', total, tavg), curses.A_BOLD)
1212 self.screen.refresh() 1240 self.screen.refresh()
1213 1241
1242 def _display_guest_dead(self):
1243 marker = ' Guest is DEAD '
1244 y = min(len(self._headline), 80 - len(marker))
1245 self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
1246
1214 def _show_msg(self, text): 1247 def _show_msg(self, text):
1215 """Display message centered text and exit on key press""" 1248 """Display message centered text and exit on key press"""
1216 hint = 'Press any key to continue' 1249 hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
1219 (x, term_width) = self.screen.getmaxyx() 1252 (x, term_width) = self.screen.getmaxyx()
1220 row = 2 1253 row = 2
1221 for line in text: 1254 for line in text:
1222 start = (term_width - len(line)) / 2 1255 start = (term_width - len(line)) // 2
1223 self.screen.addstr(row, start, line) 1256 self.screen.addstr(row, start, line)
1224 row += 1 1257 row += 1
1225 self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint, 1258 self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
1226 curses.A_STANDOUT) 1259 curses.A_STANDOUT)
1227 self.screen.getkey() 1260 self.screen.getkey()
1228 1261
@@ -1319,6 +1352,12 @@ class Tui(object):
1319 msg = '"' + str(val) + '": Invalid value' 1352 msg = '"' + str(val) + '": Invalid value'
1320 self._refresh_header() 1353 self._refresh_header()
1321 1354
1355 def _is_running_guest(self, pid):
1356 """Check if pid is still a running process."""
1357 if not pid:
1358 return True
1359 return os.path.isdir(os.path.join('/proc/', str(pid)))
1360
1322 def _show_vm_selection_by_guest(self): 1361 def _show_vm_selection_by_guest(self):
1323 """Draws guest selection mask. 1362 """Draws guest selection mask.
1324 1363
@@ -1346,7 +1385,7 @@ class Tui(object):
1346 if not guest or guest == '0': 1385 if not guest or guest == '0':
1347 break 1386 break
1348 if guest.isdigit(): 1387 if guest.isdigit():
1349 if not os.path.isdir(os.path.join('/proc/', guest)): 1388 if not self._is_running_guest(guest):
1350 msg = '"' + guest + '": Not a running process' 1389 msg = '"' + guest + '": Not a running process'
1351 continue 1390 continue
1352 pid = int(guest) 1391 pid = int(guest)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 13a861135127..6eb9bacd1948 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2abd0f112627..bdb94939fd60 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -50,6 +50,7 @@
50#include "libbpf.h" 50#include "libbpf.h"
51#include "bpf.h" 51#include "bpf.h"
52#include "btf.h" 52#include "btf.h"
53#include "str_error.h"
53 54
54#ifndef EM_BPF 55#ifndef EM_BPF
55#define EM_BPF 247 56#define EM_BPF 247
@@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
469 obj->efile.fd = open(obj->path, O_RDONLY); 470 obj->efile.fd = open(obj->path, O_RDONLY);
470 if (obj->efile.fd < 0) { 471 if (obj->efile.fd < 0) {
471 char errmsg[STRERR_BUFSIZE]; 472 char errmsg[STRERR_BUFSIZE];
472 char *cp = strerror_r(errno, errmsg, sizeof(errmsg)); 473 char *cp = str_error(errno, errmsg, sizeof(errmsg));
473 474
474 pr_warning("failed to open %s: %s\n", obj->path, cp); 475 pr_warning("failed to open %s: %s\n", obj->path, cp);
475 return -errno; 476 return -errno;
@@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
810 data->d_size, name, idx); 811 data->d_size, name, idx);
811 if (err) { 812 if (err) {
812 char errmsg[STRERR_BUFSIZE]; 813 char errmsg[STRERR_BUFSIZE];
813 char *cp = strerror_r(-err, errmsg, 814 char *cp = str_error(-err, errmsg, sizeof(errmsg));
814 sizeof(errmsg));
815 815
816 pr_warning("failed to alloc program %s (%s): %s", 816 pr_warning("failed to alloc program %s (%s): %s",
817 name, obj->path, cp); 817 name, obj->path, cp);
@@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj)
1140 1140
1141 *pfd = bpf_create_map_xattr(&create_attr); 1141 *pfd = bpf_create_map_xattr(&create_attr);
1142 if (*pfd < 0 && create_attr.btf_key_type_id) { 1142 if (*pfd < 0 && create_attr.btf_key_type_id) {
1143 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1143 cp = str_error(errno, errmsg, sizeof(errmsg));
1144 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 1144 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1145 map->name, cp, errno); 1145 map->name, cp, errno);
1146 create_attr.btf_fd = 0; 1146 create_attr.btf_fd = 0;
@@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj)
1155 size_t j; 1155 size_t j;
1156 1156
1157 err = *pfd; 1157 err = *pfd;
1158 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1158 cp = str_error(errno, errmsg, sizeof(errmsg));
1159 pr_warning("failed to create map (name: '%s'): %s\n", 1159 pr_warning("failed to create map (name: '%s'): %s\n",
1160 map->name, cp); 1160 map->name, cp);
1161 for (j = 0; j < i; j++) 1161 for (j = 0; j < i; j++)
@@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
1339 } 1339 }
1340 1340
1341 ret = -LIBBPF_ERRNO__LOAD; 1341 ret = -LIBBPF_ERRNO__LOAD;
1342 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1342 cp = str_error(errno, errmsg, sizeof(errmsg));
1343 pr_warning("load bpf program failed: %s\n", cp); 1343 pr_warning("load bpf program failed: %s\n", cp);
1344 1344
1345 if (log_buf && log_buf[0] != '\0') { 1345 if (log_buf && log_buf[0] != '\0') {
@@ -1654,7 +1654,7 @@ static int check_path(const char *path)
1654 1654
1655 dir = dirname(dname); 1655 dir = dirname(dname);
1656 if (statfs(dir, &st_fs)) { 1656 if (statfs(dir, &st_fs)) {
1657 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1657 cp = str_error(errno, errmsg, sizeof(errmsg));
1658 pr_warning("failed to statfs %s: %s\n", dir, cp); 1658 pr_warning("failed to statfs %s: %s\n", dir, cp);
1659 err = -errno; 1659 err = -errno;
1660 } 1660 }
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1690 } 1690 }
1691 1691
1692 if (bpf_obj_pin(prog->instances.fds[instance], path)) { 1692 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1693 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1693 cp = str_error(errno, errmsg, sizeof(errmsg));
1694 pr_warning("failed to pin program: %s\n", cp); 1694 pr_warning("failed to pin program: %s\n", cp);
1695 return -errno; 1695 return -errno;
1696 } 1696 }
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path)
1708 err = -errno; 1708 err = -errno;
1709 1709
1710 if (err) { 1710 if (err) {
1711 cp = strerror_r(-err, errmsg, sizeof(errmsg)); 1711 cp = str_error(-err, errmsg, sizeof(errmsg));
1712 pr_warning("failed to mkdir %s: %s\n", path, cp); 1712 pr_warning("failed to mkdir %s: %s\n", path, cp);
1713 } 1713 }
1714 return err; 1714 return err;
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
1770 } 1770 }
1771 1771
1772 if (bpf_obj_pin(map->fd, path)) { 1772 if (bpf_obj_pin(map->fd, path)) {
1773 cp = strerror_r(errno, errmsg, sizeof(errmsg)); 1773 cp = str_error(errno, errmsg, sizeof(errmsg));
1774 pr_warning("failed to pin map: %s\n", cp); 1774 pr_warning("failed to pin map: %s\n", cp);
1775 return -errno; 1775 return -errno;
1776 } 1776 }
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
new file mode 100644
index 000000000000..b8798114a357
--- /dev/null
+++ b/tools/lib/bpf/str_error.c
@@ -0,0 +1,18 @@
1// SPDX-License-Identifier: LGPL-2.1
2#undef _GNU_SOURCE
3#include <string.h>
4#include <stdio.h>
5#include "str_error.h"
6
7/*
8 * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
9 * libc, while checking strerror_r() return to avoid having to check this in
10 * all places calling it.
11 */
12char *str_error(int err, char *dst, int len)
13{
14 int ret = strerror_r(err, dst, len);
15 if (ret)
16 snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
17 return dst;
18}
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
new file mode 100644
index 000000000000..355b1db571d1
--- /dev/null
+++ b/tools/lib/bpf/str_error.h
@@ -0,0 +1,6 @@
1// SPDX-License-Identifier: LGPL-2.1
2#ifndef BPF_STR_ERROR
3#define BPF_STR_ERROR
4
5char *str_error(int err, char *dst, int len);
6#endif // BPF_STR_ERROR
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 42261a9b280e..ac841bc5c35b 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
280 mv $@+ $@ 280 mv $@+ $@
281 281
282ifdef USE_ASCIIDOCTOR 282ifdef USE_ASCIIDOCTOR
283$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt 283$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt
284 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \ 284 $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
285 $(ASCIIDOC) -b manpage -d manpage \ 285 $(ASCIIDOC) -b manpage -d manpage \
286 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \ 286 $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index b3d1b12a5081..5224ade3d5af 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -777,14 +777,12 @@ endif
777 $(call QUIET_INSTALL, libexec) \ 777 $(call QUIET_INSTALL, libexec) \
778 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 778 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
779ifndef NO_LIBBPF 779ifndef NO_LIBBPF
780 $(call QUIET_INSTALL, lib) \ 780 $(call QUIET_INSTALL, bpf-headers) \
781 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' 781 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
782 $(call QUIET_INSTALL, include/bpf) \ 782 $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
783 $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf' 783 $(call QUIET_INSTALL, bpf-examples) \
784 $(call QUIET_INSTALL, lib) \ 784 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
785 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf' 785 $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
786 $(call QUIET_INSTALL, examples/bpf) \
787 $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
788endif 786endif
789 $(call QUIET_INSTALL, perf-archive) \ 787 $(call QUIET_INSTALL, perf-archive) \
790 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)' 788 $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index f013b115dc86..dbef716a1913 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
11 11
12out := $(OUTPUT)arch/arm64/include/generated/asm 12out := $(OUTPUT)arch/arm64/include/generated/asm
13header := $(out)/syscalls.c 13header := $(out)/syscalls.c
14sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h 14incpath := $(srctree)/tools
15sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
15sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/ 16sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
16systbl := $(sysprf)/mksyscalltbl 17systbl := $(sysprf)/mksyscalltbl
17 18
@@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl
19_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') 20_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
20 21
21$(header): $(sysdef) $(systbl) 22$(header): $(sysdef) $(systbl)
22 $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@ 23 $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
23 24
24clean:: 25clean::
25 $(call QUIET_CLEAN, arm64) $(RM) $(header) 26 $(call QUIET_CLEAN, arm64) $(RM) $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
index 52e197317d3e..2dbb8cade048 100755
--- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -11,7 +11,8 @@
11 11
12gcc=$1 12gcc=$1
13hostcc=$2 13hostcc=$2
14input=$3 14incpath=$3
15input=$4
15 16
16if ! test -r $input; then 17if ! test -r $input; then
17 echo "Could not read input file" >&2 18 echo "Could not read input file" >&2
@@ -28,7 +29,6 @@ create_table_from_c()
28 29
29 cat <<-_EoHEADER 30 cat <<-_EoHEADER
30 #include <stdio.h> 31 #include <stdio.h>
31 #define __ARCH_WANT_RENAMEAT
32 #include "$input" 32 #include "$input"
33 int main(int argc, char *argv[]) 33 int main(int argc, char *argv[])
34 { 34 {
@@ -42,7 +42,7 @@ create_table_from_c()
42 printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);" 42 printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
43 printf "}\n" 43 printf "}\n"
44 44
45 } | $hostcc -o $create_table_exe -x c - 45 } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
46 46
47 $create_table_exe 47 $create_table_exe
48 48
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 20e7d74d86cd..10a44e946f77 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
22 22
23#endif 23#endif
24 24
25#if !defined(_CALL_ELF) || _CALL_ELF != 2
26int arch__choose_best_symbol(struct symbol *syma, 25int arch__choose_best_symbol(struct symbol *syma,
27 struct symbol *symb __maybe_unused) 26 struct symbol *symb __maybe_unused)
28{ 27{
29 char *sym = syma->name; 28 char *sym = syma->name;
30 29
30#if !defined(_CALL_ELF) || _CALL_ELF != 2
31 /* Skip over any initial dot */ 31 /* Skip over any initial dot */
32 if (*sym == '.') 32 if (*sym == '.')
33 sym++; 33 sym++;
34#endif
34 35
35 /* Avoid "SyS" kernel syscall aliases */ 36 /* Avoid "SyS" kernel syscall aliases */
36 if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3)) 37 if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
41 return SYMBOL_A; 42 return SYMBOL_A;
42} 43}
43 44
45#if !defined(_CALL_ELF) || _CALL_ELF != 2
44/* Allow matching against dot variants */ 46/* Allow matching against dot variants */
45int arch__compare_symbol_names(const char *namea, const char *nameb) 47int arch__compare_symbol_names(const char *namea, const char *nameb)
46{ 48{
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index c1bd979b957b..613709cfbbd0 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -9,6 +9,7 @@ struct test;
9int test__rdpmc(struct test *test __maybe_unused, int subtest); 9int test__rdpmc(struct test *test __maybe_unused, int subtest);
10int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest); 10int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
11int test__insn_x86(struct test *test __maybe_unused, int subtest); 11int test__insn_x86(struct test *test __maybe_unused, int subtest);
12int test__bp_modify(struct test *test, int subtest);
12 13
13#ifdef HAVE_DWARF_UNWIND_SUPPORT 14#ifdef HAVE_DWARF_UNWIND_SUPPORT
14struct thread; 15struct thread;
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 8e2c5a38c3b9..586849ff83a0 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -5,3 +5,4 @@ libperf-y += arch-tests.o
5libperf-y += rdpmc.o 5libperf-y += rdpmc.o
6libperf-y += perf-time-to-tsc.o 6libperf-y += perf-time-to-tsc.o
7libperf-$(CONFIG_AUXTRACE) += insn-x86.o 7libperf-$(CONFIG_AUXTRACE) += insn-x86.o
8libperf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index cc1802ff5410..d47d3f8e3c8e 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -24,6 +24,12 @@ struct test arch_tests[] = {
24 .func = test__insn_x86, 24 .func = test__insn_x86,
25 }, 25 },
26#endif 26#endif
27#if defined(__x86_64__)
28 {
29 .desc = "x86 bp modify",
30 .func = test__bp_modify,
31 },
32#endif
27 { 33 {
28 .func = NULL, 34 .func = NULL,
29 }, 35 },
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
new file mode 100644
index 000000000000..f53e4406709f
--- /dev/null
+++ b/tools/perf/arch/x86/tests/bp-modify.c
@@ -0,0 +1,213 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3#include <sys/types.h>
4#include <sys/wait.h>
5#include <sys/user.h>
6#include <syscall.h>
7#include <unistd.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <sys/ptrace.h>
11#include <asm/ptrace.h>
12#include <errno.h>
13#include "debug.h"
14#include "tests/tests.h"
15#include "arch-tests.h"
16
17static noinline int bp_1(void)
18{
19 pr_debug("in %s\n", __func__);
20 return 0;
21}
22
23static noinline int bp_2(void)
24{
25 pr_debug("in %s\n", __func__);
26 return 0;
27}
28
29static int spawn_child(void)
30{
31 int child = fork();
32
33 if (child == 0) {
34 /*
35 * The child sets itself for as tracee and
36 * waits in signal for parent to trace it,
37 * then it calls bp_1 and quits.
38 */
39 int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL);
40
41 if (err) {
42 pr_debug("failed to PTRACE_TRACEME\n");
43 exit(1);
44 }
45
46 raise(SIGCONT);
47 bp_1();
48 exit(0);
49 }
50
51 return child;
52}
53
54/*
55 * This tests creates HW breakpoint, tries to
56 * change it and checks it was properly changed.
57 */
58static int bp_modify1(void)
59{
60 pid_t child;
61 int status;
62 unsigned long rip = 0, dr7 = 1;
63
64 child = spawn_child();
65
66 waitpid(child, &status, 0);
67 if (WIFEXITED(status)) {
68 pr_debug("tracee exited prematurely 1\n");
69 return TEST_FAIL;
70 }
71
72 /*
73 * The parent does following steps:
74 * - creates a new breakpoint (id 0) for bp_2 function
75 * - changes that breakponit to bp_1 function
76 * - waits for the breakpoint to hit and checks
77 * it has proper rip of bp_1 function
78 * - detaches the child
79 */
80 if (ptrace(PTRACE_POKEUSER, child,
81 offsetof(struct user, u_debugreg[0]), bp_2)) {
82 pr_debug("failed to set breakpoint, 1st time: %s\n",
83 strerror(errno));
84 goto out;
85 }
86
87 if (ptrace(PTRACE_POKEUSER, child,
88 offsetof(struct user, u_debugreg[0]), bp_1)) {
89 pr_debug("failed to set breakpoint, 2nd time: %s\n",
90 strerror(errno));
91 goto out;
92 }
93
94 if (ptrace(PTRACE_POKEUSER, child,
95 offsetof(struct user, u_debugreg[7]), dr7)) {
96 pr_debug("failed to set dr7: %s\n", strerror(errno));
97 goto out;
98 }
99
100 if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
101 pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
102 goto out;
103 }
104
105 waitpid(child, &status, 0);
106 if (WIFEXITED(status)) {
107 pr_debug("tracee exited prematurely 2\n");
108 return TEST_FAIL;
109 }
110
111 rip = ptrace(PTRACE_PEEKUSER, child,
112 offsetof(struct user_regs_struct, rip), NULL);
113 if (rip == (unsigned long) -1) {
114 pr_debug("failed to PTRACE_PEEKUSER: %s\n",
115 strerror(errno));
116 goto out;
117 }
118
119 pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
120
121out:
122 if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
123 pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
124 return TEST_FAIL;
125 }
126
127 return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
128}
129
130/*
131 * This tests creates HW breakpoint, tries to
132 * change it to bogus value and checks the original
133 * breakpoint is hit.
134 */
135static int bp_modify2(void)
136{
137 pid_t child;
138 int status;
139 unsigned long rip = 0, dr7 = 1;
140
141 child = spawn_child();
142
143 waitpid(child, &status, 0);
144 if (WIFEXITED(status)) {
145 pr_debug("tracee exited prematurely 1\n");
146 return TEST_FAIL;
147 }
148
149 /*
150 * The parent does following steps:
151 * - creates a new breakpoint (id 0) for bp_1 function
152 * - tries to change that breakpoint to (-1) address
153 * - waits for the breakpoint to hit and checks
154 * it has proper rip of bp_1 function
155 * - detaches the child
156 */
157 if (ptrace(PTRACE_POKEUSER, child,
158 offsetof(struct user, u_debugreg[0]), bp_1)) {
159 pr_debug("failed to set breakpoint: %s\n",
160 strerror(errno));
161 goto out;
162 }
163
164 if (ptrace(PTRACE_POKEUSER, child,
165 offsetof(struct user, u_debugreg[7]), dr7)) {
166 pr_debug("failed to set dr7: %s\n", strerror(errno));
167 goto out;
168 }
169
170 if (!ptrace(PTRACE_POKEUSER, child,
171 offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
172 pr_debug("failed, breakpoint set to bogus address\n");
173 goto out;
174 }
175
176 if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
177 pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
178 goto out;
179 }
180
181 waitpid(child, &status, 0);
182 if (WIFEXITED(status)) {
183 pr_debug("tracee exited prematurely 2\n");
184 return TEST_FAIL;
185 }
186
187 rip = ptrace(PTRACE_PEEKUSER, child,
188 offsetof(struct user_regs_struct, rip), NULL);
189 if (rip == (unsigned long) -1) {
190 pr_debug("failed to PTRACE_PEEKUSER: %s\n",
191 strerror(errno));
192 goto out;
193 }
194
195 pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
196
197out:
198 if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
199 pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
200 return TEST_FAIL;
201 }
202
203 return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
204}
205
206int test__bp_modify(struct test *test __maybe_unused,
207 int subtest __maybe_unused)
208{
209 TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
210 TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2());
211
212 return 0;
213}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 20061cf42288..28cd6a17491b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -246,8 +246,14 @@ find_target:
246 246
247indirect_call: 247indirect_call:
248 tok = strchr(endptr, '*'); 248 tok = strchr(endptr, '*');
249 if (tok != NULL) 249 if (tok != NULL) {
250 ops->target.addr = strtoull(tok + 1, NULL, 16); 250 endptr++;
251
252 /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
253 * Do not parse such instruction. */
254 if (strstr(endptr, "(%r") == NULL)
255 ops->target.addr = strtoull(endptr, NULL, 16);
256 }
251 goto find_target; 257 goto find_target;
252} 258}
253 259
@@ -276,7 +282,19 @@ bool ins__is_call(const struct ins *ins)
276 return ins->ops == &call_ops || ins->ops == &s390_call_ops; 282 return ins->ops == &call_ops || ins->ops == &s390_call_ops;
277} 283}
278 284
279static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms) 285/*
286 * Prevents from matching commas in the comment section, e.g.:
287 * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
288 */
289static inline const char *validate_comma(const char *c, struct ins_operands *ops)
290{
291 if (ops->raw_comment && c > ops->raw_comment)
292 return NULL;
293
294 return c;
295}
296
297static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
280{ 298{
281 struct map *map = ms->map; 299 struct map *map = ms->map;
282 struct symbol *sym = ms->sym; 300 struct symbol *sym = ms->sym;
@@ -285,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
285 }; 303 };
286 const char *c = strchr(ops->raw, ','); 304 const char *c = strchr(ops->raw, ',');
287 u64 start, end; 305 u64 start, end;
306
307 ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
308 c = validate_comma(c, ops);
309
288 /* 310 /*
289 * Examples of lines to parse for the _cpp_lex_token@@Base 311 * Examples of lines to parse for the _cpp_lex_token@@Base
290 * function: 312 * function:
@@ -304,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
304 ops->target.addr = strtoull(c, NULL, 16); 326 ops->target.addr = strtoull(c, NULL, 16);
305 if (!ops->target.addr) { 327 if (!ops->target.addr) {
306 c = strchr(c, ','); 328 c = strchr(c, ',');
329 c = validate_comma(c, ops);
307 if (c++ != NULL) 330 if (c++ != NULL)
308 ops->target.addr = strtoull(c, NULL, 16); 331 ops->target.addr = strtoull(c, NULL, 16);
309 } 332 }
@@ -361,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
361 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name); 384 return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
362 385
363 c = strchr(ops->raw, ','); 386 c = strchr(ops->raw, ',');
387 c = validate_comma(c, ops);
388
364 if (c != NULL) { 389 if (c != NULL) {
365 const char *c2 = strchr(c + 1, ','); 390 const char *c2 = strchr(c + 1, ',');
366 391
392 c2 = validate_comma(c2, ops);
367 /* check for 3-op insn */ 393 /* check for 3-op insn */
368 if (c2 != NULL) 394 if (c2 != NULL)
369 c = c2; 395 c = c2;
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 005a5fe8a8c6..5399ba2321bb 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -22,6 +22,7 @@ struct ins {
22 22
23struct ins_operands { 23struct ins_operands {
24 char *raw; 24 char *raw;
25 char *raw_comment;
25 struct { 26 struct {
26 char *raw; 27 char *raw;
27 char *name; 28 char *name;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c980bbff6353..1a61628a1c12 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
251{ 251{
252 struct perf_evsel *evsel = zalloc(perf_evsel__object.size); 252 struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
253 253
254 if (evsel != NULL) 254 if (!evsel)
255 perf_evsel__init(evsel, attr, idx); 255 return NULL;
256 perf_evsel__init(evsel, attr, idx);
256 257
257 if (perf_evsel__is_bpf_output(evsel)) { 258 if (perf_evsel__is_bpf_output(evsel)) {
258 evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | 259 evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 36d0763311ef..6a6929f208b4 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -576,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
576 return NULL; 576 return NULL;
577} 577}
578 578
579static bool map__contains_symbol(struct map *map, struct symbol *sym)
580{
581 u64 ip = map->unmap_ip(map, sym->start);
582
583 return ip >= map->start && ip < map->end;
584}
585
579struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, 586struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
580 struct map **mapp) 587 struct map **mapp)
581{ 588{
@@ -591,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
591 598
592 if (sym == NULL) 599 if (sym == NULL)
593 continue; 600 continue;
601 if (!map__contains_symbol(pos, sym)) {
602 sym = NULL;
603 continue;
604 }
594 if (mapp != NULL) 605 if (mapp != NULL)
595 *mapp = pos; 606 *mapp = pos;
596 goto out; 607 goto out;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index c85d0d1a65ed..7b0ca7cbb7de 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -377,7 +377,7 @@ out:
377 377
378static int record_saved_cmdline(void) 378static int record_saved_cmdline(void)
379{ 379{
380 unsigned int size; 380 unsigned long long size;
381 char *path; 381 char *path;
382 struct stat st; 382 struct stat st;
383 int ret, err = 0; 383 int ret, err = 0;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 920b1d58a068..e76214f8d596 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -164,16 +164,15 @@ void parse_ftrace_printk(struct tep_handle *pevent,
164void parse_saved_cmdline(struct tep_handle *pevent, 164void parse_saved_cmdline(struct tep_handle *pevent,
165 char *file, unsigned int size __maybe_unused) 165 char *file, unsigned int size __maybe_unused)
166{ 166{
167 char *comm; 167 char comm[17]; /* Max comm length in the kernel is 16. */
168 char *line; 168 char *line;
169 char *next = NULL; 169 char *next = NULL;
170 int pid; 170 int pid;
171 171
172 line = strtok_r(file, "\n", &next); 172 line = strtok_r(file, "\n", &next);
173 while (line) { 173 while (line) {
174 sscanf(line, "%d %ms", &pid, &comm); 174 if (sscanf(line, "%d %16s", &pid, comm) == 2)
175 tep_register_comm(pevent, comm, pid); 175 tep_register_comm(pevent, comm, pid);
176 free(comm);
177 line = strtok_r(NULL, "\n", &next); 176 line = strtok_r(NULL, "\n", &next);
178 } 177 }
179} 178}
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 72c25a3cb658..d9a725478375 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
6 6
7include ../lib.mk 7include ../lib.mk
8 8
9all: 9all: khdr
10 @for DIR in $(SUBDIRS); do \ 10 @for DIR in $(SUBDIRS); do \
11 BUILD_TARGET=$(OUTPUT)/$$DIR; \ 11 BUILD_TARGET=$(OUTPUT)/$$DIR; \
12 mkdir $$BUILD_TARGET -p; \ 12 mkdir $$BUILD_TARGET -p; \
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config
index b4ad748a9dd9..b4ad748a9dd9 100644
--- a/tools/testing/selftests/android/ion/config
+++ b/tools/testing/selftests/android/config
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
index e03695287f76..88cfe88e466f 100644
--- a/tools/testing/selftests/android/ion/Makefile
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c
10 10
11TEST_PROGS := ion_test.sh 11TEST_PROGS := ion_test.sh
12 12
13KSFT_KHDR_INSTALL := 1
14top_srcdir = ../../../../..
13include ../../lib.mk 15include ../../lib.mk
14 16
15$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c 17$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6f54f84144a0..9b552c0fc47d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
580 /* Test update without programs */ 580 /* Test update without programs */
581 for (i = 0; i < 6; i++) { 581 for (i = 0; i < 6; i++) {
582 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); 582 err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
583 if (err) { 583 if (i < 2 && !err) {
584 printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
585 i, sfd[i]);
586 goto out_sockmap;
587 } else if (i >= 2 && err) {
584 printf("Failed noprog update sockmap '%i:%i'\n", 588 printf("Failed noprog update sockmap '%i:%i'\n",
585 i, sfd[i]); 589 i, sfd[i]);
586 goto out_sockmap; 590 goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
741 } 745 }
742 746
743 /* Test map update elem afterwards fd lives in fd and map_fd */ 747 /* Test map update elem afterwards fd lives in fd and map_fd */
744 for (i = 0; i < 6; i++) { 748 for (i = 2; i < 6; i++) {
745 err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); 749 err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
746 if (err) { 750 if (err) {
747 printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", 751 printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
845 } 849 }
846 850
847 /* Delete the elems without programs */ 851 /* Delete the elems without programs */
848 for (i = 0; i < 6; i++) { 852 for (i = 2; i < 6; i++) {
849 err = bpf_map_delete_elem(fd, &i); 853 err = bpf_map_delete_elem(fd, &i);
850 if (err) { 854 if (err) {
851 printf("Failed delete sockmap %i '%i:%i'\n", 855 printf("Failed delete sockmap %i '%i:%i'\n",
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 95eb3a53c381..adacda50a4b2 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1 +1,2 @@
1test_memcontrol 1test_memcontrol
2test_core
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 1c5d2b2a583b..14c9fe284806 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
89int cg_read_strcmp(const char *cgroup, const char *control, 89int cg_read_strcmp(const char *cgroup, const char *control,
90 const char *expected) 90 const char *expected)
91{ 91{
92 size_t size = strlen(expected) + 1; 92 size_t size;
93 char *buf; 93 char *buf;
94 int ret;
95
96 /* Handle the case of comparing against empty string */
97 if (!expected)
98 size = 32;
99 else
100 size = strlen(expected) + 1;
94 101
95 buf = malloc(size); 102 buf = malloc(size);
96 if (!buf) 103 if (!buf)
97 return -1; 104 return -1;
98 105
99 if (cg_read(cgroup, control, buf, size)) 106 if (cg_read(cgroup, control, buf, size)) {
107 free(buf);
100 return -1; 108 return -1;
109 }
101 110
102 return strcmp(expected, buf); 111 ret = strcmp(expected, buf);
112 free(buf);
113 return ret;
103} 114}
104 115
105int cg_read_strstr(const char *cgroup, const char *control, const char *needle) 116int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
@@ -337,3 +348,24 @@ int is_swap_enabled(void)
337 348
338 return cnt > 1; 349 return cnt > 1;
339} 350}
351
352int set_oom_adj_score(int pid, int score)
353{
354 char path[PATH_MAX];
355 int fd, len;
356
357 sprintf(path, "/proc/%d/oom_score_adj", pid);
358
359 fd = open(path, O_WRONLY | O_APPEND);
360 if (fd < 0)
361 return fd;
362
363 len = dprintf(fd, "%d", score);
364 if (len < 0) {
365 close(fd);
366 return len;
367 }
368
369 close(fd);
370 return 0;
371}
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 1ff6f9f1abdc..9ac8b7958f83 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -40,3 +40,4 @@ extern int get_temp_fd(void);
40extern int alloc_pagecache(int fd, size_t size); 40extern int alloc_pagecache(int fd, size_t size);
41extern int alloc_anon(const char *cgroup, void *arg); 41extern int alloc_anon(const char *cgroup, void *arg);
42extern int is_swap_enabled(void); 42extern int is_swap_enabled(void);
43extern int set_oom_adj_score(int pid, int score);
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index cf0bddc9d271..28d321ba311b 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -2,6 +2,7 @@
2#define _GNU_SOURCE 2#define _GNU_SOURCE
3 3
4#include <linux/limits.h> 4#include <linux/limits.h>
5#include <linux/oom.h>
5#include <fcntl.h> 6#include <fcntl.h>
6#include <stdio.h> 7#include <stdio.h>
7#include <stdlib.h> 8#include <stdlib.h>
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
202 return 0; 203 return 0;
203} 204}
204 205
206static int alloc_anon_noexit(const char *cgroup, void *arg)
207{
208 int ppid = getppid();
209
210 if (alloc_anon(cgroup, arg))
211 return -1;
212
213 while (getppid() == ppid)
214 sleep(1);
215
216 return 0;
217}
218
219/*
220 * Wait until processes are killed asynchronously by the OOM killer
221 * If we exceed a timeout, fail.
222 */
223static int cg_test_proc_killed(const char *cgroup)
224{
225 int limit;
226
227 for (limit = 10; limit > 0; limit--) {
228 if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
229 return 0;
230
231 usleep(100000);
232 }
233 return -1;
234}
235
205/* 236/*
206 * First, this test creates the following hierarchy: 237 * First, this test creates the following hierarchy:
207 * A memory.min = 50M, memory.max = 200M 238 * A memory.min = 50M, memory.max = 200M
@@ -964,6 +995,177 @@ cleanup:
964 return ret; 995 return ret;
965} 996}
966 997
998/*
999 * This test disables swapping and tries to allocate anonymous memory
1000 * up to OOM with memory.group.oom set. Then it checks that all
1001 * processes in the leaf (but not the parent) were killed.
1002 */
1003static int test_memcg_oom_group_leaf_events(const char *root)
1004{
1005 int ret = KSFT_FAIL;
1006 char *parent, *child;
1007
1008 parent = cg_name(root, "memcg_test_0");
1009 child = cg_name(root, "memcg_test_0/memcg_test_1");
1010
1011 if (!parent || !child)
1012 goto cleanup;
1013
1014 if (cg_create(parent))
1015 goto cleanup;
1016
1017 if (cg_create(child))
1018 goto cleanup;
1019
1020 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
1021 goto cleanup;
1022
1023 if (cg_write(child, "memory.max", "50M"))
1024 goto cleanup;
1025
1026 if (cg_write(child, "memory.swap.max", "0"))
1027 goto cleanup;
1028
1029 if (cg_write(child, "memory.oom.group", "1"))
1030 goto cleanup;
1031
1032 cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
1033 cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
1034 cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
1035 if (!cg_run(child, alloc_anon, (void *)MB(100)))
1036 goto cleanup;
1037
1038 if (cg_test_proc_killed(child))
1039 goto cleanup;
1040
1041 if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
1042 goto cleanup;
1043
1044 if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
1045 goto cleanup;
1046
1047 ret = KSFT_PASS;
1048
1049cleanup:
1050 if (child)
1051 cg_destroy(child);
1052 if (parent)
1053 cg_destroy(parent);
1054 free(child);
1055 free(parent);
1056
1057 return ret;
1058}
1059
1060/*
1061 * This test disables swapping and tries to allocate anonymous memory
1062 * up to OOM with memory.group.oom set. Then it checks that all
1063 * processes in the parent and leaf were killed.
1064 */
1065static int test_memcg_oom_group_parent_events(const char *root)
1066{
1067 int ret = KSFT_FAIL;
1068 char *parent, *child;
1069
1070 parent = cg_name(root, "memcg_test_0");
1071 child = cg_name(root, "memcg_test_0/memcg_test_1");
1072
1073 if (!parent || !child)
1074 goto cleanup;
1075
1076 if (cg_create(parent))
1077 goto cleanup;
1078
1079 if (cg_create(child))
1080 goto cleanup;
1081
1082 if (cg_write(parent, "memory.max", "80M"))
1083 goto cleanup;
1084
1085 if (cg_write(parent, "memory.swap.max", "0"))
1086 goto cleanup;
1087
1088 if (cg_write(parent, "memory.oom.group", "1"))
1089 goto cleanup;
1090
1091 cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
1092 cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
1093 cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
1094
1095 if (!cg_run(child, alloc_anon, (void *)MB(100)))
1096 goto cleanup;
1097
1098 if (cg_test_proc_killed(child))
1099 goto cleanup;
1100 if (cg_test_proc_killed(parent))
1101 goto cleanup;
1102
1103 ret = KSFT_PASS;
1104
1105cleanup:
1106 if (child)
1107 cg_destroy(child);
1108 if (parent)
1109 cg_destroy(parent);
1110 free(child);
1111 free(parent);
1112
1113 return ret;
1114}
1115
1116/*
1117 * This test disables swapping and tries to allocate anonymous memory
1118 * up to OOM with memory.group.oom set. Then it checks that all
1119 * processes were killed except those set with OOM_SCORE_ADJ_MIN
1120 */
1121static int test_memcg_oom_group_score_events(const char *root)
1122{
1123 int ret = KSFT_FAIL;
1124 char *memcg;
1125 int safe_pid;
1126
1127 memcg = cg_name(root, "memcg_test_0");
1128
1129 if (!memcg)
1130 goto cleanup;
1131
1132 if (cg_create(memcg))
1133 goto cleanup;
1134
1135 if (cg_write(memcg, "memory.max", "50M"))
1136 goto cleanup;
1137
1138 if (cg_write(memcg, "memory.swap.max", "0"))
1139 goto cleanup;
1140
1141 if (cg_write(memcg, "memory.oom.group", "1"))
1142 goto cleanup;
1143
1144 safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
1145 if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
1146 goto cleanup;
1147
1148 cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
1149 if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
1150 goto cleanup;
1151
1152 if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
1153 goto cleanup;
1154
1155 if (kill(safe_pid, SIGKILL))
1156 goto cleanup;
1157
1158 ret = KSFT_PASS;
1159
1160cleanup:
1161 if (memcg)
1162 cg_destroy(memcg);
1163 free(memcg);
1164
1165 return ret;
1166}
1167
1168
967#define T(x) { x, #x } 1169#define T(x) { x, #x }
968struct memcg_test { 1170struct memcg_test {
969 int (*fn)(const char *root); 1171 int (*fn)(const char *root);
@@ -978,6 +1180,9 @@ struct memcg_test {
978 T(test_memcg_oom_events), 1180 T(test_memcg_oom_events),
979 T(test_memcg_swap_max), 1181 T(test_memcg_swap_max),
980 T(test_memcg_sock), 1182 T(test_memcg_sock),
1183 T(test_memcg_oom_group_leaf_events),
1184 T(test_memcg_oom_group_parent_events),
1185 T(test_memcg_oom_group_score_events),
981}; 1186};
982#undef T 1187#undef T
983 1188
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 000000000000..4e151f1005b2
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ff8feca49746..ad1eeb14fda7 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \
18 18
19TEST_PROGS := run.sh 19TEST_PROGS := run.sh
20 20
21top_srcdir = ../../../../..
21include ../../lib.mk 22include ../../lib.mk
22 23
23$(TEST_GEN_FILES): $(HEADERS) 24$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 1bbb47565c55..4665cdbf1a8d 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -21,11 +21,8 @@ endef
21CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ 21CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
22LDLIBS += -lmount -I/usr/include/libmount 22LDLIBS += -lmount -I/usr/include/libmount
23 23
24$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h 24$(BINARIES):| khdr
25$(BINARIES): ../../../gpio/gpio-utils.o
25 26
26../../../gpio/gpio-utils.o: 27../../../gpio/gpio-utils.o:
27 make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio 28 make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
28
29../../../../usr/include/linux/gpio.h:
30 make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
31
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 15e6b75fc3a5..a3edb2c8e43d 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -19,7 +19,6 @@
19#define KSFT_FAIL 1 19#define KSFT_FAIL 1
20#define KSFT_XFAIL 2 20#define KSFT_XFAIL 2
21#define KSFT_XPASS 3 21#define KSFT_XPASS 3
22/* Treat skip as pass */
23#define KSFT_SKIP 4 22#define KSFT_SKIP 4
24 23
25/* counters */ 24/* counters */
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 4202139d81d9..5c34752e1cff 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,4 +1,5 @@
1cr4_cpuid_sync_test 1cr4_cpuid_sync_test
2platform_info_test
2set_sregs_test 3set_sregs_test
3sync_regs_test 4sync_regs_test
4vmx_tsc_adjust_test 5vmx_tsc_adjust_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 03b0f551bedf..ec32dad3c3f0 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m)
6LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c 6LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
7LIBKVM_x86_64 = lib/x86.c lib/vmx.c 7LIBKVM_x86_64 = lib/x86.c lib/vmx.c
8 8
9TEST_GEN_PROGS_x86_64 = set_sregs_test 9TEST_GEN_PROGS_x86_64 = platform_info_test
10TEST_GEN_PROGS_x86_64 += set_sregs_test
10TEST_GEN_PROGS_x86_64 += sync_regs_test 11TEST_GEN_PROGS_x86_64 += sync_regs_test
11TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test 12TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
12TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test 13TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
@@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
20LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 21LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
21LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include 22LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
22CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I.. 23CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
23LDFLAGS += -lpthread 24LDFLAGS += -pthread
24 25
25# After inclusion, $(OUTPUT) is defined and 26# After inclusion, $(OUTPUT) is defined and
26# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ 27# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
@@ -37,9 +38,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
37$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ) 38$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
38 $(AR) crs $@ $^ 39 $(AR) crs $@ $^
39 40
40$(LINUX_HDR_PATH): 41all: $(STATIC_LIBS)
41 make -C $(top_srcdir) headers_install
42
43all: $(STATIC_LIBS) $(LINUX_HDR_PATH)
44$(TEST_GEN_PROGS): $(STATIC_LIBS) 42$(TEST_GEN_PROGS): $(STATIC_LIBS)
45$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH) 43$(STATIC_LIBS):| khdr
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index bb5a25fb82c6..3acf9a91704c 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -50,6 +50,7 @@ enum vm_mem_backing_src_type {
50}; 50};
51 51
52int kvm_check_cap(long cap); 52int kvm_check_cap(long cap);
53int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
53 54
54struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); 55struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
55void kvm_vm_free(struct kvm_vm *vmp); 56void kvm_vm_free(struct kvm_vm *vmp);
@@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
108 struct kvm_vcpu_events *events); 109 struct kvm_vcpu_events *events);
109void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, 110void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
110 struct kvm_vcpu_events *events); 111 struct kvm_vcpu_events *events);
112uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
113void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
114 uint64_t msr_value);
111 115
112const char *exit_reason_str(unsigned int exit_reason); 116const char *exit_reason_str(unsigned int exit_reason);
113 117
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index e9ba389c48db..6fd8c089cafc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -63,6 +63,29 @@ int kvm_check_cap(long cap)
63 return ret; 63 return ret;
64} 64}
65 65
66/* VM Enable Capability
67 *
68 * Input Args:
69 * vm - Virtual Machine
70 * cap - Capability
71 *
72 * Output Args: None
73 *
74 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
75 *
76 * Enables a capability (KVM_CAP_*) on the VM.
77 */
78int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
79{
80 int ret;
81
82 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
83 TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
84 " rc: %i errno: %i", ret, errno);
85
86 return ret;
87}
88
66static void vm_open(struct kvm_vm *vm, int perm) 89static void vm_open(struct kvm_vm *vm, int perm)
67{ 90{
68 vm->kvm_fd = open(KVM_DEV_PATH, perm); 91 vm->kvm_fd = open(KVM_DEV_PATH, perm);
@@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
1220 ret, errno); 1243 ret, errno);
1221} 1244}
1222 1245
1246/* VCPU Get MSR
1247 *
1248 * Input Args:
1249 * vm - Virtual Machine
1250 * vcpuid - VCPU ID
1251 * msr_index - Index of MSR
1252 *
1253 * Output Args: None
1254 *
1255 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
1256 *
1257 * Get value of MSR for VCPU.
1258 */
1259uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
1260{
1261 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1262 struct {
1263 struct kvm_msrs header;
1264 struct kvm_msr_entry entry;
1265 } buffer = {};
1266 int r;
1267
1268 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1269 buffer.header.nmsrs = 1;
1270 buffer.entry.index = msr_index;
1271 r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
1272 TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
1273 " rc: %i errno: %i", r, errno);
1274
1275 return buffer.entry.data;
1276}
1277
1278/* VCPU Set MSR
1279 *
1280 * Input Args:
1281 * vm - Virtual Machine
1282 * vcpuid - VCPU ID
1283 * msr_index - Index of MSR
1284 * msr_value - New value of MSR
1285 *
1286 * Output Args: None
1287 *
1288 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
1289 *
1290 * Set value of MSR for VCPU.
1291 */
1292void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
1293 uint64_t msr_value)
1294{
1295 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1296 struct {
1297 struct kvm_msrs header;
1298 struct kvm_msr_entry entry;
1299 } buffer = {};
1300 int r;
1301
1302 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1303 memset(&buffer, 0, sizeof(buffer));
1304 buffer.header.nmsrs = 1;
1305 buffer.entry.index = msr_index;
1306 buffer.entry.data = msr_value;
1307 r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
1308 TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
1309 " rc: %i errno: %i", r, errno);
1310}
1311
1223/* VM VCPU Args Set 1312/* VM VCPU Args Set
1224 * 1313 *
1225 * Input Args: 1314 * Input Args:
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
new file mode 100644
index 000000000000..3764e7121265
--- /dev/null
+++ b/tools/testing/selftests/kvm/platform_info_test.c
@@ -0,0 +1,110 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
4 *
5 * Copyright (C) 2018, Google LLC.
6 *
7 * This work is licensed under the terms of the GNU GPL, version 2.
8 *
9 * Verifies expected behavior of controlling guest access to
10 * MSR_PLATFORM_INFO.
11 */
12
13#define _GNU_SOURCE /* for program_invocation_short_name */
14#include <fcntl.h>
15#include <stdio.h>
16#include <stdlib.h>
17#include <string.h>
18#include <sys/ioctl.h>
19
20#include "test_util.h"
21#include "kvm_util.h"
22#include "x86.h"
23
24#define VCPU_ID 0
25#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
26
27static void guest_code(void)
28{
29 uint64_t msr_platform_info;
30
31 for (;;) {
32 msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
33 GUEST_SYNC(msr_platform_info);
34 asm volatile ("inc %r11");
35 }
36}
37
38static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
39{
40 struct kvm_enable_cap cap = {};
41
42 cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
43 cap.flags = 0;
44 cap.args[0] = (int)enable;
45 vm_enable_cap(vm, &cap);
46}
47
48static void test_msr_platform_info_enabled(struct kvm_vm *vm)
49{
50 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
51 struct guest_args args;
52
53 set_msr_platform_info_enabled(vm, true);
54 vcpu_run(vm, VCPU_ID);
55 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
56 "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
57 run->exit_reason,
58 exit_reason_str(run->exit_reason));
59 guest_args_read(vm, VCPU_ID, &args);
60 TEST_ASSERT(args.port == GUEST_PORT_SYNC,
61 "Received IO from port other than PORT_HOST_SYNC: %u\n",
62 run->io.port);
63 TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
64 MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
65 "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
66 MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
67}
68
69static void test_msr_platform_info_disabled(struct kvm_vm *vm)
70{
71 struct kvm_run *run = vcpu_state(vm, VCPU_ID);
72
73 set_msr_platform_info_enabled(vm, false);
74 vcpu_run(vm, VCPU_ID);
75 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
76 "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
77 run->exit_reason,
78 exit_reason_str(run->exit_reason));
79}
80
81int main(int argc, char *argv[])
82{
83 struct kvm_vm *vm;
84 struct kvm_run *state;
85 int rv;
86 uint64_t msr_platform_info;
87
88 /* Tell stdout not to buffer its content */
89 setbuf(stdout, NULL);
90
91 rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
92 if (!rv) {
93 fprintf(stderr,
94 "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
95 exit(KSFT_SKIP);
96 }
97
98 vm = vm_create_default(VCPU_ID, 0, guest_code);
99
100 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
101 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
102 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
103 test_msr_platform_info_disabled(vm);
104 test_msr_platform_info_enabled(vm);
105 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
106
107 kvm_vm_free(vm);
108
109 return 0;
110}
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 17ab36605a8e..0a8e75886224 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
16TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED)) 16TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
17TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES)) 17TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
18 18
19top_srcdir ?= ../../../..
20include $(top_srcdir)/scripts/subarch.include
21ARCH ?= $(SUBARCH)
22
19all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) 23all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
20 24
25.PHONY: khdr
26khdr:
27 make ARCH=$(ARCH) -C $(top_srcdir) headers_install
28
29ifdef KSFT_KHDR_INSTALL
30$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
31endif
32
21.ONESHELL: 33.ONESHELL:
22define RUN_TEST_PRINT_RESULT 34define RUN_TEST_PRINT_RESULT
23 TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \ 35 TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
index 2fde30191a47..a7e8cd5bb265 100644
--- a/tools/testing/selftests/memory-hotplug/config
+++ b/tools/testing/selftests/memory-hotplug/config
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
2CONFIG_MEMORY_HOTPLUG_SPARSE=y 2CONFIG_MEMORY_HOTPLUG_SPARSE=y
3CONFIG_NOTIFIER_ERROR_INJECTION=y 3CONFIG_NOTIFIER_ERROR_INJECTION=y
4CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m 4CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
5CONFIG_MEMORY_HOTREMOVE=y
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 9cca68e440a0..919aa2ac00af 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
15TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa 15TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
16TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls 16TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
17 17
18KSFT_KHDR_INSTALL := 1
18include ../lib.mk 19include ../lib.mk
19 20
20$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma 21$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index f8cc38afffa2..0ab9423d009f 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -46,6 +46,9 @@
46# Kselftest framework requirement - SKIP code is 4. 46# Kselftest framework requirement - SKIP code is 4.
47ksft_skip=4 47ksft_skip=4
48 48
49# Some systems don't have a ping6 binary anymore
50which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
51
49tests=" 52tests="
50 pmtu_vti6_exception vti6: PMTU exceptions 53 pmtu_vti6_exception vti6: PMTU exceptions
51 pmtu_vti4_exception vti4: PMTU exceptions 54 pmtu_vti4_exception vti4: PMTU exceptions
@@ -175,8 +178,8 @@ setup() {
175 178
176cleanup() { 179cleanup() {
177 [ ${cleanup_done} -eq 1 ] && return 180 [ ${cleanup_done} -eq 1 ] && return
178 ip netns del ${NS_A} 2 > /dev/null 181 ip netns del ${NS_A} 2> /dev/null
179 ip netns del ${NS_B} 2 > /dev/null 182 ip netns del ${NS_B} 2> /dev/null
180 cleanup_done=1 183 cleanup_done=1
181} 184}
182 185
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
274 mtu "${ns_b}" veth_b 4000 277 mtu "${ns_b}" veth_b 4000
275 mtu "${ns_a}" vti6_a 5000 278 mtu "${ns_a}" vti6_a 5000
276 mtu "${ns_b}" vti6_b 5000 279 mtu "${ns_b}" vti6_b 5000
277 ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null 280 ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
278 281
279 # Check that exception was created 282 # Check that exception was created
280 if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then 283 if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
334 fail=0 337 fail=0
335 338
336 min=68 339 min=68
337 max=$((65528 - 20)) 340 max=$((65535 - 20))
338 # Check invalid values first 341 # Check invalid values first
339 for v in $((min - 1)) $((max + 1)); do 342 for v in $((min - 1)) $((max + 1)); do
340 ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null 343 ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index b3ebf2646e52..8fdfeafaf8c0 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple)
502 EXPECT_EQ(memcmp(test_str, buf, send_len), 0); 502 EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
503} 503}
504 504
505TEST_F(tls, recv_peek_multiple_records)
506{
507 char const *test_str = "test_read_peek_mult_recs";
508 char const *test_str_first = "test_read_peek";
509 char const *test_str_second = "_mult_recs";
510 int len;
511 char buf[64];
512
513 len = strlen(test_str_first);
514 EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
515
516 len = strlen(test_str_second) + 1;
517 EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
518
519 len = sizeof(buf);
520 memset(buf, 0, len);
521 EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
522
523 /* MSG_PEEK can only peek into the current record. */
524 len = strlen(test_str_first) + 1;
525 EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
526
527 len = sizeof(buf);
528 memset(buf, 0, len);
529 EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
530
531 /* Non-MSG_PEEK will advance strparser (and therefore record)
532 * however.
533 */
534 len = strlen(test_str) + 1;
535 EXPECT_EQ(memcmp(test_str, buf, len), 0);
536
537 /* MSG_MORE will hold current record open, so later MSG_PEEK
538 * will see everything.
539 */
540 len = strlen(test_str_first);
541 EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);
542
543 len = strlen(test_str_second) + 1;
544 EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
545
546 len = sizeof(buf);
547 memset(buf, 0, len);
548 EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
549
550 len = strlen(test_str) + 1;
551 EXPECT_EQ(memcmp(test_str, buf, len), 0);
552}
553
505TEST_F(tls, pollin) 554TEST_F(tls, pollin)
506{ 555{
507 char const *test_str = "test_poll"; 556 char const *test_str = "test_poll";
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index a728040edbe1..14cfcf006936 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
5 5
6all: $(TEST_PROGS) 6all: $(TEST_PROGS)
7 7
8top_srcdir = ../../../../..
8include ../../lib.mk 9include ../../lib.mk
9 10
10clean: 11clean:
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
index 93baacab7693..d056486f49de 100644
--- a/tools/testing/selftests/powerpc/alignment/Makefile
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -1,5 +1,6 @@
1TEST_GEN_PROGS := copy_first_unaligned alignment_handler 1TEST_GEN_PROGS := copy_first_unaligned alignment_handler
2 2
3top_srcdir = ../../../../..
3include ../../lib.mk 4include ../../lib.mk
4 5
5$(TEST_GEN_PROGS): ../harness.c ../utils.c 6$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index b4d7432a0ecd..d40300a65b42 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
4 4
5CFLAGS += -O2 5CFLAGS += -O2
6 6
7top_srcdir = ../../../../..
7include ../../lib.mk 8include ../../lib.mk
8 9
9$(TEST_GEN_PROGS): ../harness.c 10$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
index 1be547434a49..ede4d3dae750 100644
--- a/tools/testing/selftests/powerpc/cache_shape/Makefile
+++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
5 5
6$(TEST_PROGS): ../harness.c ../utils.c 6$(TEST_PROGS): ../harness.c ../utils.c
7 7
8top_srcdir = ../../../../..
8include ../../lib.mk 9include ../../lib.mk
9 10
10clean: 11clean:
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 1cf89a34d97c..44574f3818b3 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
17 17
18EXTRA_SOURCES := validate.c ../harness.c stubs.S 18EXTRA_SOURCES := validate.c ../harness.c stubs.S
19 19
20top_srcdir = ../../../../..
20include ../../lib.mk 21include ../../lib.mk
21 22
22$(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) 23$(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile
index 55d7db7a616b..5df476364b4d 100644
--- a/tools/testing/selftests/powerpc/dscr/Makefile
+++ b/tools/testing/selftests/powerpc/dscr/Makefile
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \
3 dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ 3 dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \
4 dscr_sysfs_thread_test 4 dscr_sysfs_thread_test
5 5
6top_srcdir = ../../../../..
6include ../../lib.mk 7include ../../lib.mk
7 8
8$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread 9$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 0dd3a01fdab9..11a10d7a2bbd 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,6 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt 2TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
3 3
4top_srcdir = ../../../../..
4include ../../lib.mk 5include ../../lib.mk
5 6
6$(TEST_GEN_PROGS): ../harness.c 7$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 8ebbe96d80a8..33ced6e0ad25 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -5,6 +5,7 @@ noarg:
5TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors 5TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
6TEST_GEN_FILES := tempfile 6TEST_GEN_FILES := tempfile
7 7
8top_srcdir = ../../../../..
8include ../../lib.mk 9include ../../lib.mk
9 10
10$(TEST_GEN_PROGS): ../harness.c 11$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 6e1629bf5b09..19046db995fe 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -5,6 +5,7 @@ noarg:
5TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes 5TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
6EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c 6EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
7 7
8top_srcdir = ../../../../..
8include ../../lib.mk 9include ../../lib.mk
9 10
10all: $(TEST_GEN_PROGS) ebb 11all: $(TEST_GEN_PROGS) ebb
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index c4e64bc2e265..bd5dfa509272 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
17 lost_exception_test no_handler_test \ 17 lost_exception_test no_handler_test \
18 cycles_with_mmcr2_test 18 cycles_with_mmcr2_test
19 19
20top_srcdir = ../../../../../..
20include ../../../lib.mk 21include ../../../lib.mk
21 22
22$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ 23$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
index 175366db7be8..ea2b7bd09e36 100644
--- a/tools/testing/selftests/powerpc/primitives/Makefile
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
2 2
3TEST_GEN_PROGS := load_unaligned_zeropad 3TEST_GEN_PROGS := load_unaligned_zeropad
4 4
5top_srcdir = ../../../../..
5include ../../lib.mk 6include ../../lib.mk
6 7
7$(TEST_GEN_PROGS): ../harness.c 8$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 28f5b781a553..923d531265f8 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
4 ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ 4 ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
5 perf-hwbreak 5 perf-hwbreak
6 6
7top_srcdir = ../../../../..
7include ../../lib.mk 8include ../../lib.mk
8 9
9all: $(TEST_PROGS) 10all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index a7cbd5082e27..1fca25c6ace0 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
8CFLAGS += -maltivec 8CFLAGS += -maltivec
9signal_tm: CFLAGS += -mhtm 9signal_tm: CFLAGS += -mhtm
10 10
11top_srcdir = ../../../../..
11include ../../lib.mk 12include ../../lib.mk
12 13
13clean: 14clean:
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
index 10b35c87a4f4..7fc0623d85c3 100644
--- a/tools/testing/selftests/powerpc/stringloops/Makefile
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -29,6 +29,7 @@ endif
29 29
30ASFLAGS = $(CFLAGS) 30ASFLAGS = $(CFLAGS)
31 31
32top_srcdir = ../../../../..
32include ../../lib.mk 33include ../../lib.mk
33 34
34$(TEST_GEN_PROGS): $(EXTRA_SOURCES) 35$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
index 30b8ff8fb82e..fcd2dcb8972b 100644
--- a/tools/testing/selftests/powerpc/switch_endian/Makefile
+++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
5 5
6EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S 6EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
7 7
8top_srcdir = ../../../../..
8include ../../lib.mk 9include ../../lib.mk
9 10
10$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S 11$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
index da22ca7c38c1..161b8846336f 100644
--- a/tools/testing/selftests/powerpc/syscalls/Makefile
+++ b/tools/testing/selftests/powerpc/syscalls/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
2 2
3CFLAGS += -I../../../../../usr/include 3CFLAGS += -I../../../../../usr/include
4 4
5top_srcdir = ../../../../..
5include ../../lib.mk 6include ../../lib.mk
6 7
7$(TEST_GEN_PROGS): ../harness.c 8$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index c0e45d2dde25..9fc2cf6fbc92 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
6 tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ 6 tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
7 $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn 7 $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
8 8
9top_srcdir = ../../../../..
9include ../../lib.mk 10include ../../lib.mk
10 11
11$(TEST_GEN_PROGS): ../harness.c ../utils.c 12$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
index f8ced26748f8..fb82068c9fda 100644
--- a/tools/testing/selftests/powerpc/vphn/Makefile
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
2 2
3CFLAGS += -m64 3CFLAGS += -m64
4 4
5top_srcdir = ../../../../..
5include ../../lib.mk 6include ../../lib.mk
6 7
7$(TEST_GEN_PROGS): ../harness.c 8$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 642d4e12abea..eec2663261f2 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
56 printf(fmt, ## __VA_ARGS__); \ 56 printf(fmt, ## __VA_ARGS__); \
57 } while (0) 57 } while (0)
58 58
59#if defined(__x86_64__) || defined(__i386__) 59#ifdef __i386__
60 60
61#define INJECT_ASM_REG "eax" 61#define INJECT_ASM_REG "eax"
62 62
63#define RSEQ_INJECT_CLOBBER \ 63#define RSEQ_INJECT_CLOBBER \
64 , INJECT_ASM_REG 64 , INJECT_ASM_REG
65 65
66#ifdef __i386__
67
68#define RSEQ_INJECT_ASM(n) \ 66#define RSEQ_INJECT_ASM(n) \
69 "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \ 67 "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
70 "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \ 68 "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
76 74
77#elif defined(__x86_64__) 75#elif defined(__x86_64__)
78 76
77#define INJECT_ASM_REG_P "rax"
78#define INJECT_ASM_REG "eax"
79
80#define RSEQ_INJECT_CLOBBER \
81 , INJECT_ASM_REG_P \
82 , INJECT_ASM_REG
83
79#define RSEQ_INJECT_ASM(n) \ 84#define RSEQ_INJECT_ASM(n) \
80 "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \ 85 "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
81 "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \ 86 "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
82 "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \ 87 "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
83 "jz 333f\n\t" \ 88 "jz 333f\n\t" \
84 "222:\n\t" \ 89 "222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
86 "jnz 222b\n\t" \ 91 "jnz 222b\n\t" \
87 "333:\n\t" 92 "333:\n\t"
88 93
89#else
90#error "Unsupported architecture"
91#endif
92
93#elif defined(__s390__) 94#elif defined(__s390__)
94 95
95#define RSEQ_INJECT_INPUT \ 96#define RSEQ_INJECT_INPUT \
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index f03763d81617..30f9b54bd666 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -313,6 +313,54 @@
313 ] 313 ]
314 }, 314 },
315 { 315 {
316 "id": "6aaf",
317 "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
318 "category": [
319 "actions",
320 "police"
321 ],
322 "setup": [
323 [
324 "$TC actions flush action police",
325 0,
326 1,
327 255
328 ]
329 ],
330 "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
331 "expExitCode": "0",
332 "verifyCmd": "$TC actions get action police index 1",
333 "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
334 "matchCount": "1",
335 "teardown": [
336 "$TC actions flush action police"
337 ]
338 },
339 {
340 "id": "29b1",
341 "name": "Add police actions with conform-exceed control <invalid>/drop",
342 "category": [
343 "actions",
344 "police"
345 ],
346 "setup": [
347 [
348 "$TC actions flush action police",
349 0,
350 1,
351 255
352 ]
353 ],
354 "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
355 "expExitCode": "255",
356 "verifyCmd": "$TC actions ls action police",
357 "matchPattern": "action order [0-9]*: police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
358 "matchCount": "0",
359 "teardown": [
360 "$TC actions flush action police"
361 ]
362 },
363 {
316 "id": "c26f", 364 "id": "c26f",
317 "name": "Add police action with invalid peakrate value", 365 "name": "Add police action with invalid peakrate value",
318 "category": [ 366 "category": [
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9881876d2aa0..e94b7b14bcb2 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests
26 26
27include ../lib.mk 27include ../lib.mk
28 28
29$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h
30$(OUTPUT)/userfaultfd: LDLIBS += -lpthread 29$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
31 30
32$(OUTPUT)/mlock-random-test: LDLIBS += -lcap 31$(OUTPUT)/mlock-random-test: LDLIBS += -lcap
33
34../../../../usr/include/linux/kernel.h:
35 make -C ../../../.. headers_install
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 30cb0a0713ff..37908a83ddc2 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
159}; 159};
160 160
161 161
162static const char * const debugfs_known_mountpoints[] = {
163 "/sys/kernel/debug",
164 "/debug",
165 0,
166};
167
168/* 162/*
169 * data structures 163 * data structures
170 */ 164 */
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index f82c2eaa859d..334b16db0ebb 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -30,8 +30,8 @@ struct slabinfo {
30 int alias; 30 int alias;
31 int refs; 31 int refs;
32 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu; 32 int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
33 int hwcache_align, object_size, objs_per_slab; 33 unsigned int hwcache_align, object_size, objs_per_slab;
34 int sanity_checks, slab_size, store_user, trace; 34 unsigned int sanity_checks, slab_size, store_user, trace;
35 int order, poison, reclaim_account, red_zone; 35 int order, poison, reclaim_account, red_zone;
36 unsigned long partial, objects, slabs, objects_partial, objects_total; 36 unsigned long partial, objects, slabs, objects_partial, objects_total;
37 unsigned long alloc_fastpath, alloc_slowpath; 37 unsigned long alloc_fastpath, alloc_slowpath;
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 91aaf73b00df..ed162a6c57c5 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
1817 return 0; 1817 return 0;
1818} 1818}
1819 1819
1820int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1821{
1822 unsigned long end = hva + PAGE_SIZE;
1823
1824 if (!kvm->arch.pgd)
1825 return 0;
1826
1827 trace_kvm_unmap_hva(hva);
1828 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1829 return 0;
1830}
1831
1832int kvm_unmap_hva_range(struct kvm *kvm, 1820int kvm_unmap_hva_range(struct kvm *kvm,
1833 unsigned long start, unsigned long end) 1821 unsigned long start, unsigned long end)
1834{ 1822{
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
1860void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) 1848void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1861{ 1849{
1862 unsigned long end = hva + PAGE_SIZE; 1850 unsigned long end = hva + PAGE_SIZE;
1851 kvm_pfn_t pfn = pte_pfn(pte);
1863 pte_t stage2_pte; 1852 pte_t stage2_pte;
1864 1853
1865 if (!kvm->arch.pgd) 1854 if (!kvm->arch.pgd)
1866 return; 1855 return;
1867 1856
1868 trace_kvm_set_spte_hva(hva); 1857 trace_kvm_set_spte_hva(hva);
1869 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); 1858
1859 /*
1860 * We've moved a page around, probably through CoW, so let's treat it
1861 * just like a translation fault and clean the cache to the PoC.
1862 */
1863 clean_dcache_guest_page(pfn, PAGE_SIZE);
1864 stage2_pte = pfn_pte(pfn, PAGE_S2);
1870 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); 1865 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1871} 1866}
1872 1867
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index e53b596f483b..57b3edebbb40 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
134 __entry->vcpu_pc, __entry->instr, __entry->cpsr) 134 __entry->vcpu_pc, __entry->instr, __entry->cpsr)
135); 135);
136 136
137TRACE_EVENT(kvm_unmap_hva,
138 TP_PROTO(unsigned long hva),
139 TP_ARGS(hva),
140
141 TP_STRUCT__entry(
142 __field( unsigned long, hva )
143 ),
144
145 TP_fast_assign(
146 __entry->hva = hva;
147 ),
148
149 TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
150);
151
152TRACE_EVENT(kvm_unmap_hva_range, 137TRACE_EVENT(kvm_unmap_hva_range,
153 TP_PROTO(unsigned long start, unsigned long end), 138 TP_PROTO(unsigned long start, unsigned long end),
154 TP_ARGS(start, end), 139 TP_ARGS(start, end),