aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrond Myklebust <trond.myklebust@primarydata.com>2016-03-16 16:24:36 -0400
committerTrond Myklebust <trond.myklebust@primarydata.com>2016-03-16 16:25:09 -0400
commit1425075e7272faaa3629a1e2df679c0ba4cf55d3 (patch)
tree6d79a735f8a02d6dc9e27b915f6244fe1ab6b7ff
parent849dc3244c916545790bfb9055625a3719061c92 (diff)
parent2fa8f88d8892507ecff0126fbc67906740491d31 (diff)
Merge tag 'nfs-rdma-4.6-1' of git://git.linux-nfs.org/projects/anna/nfs-rdma
NFS: NFSoRDMA Client Side Changes These patches include several bugfixes and cleanups for the NFSoRDMA client. This includes bugfixes for NFS v4.1, proper RDMA_ERROR handling, and fixes from the recent workqueue swicchover. These patches also switch xprtrdma to use the new CQ API Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com> * tag 'nfs-rdma-4.6-1' of git://git.linux-nfs.org/projects/anna/nfs-rdma: (787 commits) xprtrdma: Use new CQ API for RPC-over-RDMA client send CQs xprtrdma: Use an anonymous union in struct rpcrdma_mw xprtrdma: Use new CQ API for RPC-over-RDMA client receive CQs xprtrdma: Serialize credit accounting again xprtrdma: Properly handle RDMA_ERROR replies rpcrdma: Add RPCRDMA_HDRLEN_ERR xprtrdma: Do not wait if ib_post_send() fails xprtrdma: Segment head and tail XDR buffers on page boundaries xprtrdma: Clean up dprintk format string containing a newline xprtrdma: Clean up physical_op_map() xprtrdma: Clean up unused RPCRDMA_INLINE_PAD_THRESH macro
-rw-r--r--Documentation/cgroup-v2.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt5
-rw-r--r--Documentation/devicetree/bindings/net/renesas,ravb.txt4
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt1
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65217.txt10
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/fsl-asoc-card.txt2
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.txt37
-rw-r--r--Documentation/filesystems/efivarfs.txt7
-rw-r--r--Documentation/kernel-parameters.txt11
-rw-r--r--Documentation/timers/hpet.txt4
-rw-r--r--MAINTAINERS54
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig39
-rw-r--r--arch/arc/Makefile4
-rw-r--r--arch/arc/configs/axs101_defconfig4
-rw-r--r--arch/arc/configs/axs103_defconfig10
-rw-r--r--arch/arc/configs/axs103_smp_defconfig10
-rw-r--r--arch/arc/configs/nsim_700_defconfig5
-rw-r--r--arch/arc/configs/nsim_hs_defconfig3
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig6
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig3
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig12
-rw-r--r--arch/arc/configs/tb10x_defconfig18
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig2
-rw-r--r--arch/arc/include/asm/arcregs.h35
-rw-r--r--arch/arc/include/asm/irq.h2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h18
-rw-r--r--arch/arc/include/asm/mcip.h4
-rw-r--r--arch/arc/include/asm/pgtable.h45
-rw-r--r--arch/arc/kernel/entry-arcv2.S30
-rw-r--r--arch/arc/kernel/intc-arcv2.c41
-rw-r--r--arch/arc/kernel/intc-compact.c3
-rw-r--r--arch/arc/kernel/mcip.c70
-rw-r--r--arch/arc/kernel/setup.c100
-rw-r--r--arch/arc/kernel/smp.c3
-rw-r--r--arch/arc/kernel/time.c8
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi14
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts14
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts14
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts4
-rw-r--r--arch/arm/boot/dts/am335x-sl50.dts13
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi1
-rw-r--r--arch/arm/boot/dts/kirkwood-ds112.dts2
-rw-r--r--arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts31
-rw-r--r--arch/arm/boot/dts/sama5d2-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/tps65217.dtsi56
-rw-r--r--arch/arm/common/icst.c9
-rw-r--r--arch/arm/configs/omap2plus_defconfig33
-rw-r--r--arch/arm/crypto/aes-ce-glue.c4
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/xen/page-coherent.h21
-rw-r--r--arch/arm/kvm/mmio.c3
-rw-r--r--arch/arm/mach-omap2/board-generic.c22
-rw-r--r--arch/arm/mach-omap2/gpmc-onenand.c6
-rw-r--r--arch/arm/mach-omap2/omap_device.c14
-rw-r--r--arch/arm/mach-shmobile/common.h1
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S6
-rw-r--r--arch/arm/mach-shmobile/headsmp.S28
-rw-r--r--arch/arm/mach-shmobile/platsmp-apmu.c1
-rw-r--r--arch/arm/mach-shmobile/platsmp-scu.c4
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c2
-rw-r--r--arch/arm/mm/mmap.c2
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/Makefile4
-rw-r--r--arch/arm64/boot/install.sh14
-rw-r--r--arch/arm64/crypto/aes-glue.c4
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h8
-rw-r--r--arch/arm64/kernel/debug-monitors.c48
-rw-r--r--arch/arm64/kernel/image.h1
-rw-r--r--arch/arm64/kernel/stacktrace.c17
-rw-r--r--arch/arm64/kernel/traps.c11
-rw-r--r--arch/arm64/kvm/hyp-init.S12
-rw-r--r--arch/arm64/kvm/hyp/switch.c8
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c20
-rw-r--r--arch/arm64/kvm/inject_fault.c38
-rw-r--r--arch/arm64/kvm/sys_regs.c9
-rw-r--r--arch/arm64/lib/strnlen.S2
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c9
-rw-r--r--arch/arm64/mm/mmap.c4
-rw-r--r--arch/m68k/configs/amiga_defconfig9
-rw-r--r--arch/m68k/configs/apollo_defconfig9
-rw-r--r--arch/m68k/configs/atari_defconfig9
-rw-r--r--arch/m68k/configs/bvme6000_defconfig9
-rw-r--r--arch/m68k/configs/hp300_defconfig9
-rw-r--r--arch/m68k/configs/mac_defconfig9
-rw-r--r--arch/m68k/configs/multi_defconfig9
-rw-r--r--arch/m68k/configs/mvme147_defconfig9
-rw-r--r--arch/m68k/configs/mvme16x_defconfig9
-rw-r--r--arch/m68k/configs/q40_defconfig9
-rw-r--r--arch/m68k/configs/sun3_defconfig9
-rw-r--r--arch/m68k/configs/sun3x_defconfig9
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7125.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7346.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7358.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7360.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7362.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7420.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi1
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi1
-rw-r--r--arch/mips/include/asm/elf.h9
-rw-r--r--arch/mips/include/asm/fpu.h4
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h3
-rw-r--r--arch/mips/include/asm/processor.h2
-rw-r--r--arch/mips/include/asm/stackframe.h4
-rw-r--r--arch/mips/include/asm/syscall.h4
-rw-r--r--arch/mips/include/uapi/asm/unistd.h15
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c2
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c2
-rw-r--r--arch/mips/kernel/process.c6
-rw-r--r--arch/mips/kernel/scall32-o32.S1
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/scall64-n32.S1
-rw-r--r--arch/mips/kernel/scall64-o32.S1
-rw-r--r--arch/mips/kernel/setup.c1
-rw-r--r--arch/mips/kernel/traps.c25
-rw-r--r--arch/mips/mm/mmap.c4
-rw-r--r--arch/mips/mm/sc-mips.c10
-rw-r--r--arch/mips/mti-malta/malta-init.c8
-rw-r--r--arch/mips/pci/pci-mt7620.c8
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/trace.h8
-rw-r--r--arch/powerpc/kernel/eeh_driver.c6
-rw-r--r--arch/powerpc/kernel/eeh_pe.c2
-rw-r--r--arch/powerpc/kernel/module_64.c2
-rw-r--r--arch/powerpc/kernel/process.c4
-rw-r--r--arch/powerpc/mm/hash64_64k.c8
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c12
-rw-r--r--arch/powerpc/mm/mmap.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c32
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.c26
-rw-r--r--arch/powerpc/platforms/powernv/pci.h1
-rw-r--r--arch/s390/include/asm/fpu/internal.h2
-rw-r--r--arch/s390/include/asm/livepatch.h2
-rw-r--r--arch/s390/kernel/compat_signal.c2
-rw-r--r--arch/s390/kernel/perf_event.c8
-rw-r--r--arch/s390/kernel/stacktrace.c47
-rw-r--r--arch/s390/kernel/trace.c3
-rw-r--r--arch/s390/mm/maccess.c12
-rw-r--r--arch/s390/oprofile/backtrace.c8
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c2
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/entry/entry_32.S1
-rw-r--r--arch/x86/entry/entry_64_compat.S1
-rw-r--r--arch/x86/include/asm/livepatch.h2
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/uaccess_32.h26
-rw-r--r--arch/x86/include/asm/xen/pci.h4
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c2
-rw-r--r--arch/x86/kvm/emulate.c4
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/x86.c1
-rw-r--r--arch/x86/lib/copy_user_64.S142
-rw-r--r--arch/x86/mm/fault.c15
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/mm/mpx.c2
-rw-r--r--arch/x86/mm/numa.c2
-rw-r--r--arch/x86/mm/pageattr.c14
-rw-r--r--arch/x86/pci/common.c26
-rw-r--r--arch/x86/pci/intel_mid_pci.c9
-rw-r--r--arch/x86/pci/irq.c23
-rw-r--r--arch/x86/pci/xen.c5
-rw-r--r--arch/x86/platform/intel-quark/imr.c4
-rw-r--r--block/Kconfig13
-rw-r--r--block/bio.c9
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/blk-settings.c4
-rw-r--r--block/blk-sysfs.c5
-rw-r--r--block/deadline-iosched.c3
-rw-r--r--crypto/algif_skcipher.c80
-rw-r--r--crypto/crypto_user.c6
-rw-r--r--drivers/acpi/nfit.c90
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_link.c128
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c20
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ahci_brcmstb.c1
-rw-r--r--drivers/ata/libahci.c27
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-sff.c35
-rw-r--r--drivers/base/component.c49
-rw-r--r--drivers/base/regmap/regmap-mmio.c16
-rw-r--r--drivers/block/floppy.c67
-rw-r--r--drivers/block/null_blk.c8
-rw-r--r--drivers/block/xen-blkfront.c74
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c26
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c26
-rw-r--r--drivers/clk/tegra/clk-emc.c6
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-pll.c50
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c5
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c132
-rw-r--r--drivers/clk/ti/dpll3xxx.c3
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/crypto/atmel-sha.c23
-rw-r--r--drivers/crypto/marvell/cesa.c2
-rw-r--r--drivers/devfreq/tegra-devfreq.c2
-rw-r--r--drivers/dma/dw/core.c15
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/edma.c41
-rw-r--r--drivers/dma/ioat/dma.c34
-rw-r--r--drivers/firmware/efi/efivars.c35
-rw-r--r--drivers/firmware/efi/vars.c144
-rw-r--r--drivers/gpio/gpio-altera.c5
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h32
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c19
-rw-r--r--drivers/gpu/drm/drm_atomic.c44
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c49
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c32
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h17
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c115
-rw-r--r--drivers/gpu/drm/i915/intel_display.c86
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c32
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c45
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c14
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c21
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c149
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/gpio-fan.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/infiniband/core/sysfs.c7
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c63
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c12
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c24
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c7
-rw-r--r--drivers/input/keyboard/cap11xx.c8
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/mouse/vmmouse.c13
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c18
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c35
-rw-r--r--drivers/irqchip/irq-gic.c13
-rw-r--r--drivers/irqchip/irq-sun4i.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/rrpc.c4
-rw-r--r--drivers/lightnvm/rrpc.h5
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/mmc/card/block.c7
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/pxamci.c37
-rw-r--r--drivers/mmc/host/sdhci-acpi.c30
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c31
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/dsa/mv88e6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c27
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c64
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c74
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c299
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c71
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c25
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c19
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c34
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c184
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c69
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c16
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c105
-rw-r--r--drivers/net/geneve.c52
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/phy/bcm7xxx.c43
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vxlan.c56
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c188
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c1
-rw-r--r--drivers/nvme/host/lightnvm.c12
-rw-r--r--drivers/nvme/host/nvme.h4
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/nvmem/core.c6
-rw-r--r--drivers/nvmem/qfprom.c1
-rw-r--r--drivers/of/irq.c9
-rw-r--r--drivers/of/of_mdio.c1
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/pcie-iproc.c29
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c48
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/platform/x86/intel-hid.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c37
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/hisi_sas/Kconfig2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h59
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c16
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c76
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/storvsc_drv.c16
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c8
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/target/target_core_configfs.c2
-rw-r--r--drivers/target/target_core_device.c44
-rw-r--r--drivers/target/target_core_file.c29
-rw-r--r--drivers/target/target_core_iblock.c58
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_tmr.c139
-rw-r--r--drivers/target/target_core_transport.c327
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/cpu_cooling.c14
-rw-r--r--drivers/thermal/of-thermal.c18
-rw-r--r--drivers/thermal/rcar_thermal.c45
-rw-r--r--drivers/thermal/spear_thermal.c6
-rw-r--r--drivers/tty/pty.c21
-rw-r--r--drivers/tty/serial/8250/8250_pci.c21
-rw-r--r--drivers/tty/serial/omap-serial.c10
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_mutex.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c4
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c6
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c70
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.h15
-rw-r--r--drivers/usb/gadget/udc/udc-core.c3
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c20
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/option.c9
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbdev/imxfb.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/ocfb.c4
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c80
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/affs/file.c5
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/block_dev.c19
-rw-r--r--fs/btrfs/backref.c10
-rw-r--r--fs/btrfs/compression.c6
-rw-r--r--fs/btrfs/delayed-inode.c3
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/extent_io.c45
-rw-r--r--fs/btrfs/extent_io.h3
-rw-r--r--fs/btrfs/inode.c16
-rw-r--r--fs/btrfs/ioctl.c119
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/connect.c3
-rw-r--r--fs/dax.c21
-rw-r--r--fs/devpts/inode.c20
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/efivarfs/file.c70
-rw-r--r--fs/efivarfs/inode.c30
-rw-r--r--fs/efivarfs/internal.h3
-rw-r--r--fs/efivarfs/super.c16
-rw-r--r--fs/ext2/file.c19
-rw-r--r--fs/ext2/inode.c16
-rw-r--r--fs/ext4/balloc.c7
-rw-r--r--fs/ext4/crypto.c56
-rw-r--r--fs/ext4/dir.c13
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c28
-rw-r--r--fs/ext4/ialloc.c6
-rw-r--r--fs/ext4/inode.c78
-rw-r--r--fs/ext4/ioctl.c7
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/move_extent.c15
-rw-r--r--fs/ext4/namei.c26
-rw-r--r--fs/ext4/resize.c2
-rw-r--r--fs/fs-writeback.c15
-rw-r--r--fs/hpfs/namei.c31
-rw-r--r--fs/inode.c6
-rw-r--r--fs/namei.c22
-rw-r--r--fs/notify/mark.c53
-rw-r--r--fs/ocfs2/aops.c1
-rw-r--r--fs/pnode.c9
-rw-r--r--fs/read_write.c9
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/xfs_aops.c6
-rw-r--r--fs/xfs/xfs_aops.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c3
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--include/asm-generic/cputime_nsecs.h5
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--include/drm/drm_crtc.h8
-rw-r--r--include/dt-bindings/clock/tegra210-car.h2
-rw-r--r--include/linux/blkdev.h9
-rw-r--r--include/linux/cgroup-defs.h6
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/dax.h8
-rw-r--r--include/linux/devpts_fs.h4
-rw-r--r--include/linux/efi.h5
-rw-r--r--include/linux/fsnotify_backend.h5
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/intel-iommu.h3
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/libnvdimm.h3
-rw-r--r--include/linux/lightnvm.h4
-rw-r--r--include/linux/lockdep.h4
-rw-r--r--include/linux/mlx4/device.h13
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2968
-rw-r--r--include/linux/module.h19
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/pci.h17
-rw-r--r--include/linux/perf_event.h7
-rw-r--r--include/linux/pfn.h2
-rw-r--r--include/linux/pfn_t.h19
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/soc/ti/knav_dma.h4
-rw-r--r--include/linux/sunrpc/rpc_rdma.h12
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/ucs2_string.h4
-rw-r--r--include/linux/workqueue.h9
-rw-r--r--include/net/af_unix.h4
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ip_tunnels.h1
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/sound/hdaudio.h2
-rw-r--r--include/target/target_core_backend.h3
-rw-r--r--include/target/target_core_base.h9
-rw-r--r--include/uapi/linux/ndctl.h11
-rw-r--r--ipc/shm.c53
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cgroup.c31
-rw-r--r--kernel/cpuset.c71
-rw-r--r--kernel/events/core.c372
-rw-r--r--kernel/locking/lockdep.c58
-rw-r--r--kernel/memremap.c8
-rw-r--r--kernel/module.c124
-rw-r--r--kernel/resource.c5
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/trace/ftrace.c36
-rw-r--r--kernel/trace/trace_events.c3
-rw-r--r--kernel/trace/trace_stack.c6
-rw-r--r--kernel/workqueue.c74
-rw-r--r--lib/Kconfig.debug15
-rw-r--r--lib/Kconfig.ubsan4
-rw-r--r--lib/klist.c6
-rw-r--r--lib/scatterlist.c6
-rw-r--r--lib/ucs2_string.c62
-rw-r--r--lib/vsprintf.c26
-rw-r--r--mm/backing-dev.c2
-rw-r--r--mm/filemap.c13
-rw-r--r--mm/huge_memory.c13
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/memory.c14
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c34
-rw-r--r--mm/mprotect.c6
-rw-r--r--mm/mremap.c2
-rw-r--r--mm/pgtable-generic.c8
-rw-r--r--mm/slab.c12
-rw-r--r--mm/slab.h1
-rw-r--r--mm/slab_common.c1
-rw-r--r--mm/slob.c4
-rw-r--r--mm/slub.c38
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/batman-adv/gateway_client.c7
-rw-r--r--net/batman-adv/hard-interface.c25
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/bluetooth/hci_core.c6
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/ceph/messenger.c15
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/flow_dissector.c16
-rw-r--r--net/core/scm.c7
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sysctl_net_core.c10
-rw-r--r--net/dccp/ipv4.c14
-rw-r--r--net/dccp/ipv6.c14
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/inet_connection_sock.c14
-rw-r--r--net/ipv4/ip_gre.c13
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ip_tunnel.c20
-rw-r--r--net/ipv4/ping.c4
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c77
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_ipv4.c31
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/ip6_flowlabel.c5
-rw-r--r--net/ipv6/ip6_gre.c1
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c74
-rw-r--r--net/ipv6/tcp_ipv6.c19
-rw-r--r--net/l2tp/l2tp_netlink.c18
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/netfilter/nf_conntrack_core.c5
-rw-r--r--net/netfilter/nfnetlink.c16
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c2
-rw-r--r--net/netfilter/nft_counter.c4
-rw-r--r--net/netfilter/xt_TEE.c4
-rw-r--r--net/openvswitch/vport-vxlan.c4
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sctp/protocol.c46
-rw-r--r--net/sctp/socket.c9
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c28
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c143
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c1
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c108
-rw-r--r--net/sunrpc/xprtrdma/verbs.c204
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h14
-rw-r--r--net/tipc/link.c4
-rw-r--r--net/tipc/node.c12
-rw-r--r--net/unix/af_unix.c30
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/garbage.c8
-rw-r--r--net/vmw_vsock/af_vsock.c19
-rwxr-xr-xscripts/prune-kernel20
-rw-r--r--security/integrity/evm/evm_main.c3
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/nlmsgtab.c1
-rw-r--r--sound/core/pcm_native.c16
-rw-r--r--sound/core/seq/seq_memory.c13
-rw-r--r--sound/core/seq/seq_ports.c13
-rw-r--r--sound/core/timer.c40
-rw-r--r--sound/drivers/dummy.c37
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c2
-rw-r--r--sound/firewire/tascam/tascam-transaction.c6
-rw-r--r--sound/firewire/tascam/tascam.c12
-rw-r--r--sound/firewire/tascam/tascam.h4
-rw-r--r--sound/hda/hdac_controller.c7
-rw-r--r--sound/pci/hda/hda_controller.c47
-rw-r--r--sound/pci/hda/hda_generic.c4
-rw-r--r--sound/pci/hda/hda_intel.c20
-rw-r--r--sound/pci/hda/hda_jack.c2
-rw-r--r--sound/pci/hda/hda_jack.h2
-rw-r--r--sound/pci/hda/patch_ca0132.c5
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c50
-rw-r--r--sound/pci/hda/patch_sigmatel.c6
-rw-r--r--sound/soc/amd/acp-pcm-dma.c1
-rw-r--r--sound/soc/codecs/arizona.c43
-rw-r--r--sound/soc/codecs/rt286.c26
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/rt5659.c31
-rw-r--r--sound/soc/codecs/rt5659.h1
-rw-r--r--sound/soc/codecs/sigmadsp-i2c.c5
-rw-r--r--sound/soc/codecs/wm5110.c1
-rw-r--r--sound/soc/codecs/wm8960.c40
-rw-r--r--sound/soc/dwc/designware_i2s.c5
-rw-r--r--sound/soc/fsl/fsl_ssi.c42
-rw-r--r--sound/soc/fsl/imx-spdif.c2
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/Kconfig13
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c1
-rw-r--r--sound/soc/intel/boards/skl_rt286.c5
-rw-r--r--sound/soc/intel/common/Makefile9
-rw-r--r--sound/soc/intel/common/sst-acpi.c4
-rw-r--r--sound/soc/intel/common/sst-match-acpi.c3
-rw-r--r--sound/soc/intel/skylake/skl-messages.c6
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c1
-rw-r--r--sound/soc/intel/skylake/skl-topology.c75
-rw-r--r--sound/soc/intel/skylake/skl.c2
-rw-r--r--sound/soc/mediatek/Kconfig4
-rw-r--r--sound/soc/mxs/mxs-saif.c13
-rw-r--r--sound/soc/qcom/lpass-platform.c15
-rw-r--r--sound/soc/soc-dapm.c8
-rw-r--r--sound/soc/soc-pcm.c3
-rw-r--r--sound/usb/midi.c1
-rw-r--r--tools/perf/util/intel-pt.c9
-rw-r--r--tools/perf/util/parse-events.c3
-rw-r--r--tools/perf/util/probe-finder.c62
-rw-r--r--tools/perf/util/probe-finder.h5
-rw-r--r--tools/perf/util/stat.c10
-rw-r--r--tools/testing/nvdimm/test/nfit.c8
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh19
-rw-r--r--tools/testing/selftests/efivarfs/open-unlink.c72
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc15
-rw-r--r--virt/kvm/arm/arch_timer.c9
-rw-r--r--virt/kvm/arm/vgic.c4
-rw-r--r--virt/kvm/async_pf.c2
806 files changed, 10309 insertions, 6145 deletions
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index e8d25e784214..ff49cf901148 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
7conventions of cgroup v2. It describes all userland-visible aspects 7conventions of cgroup v2. It describes all userland-visible aspects
8of cgroup including core and specific controller behaviors. All 8of cgroup including core and specific controller behaviors. All
9future changes must be reflected in this document. Documentation for 9future changes must be reflected in this document. Documentation for
10v1 is available under Documentation/cgroup-legacy/. 10v1 is available under Documentation/cgroup-v1/.
11 11
12CONTENTS 12CONTENTS
13 13
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
index ace05992a262..20df350b9ef3 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
@@ -30,7 +30,7 @@ that they are defined using standard clock bindings with following
30clock-output-names: 30clock-output-names:
31 - "xin24m" - crystal input - required, 31 - "xin24m" - crystal input - required,
32 - "ext_i2s" - external I2S clock - optional, 32 - "ext_i2s" - external I2S clock - optional,
33 - "ext_gmac" - external GMAC clock - optional 33 - "rmii_clkin" - external EMAC clock - optional
34 34
35Example: Clock controller node: 35Example: Clock controller node:
36 36
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 7803e77d85cb..007a5b46256a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -24,9 +24,8 @@ Main node required properties:
24 1 = edge triggered 24 1 = edge triggered
25 4 = level triggered 25 4 = level triggered
26 26
27 Cells 4 and beyond are reserved for future use. When the 1st cell 27 Cells 4 and beyond are reserved for future use and must have a value
28 has a value of 0 or 1, cells 4 and beyond act as padding, and may be 28 of 0 if present.
29 ignored. It is recommended that padding cells have a value of 0.
30 29
31- reg : Specifies base physical address(s) and size of the GIC 30- reg : Specifies base physical address(s) and size of the GIC
32 registers, in the following order: 31 registers, in the following order:
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 81a9f9e6b45f..c8ac222eac67 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -82,8 +82,8 @@ Example:
82 "ch16", "ch17", "ch18", "ch19", 82 "ch16", "ch17", "ch18", "ch19",
83 "ch20", "ch21", "ch22", "ch23", 83 "ch20", "ch21", "ch22", "ch23",
84 "ch24"; 84 "ch24";
85 clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>; 85 clocks = <&cpg CPG_MOD 812>;
86 power-domains = <&cpg_clocks>; 86 power-domains = <&cpg>;
87 phy-mode = "rgmii-id"; 87 phy-mode = "rgmii-id";
88 phy-handle = <&phy0>; 88 phy-handle = <&phy0>;
89 89
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 4e8b90e43dd8..07a75094c5a8 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
8Required properties: 8Required properties:
9- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; 9- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
10 "renesas,pci-r8a7791" for the R8A7791 SoC; 10 "renesas,pci-r8a7791" for the R8A7791 SoC;
11 "renesas,pci-r8a7793" for the R8A7793 SoC;
11 "renesas,pci-r8a7794" for the R8A7794 SoC; 12 "renesas,pci-r8a7794" for the R8A7794 SoC;
12 "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device 13 "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
13 14
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 558fe528ae19..6cf99690eef9 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -4,6 +4,7 @@ Required properties:
4compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; 4compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
5 "renesas,pcie-r8a7790" for the R8A7790 SoC; 5 "renesas,pcie-r8a7790" for the R8A7790 SoC;
6 "renesas,pcie-r8a7791" for the R8A7791 SoC; 6 "renesas,pcie-r8a7791" for the R8A7791 SoC;
7 "renesas,pcie-r8a7793" for the R8A7793 SoC;
7 "renesas,pcie-r8a7795" for the R8A7795 SoC; 8 "renesas,pcie-r8a7795" for the R8A7795 SoC;
8 "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device. 9 "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
9 10
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d18109657da6..4f05d208c95c 100644
--- a/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -26,11 +26,7 @@ Example:
26 ti,pmic-shutdown-controller; 26 ti,pmic-shutdown-controller;
27 27
28 regulators { 28 regulators {
29 #address-cells = <1>;
30 #size-cells = <0>;
31
32 dcdc1_reg: dcdc1 { 29 dcdc1_reg: dcdc1 {
33 reg = <0>;
34 regulator-min-microvolt = <900000>; 30 regulator-min-microvolt = <900000>;
35 regulator-max-microvolt = <1800000>; 31 regulator-max-microvolt = <1800000>;
36 regulator-boot-on; 32 regulator-boot-on;
@@ -38,7 +34,6 @@ Example:
38 }; 34 };
39 35
40 dcdc2_reg: dcdc2 { 36 dcdc2_reg: dcdc2 {
41 reg = <1>;
42 regulator-min-microvolt = <900000>; 37 regulator-min-microvolt = <900000>;
43 regulator-max-microvolt = <3300000>; 38 regulator-max-microvolt = <3300000>;
44 regulator-boot-on; 39 regulator-boot-on;
@@ -46,7 +41,6 @@ Example:
46 }; 41 };
47 42
48 dcdc3_reg: dcc3 { 43 dcdc3_reg: dcc3 {
49 reg = <2>;
50 regulator-min-microvolt = <900000>; 44 regulator-min-microvolt = <900000>;
51 regulator-max-microvolt = <1500000>; 45 regulator-max-microvolt = <1500000>;
52 regulator-boot-on; 46 regulator-boot-on;
@@ -54,7 +48,6 @@ Example:
54 }; 48 };
55 49
56 ldo1_reg: ldo1 { 50 ldo1_reg: ldo1 {
57 reg = <3>;
58 regulator-min-microvolt = <1000000>; 51 regulator-min-microvolt = <1000000>;
59 regulator-max-microvolt = <3300000>; 52 regulator-max-microvolt = <3300000>;
60 regulator-boot-on; 53 regulator-boot-on;
@@ -62,7 +55,6 @@ Example:
62 }; 55 };
63 56
64 ldo2_reg: ldo2 { 57 ldo2_reg: ldo2 {
65 reg = <4>;
66 regulator-min-microvolt = <900000>; 58 regulator-min-microvolt = <900000>;
67 regulator-max-microvolt = <3300000>; 59 regulator-max-microvolt = <3300000>;
68 regulator-boot-on; 60 regulator-boot-on;
@@ -70,7 +62,6 @@ Example:
70 }; 62 };
71 63
72 ldo3_reg: ldo3 { 64 ldo3_reg: ldo3 {
73 reg = <5>;
74 regulator-min-microvolt = <1800000>; 65 regulator-min-microvolt = <1800000>;
75 regulator-max-microvolt = <3300000>; 66 regulator-max-microvolt = <3300000>;
76 regulator-boot-on; 67 regulator-boot-on;
@@ -78,7 +69,6 @@ Example:
78 }; 69 };
79 70
80 ldo4_reg: ldo4 { 71 ldo4_reg: ldo4 {
81 reg = <6>;
82 regulator-min-microvolt = <1800000>; 72 regulator-min-microvolt = <1800000>;
83 regulator-max-microvolt = <3300000>; 73 regulator-max-microvolt = <3300000>;
84 regulator-boot-on; 74 regulator-boot-on;
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index ac2fcd6ff4b8..1068ffce9f91 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -14,6 +14,10 @@ Required properties:
14 interrupt number is the rtc alarm interrupt and second interrupt number 14 interrupt number is the rtc alarm interrupt and second interrupt number
15 is the rtc tick interrupt. The number of cells representing a interrupt 15 is the rtc tick interrupt. The number of cells representing a interrupt
16 depends on the parent interrupt controller. 16 depends on the parent interrupt controller.
17- clocks: Must contain a list of phandle and clock specifier for the rtc
18 and source clocks.
19- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
20 same order as the clocks property.
17 21
18Example: 22Example:
19 23
@@ -21,4 +25,6 @@ Example:
21 compatible = "samsung,s3c6410-rtc"; 25 compatible = "samsung,s3c6410-rtc";
22 reg = <0x10070000 0x100>; 26 reg = <0x10070000 0x100>;
23 interrupts = <44 0 45 0>; 27 interrupts = <44 0 45 0>;
28 clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
29 clock-names = "rtc", "rtc_src";
24 }; 30 };
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 35ae1fb3537f..ed94c217c98d 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,7 @@ Optional properties:
9- fsl,uart-has-rtscts : Indicate the uart has rts and cts 9- fsl,uart-has-rtscts : Indicate the uart has rts and cts
10- fsl,irda-mode : Indicate the uart supports irda mode 10- fsl,irda-mode : Indicate the uart supports irda mode
11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works 11- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
12 is DCE mode by default. 12 in DCE mode by default.
13 13
14Note: Each uart controller should have an alias correctly numbered 14Note: Each uart controller should have an alias correctly numbered
15in "aliases" node. 15in "aliases" node.
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
index ce55c0a6f757..4da41bf1888e 100644
--- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
@@ -30,6 +30,8 @@ The compatible list for this generic sound card currently:
30 "fsl,imx-audio-sgtl5000" 30 "fsl,imx-audio-sgtl5000"
31 (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt) 31 (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
32 32
33 "fsl,imx-audio-wm8960"
34
33Required properties: 35Required properties:
34 36
35 - compatible : Contains one of entries in the compatible list. 37 - compatible : Contains one of entries in the compatible list.
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 332e625f6ed0..e5ee3f159893 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -1,8 +1,9 @@
1* Renesas R-Car Thermal 1* Renesas R-Car Thermal
2 2
3Required properties: 3Required properties:
4- compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal" 4- compatible : "renesas,thermal-<soctype>",
5 as fallback. 5 "renesas,rcar-gen2-thermal" (with thermal-zone) or
6 "renesas,rcar-thermal" (without thermal-zone) as fallback.
6 Examples with soctypes are: 7 Examples with soctypes are:
7 - "renesas,thermal-r8a73a4" (R-Mobile APE6) 8 - "renesas,thermal-r8a73a4" (R-Mobile APE6)
8 - "renesas,thermal-r8a7779" (R-Car H1) 9 - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@ thermal@e61f0000 {
36 0xe61f0300 0x38>; 37 0xe61f0300 0x38>;
37 interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; 38 interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
38}; 39};
40
41Example (with thermal-zone):
42
43thermal-zones {
44 cpu_thermal: cpu-thermal {
45 polling-delay-passive = <1000>;
46 polling-delay = <5000>;
47
48 thermal-sensors = <&thermal>;
49
50 trips {
51 cpu-crit {
52 temperature = <115000>;
53 hysteresis = <0>;
54 type = "critical";
55 };
56 };
57 cooling-maps {
58 };
59 };
60};
61
62thermal: thermal@e61f0000 {
63 compatible = "renesas,thermal-r8a7790",
64 "renesas,rcar-gen2-thermal",
65 "renesas,rcar-thermal";
66 reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
67 interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
68 clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
69 power-domains = <&cpg_clocks>;
70 #thermal-sensor-cells = <0>;
71};
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
index c477af086e65..686a64bba775 100644
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@ filesystem.
14efivarfs is typically mounted like this, 14efivarfs is typically mounted like this,
15 15
16 mount -t efivarfs none /sys/firmware/efi/efivars 16 mount -t efivarfs none /sys/firmware/efi/efivars
17
18Due to the presence of numerous firmware bugs where removing non-standard
19UEFI variables causes the system firmware to fail to POST, efivarfs
20files that are not well-known standardized variables are created
21as immutable files. This doesn't prevent removal - "chattr -i" will work -
22but it does prevent this kind of failure from being accomplished
23accidentally.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 551ecf09c8dd..9a53c929f017 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -4235,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
4235 The default value of this parameter is determined by 4235 The default value of this parameter is determined by
4236 the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT. 4236 the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
4237 4237
4238 workqueue.debug_force_rr_cpu
4239 Workqueue used to implicitly guarantee that work
4240 items queued without explicit CPU specified are put
4241 on the local CPU. This guarantee is no longer true
4242 and while local CPU is still preferred work items
4243 may be put on foreign CPUs. This debug option
4244 forces round-robin CPU selection to flush out
4245 usages which depend on the now broken guarantee.
4246 When enabled, memory and cache locality will be
4247 impacted.
4248
4238 x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of 4249 x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
4239 default x2apic cluster mode on platforms 4250 default x2apic cluster mode on platforms
4240 supporting x2apic. 4251 supporting x2apic.
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index 767392ffd31e..a484d2c109d7 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,9 +1,7 @@
1 High Precision Event Timer Driver for Linux 1 High Precision Event Timer Driver for Linux
2 2
3The High Precision Event Timer (HPET) hardware follows a specification 3The High Precision Event Timer (HPET) hardware follows a specification
4by Intel and Microsoft which can be found at 4by Intel and Microsoft, revision 1.
5
6 http://www.intel.com/hardwaredesign/hpetspec_1.pdf
7 5
8Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision") 6Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
9and up to 32 comparators. Normally three or more comparators are provided, 7and up to 32 comparators. Normally three or more comparators are provided,
diff --git a/MAINTAINERS b/MAINTAINERS
index 7f1fa4ff300a..da3e4d8016d0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -920,17 +920,24 @@ M: Emilio López <emilio@elopez.com.ar>
920S: Maintained 920S: Maintained
921F: drivers/clk/sunxi/ 921F: drivers/clk/sunxi/
922 922
923ARM/Amlogic MesonX SoC support 923ARM/Amlogic Meson SoC support
924M: Carlo Caione <carlo@caione.org> 924M: Carlo Caione <carlo@caione.org>
925L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 925L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
926L: linux-meson@googlegroups.com
927W: http://linux-meson.com/
926S: Maintained 928S: Maintained
927F: drivers/media/rc/meson-ir.c 929F: arch/arm/mach-meson/
928N: meson[x68] 930F: arch/arm/boot/dts/meson*
931N: meson
929 932
930ARM/Annapurna Labs ALPINE ARCHITECTURE 933ARM/Annapurna Labs ALPINE ARCHITECTURE
931M: Tsahee Zidenberg <tsahee@annapurnalabs.com> 934M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
935M: Antoine Tenart <antoine.tenart@free-electrons.com>
932S: Maintained 936S: Maintained
933F: arch/arm/mach-alpine/ 937F: arch/arm/mach-alpine/
938F: arch/arm/boot/dts/alpine*
939F: arch/arm64/boot/dts/al/
940F: drivers/*/*alpine*
934 941
935ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT 942ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
936M: Nicolas Ferre <nicolas.ferre@atmel.com> 943M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1442,8 +1449,8 @@ S: Maintained
1442ARM/RENESAS ARM64 ARCHITECTURE 1449ARM/RENESAS ARM64 ARCHITECTURE
1443M: Simon Horman <horms@verge.net.au> 1450M: Simon Horman <horms@verge.net.au>
1444M: Magnus Damm <magnus.damm@gmail.com> 1451M: Magnus Damm <magnus.damm@gmail.com>
1445L: linux-sh@vger.kernel.org 1452L: linux-renesas-soc@vger.kernel.org
1446Q: http://patchwork.kernel.org/project/linux-sh/list/ 1453Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
1447T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next 1454T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
1448S: Supported 1455S: Supported
1449F: arch/arm64/boot/dts/renesas/ 1456F: arch/arm64/boot/dts/renesas/
@@ -2362,14 +2369,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
2362S: Maintained 2369S: Maintained
2363N: bcm2835 2370N: bcm2835
2364 2371
2365BROADCOM BCM33XX MIPS ARCHITECTURE
2366M: Kevin Cernekee <cernekee@gmail.com>
2367L: linux-mips@linux-mips.org
2368S: Maintained
2369F: arch/mips/bcm3384/*
2370F: arch/mips/include/asm/mach-bcm3384/*
2371F: arch/mips/kernel/*bmips*
2372
2373BROADCOM BCM47XX MIPS ARCHITECTURE 2372BROADCOM BCM47XX MIPS ARCHITECTURE
2374M: Hauke Mehrtens <hauke@hauke-m.de> 2373M: Hauke Mehrtens <hauke@hauke-m.de>
2375M: Rafał Miłecki <zajec5@gmail.com> 2374M: Rafał Miłecki <zajec5@gmail.com>
@@ -3452,7 +3451,6 @@ F: drivers/usb/dwc2/
3452DESIGNWARE USB3 DRD IP DRIVER 3451DESIGNWARE USB3 DRD IP DRIVER
3453M: Felipe Balbi <balbi@kernel.org> 3452M: Felipe Balbi <balbi@kernel.org>
3454L: linux-usb@vger.kernel.org 3453L: linux-usb@vger.kernel.org
3455L: linux-omap@vger.kernel.org
3456T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 3454T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
3457S: Maintained 3455S: Maintained
3458F: drivers/usb/dwc3/ 3456F: drivers/usb/dwc3/
@@ -6136,7 +6134,7 @@ F: include/uapi/linux/sunrpc/
6136 6134
6137KERNEL SELFTEST FRAMEWORK 6135KERNEL SELFTEST FRAMEWORK
6138M: Shuah Khan <shuahkh@osg.samsung.com> 6136M: Shuah Khan <shuahkh@osg.samsung.com>
6139L: linux-api@vger.kernel.org 6137L: linux-kselftest@vger.kernel.org
6140T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest 6138T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
6141S: Maintained 6139S: Maintained
6142F: tools/testing/selftests 6140F: tools/testing/selftests
@@ -7362,7 +7360,7 @@ F: drivers/tty/isicom.c
7362F: include/linux/isicom.h 7360F: include/linux/isicom.h
7363 7361
7364MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER 7362MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
7365M: Felipe Balbi <balbi@kernel.org> 7363M: Bin Liu <b-liu@ti.com>
7366L: linux-usb@vger.kernel.org 7364L: linux-usb@vger.kernel.org
7367T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 7365T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
7368S: Maintained 7366S: Maintained
@@ -7694,13 +7692,13 @@ S: Maintained
7694F: arch/nios2/ 7692F: arch/nios2/
7695 7693
7696NOKIA N900 POWER SUPPLY DRIVERS 7694NOKIA N900 POWER SUPPLY DRIVERS
7697M: Pali Rohár <pali.rohar@gmail.com> 7695R: Pali Rohár <pali.rohar@gmail.com>
7698S: Maintained
7699F: include/linux/power/bq2415x_charger.h 7696F: include/linux/power/bq2415x_charger.h
7700F: include/linux/power/bq27xxx_battery.h 7697F: include/linux/power/bq27xxx_battery.h
7701F: include/linux/power/isp1704_charger.h 7698F: include/linux/power/isp1704_charger.h
7702F: drivers/power/bq2415x_charger.c 7699F: drivers/power/bq2415x_charger.c
7703F: drivers/power/bq27xxx_battery.c 7700F: drivers/power/bq27xxx_battery.c
7701F: drivers/power/bq27xxx_battery_i2c.c
7704F: drivers/power/isp1704_charger.c 7702F: drivers/power/isp1704_charger.c
7705F: drivers/power/rx51_battery.c 7703F: drivers/power/rx51_battery.c
7706 7704
@@ -7931,11 +7929,9 @@ F: drivers/media/platform/omap3isp/
7931F: drivers/staging/media/omap4iss/ 7929F: drivers/staging/media/omap4iss/
7932 7930
7933OMAP USB SUPPORT 7931OMAP USB SUPPORT
7934M: Felipe Balbi <balbi@kernel.org>
7935L: linux-usb@vger.kernel.org 7932L: linux-usb@vger.kernel.org
7936L: linux-omap@vger.kernel.org 7933L: linux-omap@vger.kernel.org
7937T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 7934S: Orphan
7938S: Maintained
7939F: drivers/usb/*/*omap* 7935F: drivers/usb/*/*omap*
7940F: arch/arm/*omap*/usb* 7936F: arch/arm/*omap*/usb*
7941 7937
@@ -9566,6 +9562,12 @@ M: Andreas Noever <andreas.noever@gmail.com>
9566S: Maintained 9562S: Maintained
9567F: drivers/thunderbolt/ 9563F: drivers/thunderbolt/
9568 9564
9565TI BQ27XXX POWER SUPPLY DRIVER
9566R: Andrew F. Davis <afd@ti.com>
9567F: include/linux/power/bq27xxx_battery.h
9568F: drivers/power/bq27xxx_battery.c
9569F: drivers/power/bq27xxx_battery_i2c.c
9570
9569TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER 9571TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
9570M: John Stultz <john.stultz@linaro.org> 9572M: John Stultz <john.stultz@linaro.org>
9571M: Thomas Gleixner <tglx@linutronix.de> 9573M: Thomas Gleixner <tglx@linutronix.de>
@@ -9787,10 +9789,11 @@ S: Supported
9787F: drivers/scsi/be2iscsi/ 9789F: drivers/scsi/be2iscsi/
9788 9790
9789Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER 9791Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
9790M: Sathya Perla <sathya.perla@avagotech.com> 9792M: Sathya Perla <sathya.perla@broadcom.com>
9791M: Ajit Khaparde <ajit.khaparde@avagotech.com> 9793M: Ajit Khaparde <ajit.khaparde@broadcom.com>
9792M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com> 9794M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
9793M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com> 9795M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
9796M: Somnath Kotur <somnath.kotur@broadcom.com>
9794L: netdev@vger.kernel.org 9797L: netdev@vger.kernel.org
9795W: http://www.emulex.com 9798W: http://www.emulex.com
9796S: Supported 9799S: Supported
@@ -12020,7 +12023,6 @@ F: arch/arm64/xen/
12020F: arch/arm64/include/asm/xen/ 12023F: arch/arm64/include/asm/xen/
12021 12024
12022XEN NETWORK BACKEND DRIVER 12025XEN NETWORK BACKEND DRIVER
12023M: Ian Campbell <ian.campbell@citrix.com>
12024M: Wei Liu <wei.liu2@citrix.com> 12026M: Wei Liu <wei.liu2@citrix.com>
12025L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 12027L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
12026L: netdev@vger.kernel.org 12028L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 682840850f58..af6e5f893d56 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 5 2PATCHLEVEL = 5
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Blurry Fish Butt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 76dde9db7934..8a188bc1786a 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -12,8 +12,6 @@ config ARC
12 select BUILDTIME_EXTABLE_SORT 12 select BUILDTIME_EXTABLE_SORT
13 select COMMON_CLK 13 select COMMON_CLK
14 select CLONE_BACKWARDS 14 select CLONE_BACKWARDS
15 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
16 select DEVTMPFS if !INITRAMFS_SOURCE=""
17 select GENERIC_ATOMIC64 15 select GENERIC_ATOMIC64
18 select GENERIC_CLOCKEVENTS 16 select GENERIC_CLOCKEVENTS
19 select GENERIC_FIND_FIRST_BIT 17 select GENERIC_FIND_FIRST_BIT
@@ -275,14 +273,6 @@ config ARC_DCCM_BASE
275 default "0xA0000000" 273 default "0xA0000000"
276 depends on ARC_HAS_DCCM 274 depends on ARC_HAS_DCCM
277 275
278config ARC_HAS_HW_MPY
279 bool "Use Hardware Multiplier (Normal or Faster XMAC)"
280 default y
281 help
282 Influences how gcc generates code for MPY operations.
283 If enabled, MPYxx insns are generated, provided by Standard/XMAC
284 Multipler. Otherwise software multipy lib is used
285
286choice 276choice
287 prompt "MMU Version" 277 prompt "MMU Version"
288 default ARC_MMU_V3 if ARC_CPU_770 278 default ARC_MMU_V3 if ARC_CPU_770
@@ -338,6 +328,19 @@ config ARC_PAGE_SIZE_4K
338 328
339endchoice 329endchoice
340 330
331choice
332 prompt "MMU Super Page Size"
333 depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
334 default ARC_HUGEPAGE_2M
335
336config ARC_HUGEPAGE_2M
337 bool "2MB"
338
339config ARC_HUGEPAGE_16M
340 bool "16MB"
341
342endchoice
343
341if ISA_ARCOMPACT 344if ISA_ARCOMPACT
342 345
343config ARC_COMPACT_IRQ_LEVELS 346config ARC_COMPACT_IRQ_LEVELS
@@ -410,7 +413,7 @@ config ARC_HAS_RTC
410 default n 413 default n
411 depends on !SMP 414 depends on !SMP
412 415
413config ARC_HAS_GRTC 416config ARC_HAS_GFRC
414 bool "SMP synchronized 64-bit cycle counter" 417 bool "SMP synchronized 64-bit cycle counter"
415 default y 418 default y
416 depends on SMP 419 depends on SMP
@@ -529,14 +532,6 @@ config ARC_DBG_TLB_MISS_COUNT
529 Counts number of I and D TLB Misses and exports them via Debugfs 532 Counts number of I and D TLB Misses and exports them via Debugfs
530 The counters can be cleared via Debugfs as well 533 The counters can be cleared via Debugfs as well
531 534
532if SMP
533
534config ARC_IPI_DBG
535 bool "Debug Inter Core interrupts"
536 default n
537
538endif
539
540endif 535endif
541 536
542config ARC_UBOOT_SUPPORT 537config ARC_UBOOT_SUPPORT
@@ -566,6 +561,12 @@ endmenu
566endmenu # "ARC Architecture Configuration" 561endmenu # "ARC Architecture Configuration"
567 562
568source "mm/Kconfig" 563source "mm/Kconfig"
564
565config FORCE_MAX_ZONEORDER
566 int "Maximum zone order"
567 default "12" if ARC_HUGEPAGE_16M
568 default "11"
569
569source "net/Kconfig" 570source "net/Kconfig"
570source "drivers/Kconfig" 571source "drivers/Kconfig"
571source "fs/Kconfig" 572source "fs/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aeb19021099e..c8230f3395f2 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -74,10 +74,6 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
74# --build-id w/o "-marclinux". Default arc-elf32-ld is OK 74# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
75ldflags-$(upto_gcc44) += -marclinux 75ldflags-$(upto_gcc44) += -marclinux
76 76
77ifndef CONFIG_ARC_HAS_HW_MPY
78 cflags-y += -mno-mpy
79endif
80
81LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name) 77LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
82 78
83# Modules with short calls might break for calls into builtin-kernel 79# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index f1ac9818b751..5d4e2a07ad3e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -39,6 +39,7 @@ CONFIG_IP_PNP_RARP=y
39# CONFIG_INET_XFRM_MODE_TUNNEL is not set 39# CONFIG_INET_XFRM_MODE_TUNNEL is not set
40# CONFIG_INET_XFRM_MODE_BEET is not set 40# CONFIG_INET_XFRM_MODE_BEET is not set
41# CONFIG_IPV6 is not set 41# CONFIG_IPV6 is not set
42CONFIG_DEVTMPFS=y
42# CONFIG_STANDALONE is not set 43# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 44# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44# CONFIG_FIRMWARE_IN_KERNEL is not set 45# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -73,7 +74,6 @@ CONFIG_I2C_CHARDEV=y
73CONFIG_I2C_DESIGNWARE_PLATFORM=y 74CONFIG_I2C_DESIGNWARE_PLATFORM=y
74# CONFIG_HWMON is not set 75# CONFIG_HWMON is not set
75CONFIG_FB=y 76CONFIG_FB=y
76# CONFIG_VGA_CONSOLE is not set
77CONFIG_FRAMEBUFFER_CONSOLE=y 77CONFIG_FRAMEBUFFER_CONSOLE=y
78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
79CONFIG_LOGO=y 79CONFIG_LOGO=y
@@ -91,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
91CONFIG_MMC_DW=y 91CONFIG_MMC_DW=y
92# CONFIG_IOMMU_SUPPORT is not set 92# CONFIG_IOMMU_SUPPORT is not set
93CONFIG_EXT3_FS=y 93CONFIG_EXT3_FS=y
94CONFIG_EXT4_FS=y
95CONFIG_MSDOS_FS=y 94CONFIG_MSDOS_FS=y
96CONFIG_VFAT_FS=y 95CONFIG_VFAT_FS=y
97CONFIG_NTFS_FS=y 96CONFIG_NTFS_FS=y
98CONFIG_TMPFS=y 97CONFIG_TMPFS=y
99CONFIG_JFFS2_FS=y
100CONFIG_NFS_FS=y 98CONFIG_NFS_FS=y
101CONFIG_NLS_CODEPAGE_437=y 99CONFIG_NLS_CODEPAGE_437=y
102CONFIG_NLS_ISO8859_1=y 100CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 323486d6ee83..87ee46b237ef 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -39,14 +39,10 @@ CONFIG_IP_PNP_RARP=y
39# CONFIG_INET_XFRM_MODE_TUNNEL is not set 39# CONFIG_INET_XFRM_MODE_TUNNEL is not set
40# CONFIG_INET_XFRM_MODE_BEET is not set 40# CONFIG_INET_XFRM_MODE_BEET is not set
41# CONFIG_IPV6 is not set 41# CONFIG_IPV6 is not set
42CONFIG_DEVTMPFS=y
42# CONFIG_STANDALONE is not set 43# CONFIG_STANDALONE is not set
43# CONFIG_PREVENT_FIRMWARE_BUILD is not set 44# CONFIG_PREVENT_FIRMWARE_BUILD is not set
44# CONFIG_FIRMWARE_IN_KERNEL is not set 45# CONFIG_FIRMWARE_IN_KERNEL is not set
45CONFIG_MTD=y
46CONFIG_MTD_CMDLINE_PARTS=y
47CONFIG_MTD_BLOCK=y
48CONFIG_MTD_NAND=y
49CONFIG_MTD_NAND_AXS=y
50CONFIG_SCSI=y 46CONFIG_SCSI=y
51CONFIG_BLK_DEV_SD=y 47CONFIG_BLK_DEV_SD=y
52CONFIG_NETDEVICES=y 48CONFIG_NETDEVICES=y
@@ -78,14 +74,12 @@ CONFIG_I2C_CHARDEV=y
78CONFIG_I2C_DESIGNWARE_PLATFORM=y 74CONFIG_I2C_DESIGNWARE_PLATFORM=y
79# CONFIG_HWMON is not set 75# CONFIG_HWMON is not set
80CONFIG_FB=y 76CONFIG_FB=y
81# CONFIG_VGA_CONSOLE is not set
82CONFIG_FRAMEBUFFER_CONSOLE=y 77CONFIG_FRAMEBUFFER_CONSOLE=y
83CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 78CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
84CONFIG_LOGO=y 79CONFIG_LOGO=y
85# CONFIG_LOGO_LINUX_MONO is not set 80# CONFIG_LOGO_LINUX_MONO is not set
86# CONFIG_LOGO_LINUX_VGA16 is not set 81# CONFIG_LOGO_LINUX_VGA16 is not set
87# CONFIG_LOGO_LINUX_CLUT224 is not set 82# CONFIG_LOGO_LINUX_CLUT224 is not set
88CONFIG_USB=y
89CONFIG_USB_EHCI_HCD=y 83CONFIG_USB_EHCI_HCD=y
90CONFIG_USB_EHCI_HCD_PLATFORM=y 84CONFIG_USB_EHCI_HCD_PLATFORM=y
91CONFIG_USB_OHCI_HCD=y 85CONFIG_USB_OHCI_HCD=y
@@ -97,12 +91,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
97CONFIG_MMC_DW=y 91CONFIG_MMC_DW=y
98# CONFIG_IOMMU_SUPPORT is not set 92# CONFIG_IOMMU_SUPPORT is not set
99CONFIG_EXT3_FS=y 93CONFIG_EXT3_FS=y
100CONFIG_EXT4_FS=y
101CONFIG_MSDOS_FS=y 94CONFIG_MSDOS_FS=y
102CONFIG_VFAT_FS=y 95CONFIG_VFAT_FS=y
103CONFIG_NTFS_FS=y 96CONFIG_NTFS_FS=y
104CONFIG_TMPFS=y 97CONFIG_TMPFS=y
105CONFIG_JFFS2_FS=y
106CONFIG_NFS_FS=y 98CONFIG_NFS_FS=y
107CONFIG_NLS_CODEPAGE_437=y 99CONFIG_NLS_CODEPAGE_437=y
108CONFIG_NLS_ISO8859_1=y 100CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 66191cd0447e..d80daf4f7e73 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -40,14 +40,10 @@ CONFIG_IP_PNP_RARP=y
40# CONFIG_INET_XFRM_MODE_TUNNEL is not set 40# CONFIG_INET_XFRM_MODE_TUNNEL is not set
41# CONFIG_INET_XFRM_MODE_BEET is not set 41# CONFIG_INET_XFRM_MODE_BEET is not set
42# CONFIG_IPV6 is not set 42# CONFIG_IPV6 is not set
43CONFIG_DEVTMPFS=y
43# CONFIG_STANDALONE is not set 44# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 45# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45# CONFIG_FIRMWARE_IN_KERNEL is not set 46# CONFIG_FIRMWARE_IN_KERNEL is not set
46CONFIG_MTD=y
47CONFIG_MTD_CMDLINE_PARTS=y
48CONFIG_MTD_BLOCK=y
49CONFIG_MTD_NAND=y
50CONFIG_MTD_NAND_AXS=y
51CONFIG_SCSI=y 47CONFIG_SCSI=y
52CONFIG_BLK_DEV_SD=y 48CONFIG_BLK_DEV_SD=y
53CONFIG_NETDEVICES=y 49CONFIG_NETDEVICES=y
@@ -79,14 +75,12 @@ CONFIG_I2C_CHARDEV=y
79CONFIG_I2C_DESIGNWARE_PLATFORM=y 75CONFIG_I2C_DESIGNWARE_PLATFORM=y
80# CONFIG_HWMON is not set 76# CONFIG_HWMON is not set
81CONFIG_FB=y 77CONFIG_FB=y
82# CONFIG_VGA_CONSOLE is not set
83CONFIG_FRAMEBUFFER_CONSOLE=y 78CONFIG_FRAMEBUFFER_CONSOLE=y
84CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y 79CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
85CONFIG_LOGO=y 80CONFIG_LOGO=y
86# CONFIG_LOGO_LINUX_MONO is not set 81# CONFIG_LOGO_LINUX_MONO is not set
87# CONFIG_LOGO_LINUX_VGA16 is not set 82# CONFIG_LOGO_LINUX_VGA16 is not set
88# CONFIG_LOGO_LINUX_CLUT224 is not set 83# CONFIG_LOGO_LINUX_CLUT224 is not set
89CONFIG_USB=y
90CONFIG_USB_EHCI_HCD=y 84CONFIG_USB_EHCI_HCD=y
91CONFIG_USB_EHCI_HCD_PLATFORM=y 85CONFIG_USB_EHCI_HCD_PLATFORM=y
92CONFIG_USB_OHCI_HCD=y 86CONFIG_USB_OHCI_HCD=y
@@ -98,12 +92,10 @@ CONFIG_MMC_SDHCI_PLTFM=y
98CONFIG_MMC_DW=y 92CONFIG_MMC_DW=y
99# CONFIG_IOMMU_SUPPORT is not set 93# CONFIG_IOMMU_SUPPORT is not set
100CONFIG_EXT3_FS=y 94CONFIG_EXT3_FS=y
101CONFIG_EXT4_FS=y
102CONFIG_MSDOS_FS=y 95CONFIG_MSDOS_FS=y
103CONFIG_VFAT_FS=y 96CONFIG_VFAT_FS=y
104CONFIG_NTFS_FS=y 97CONFIG_NTFS_FS=y
105CONFIG_TMPFS=y 98CONFIG_TMPFS=y
106CONFIG_JFFS2_FS=y
107CONFIG_NFS_FS=y 99CONFIG_NFS_FS=y
108CONFIG_NLS_CODEPAGE_437=y 100CONFIG_NLS_CODEPAGE_437=y
109CONFIG_NLS_ISO8859_1=y 101CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 138f9d887957..f41095340b6a 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -4,6 +4,7 @@ CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
5CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
7# CONFIG_CROSS_MEMORY_ATTACH is not set
7CONFIG_HIGH_RES_TIMERS=y 8CONFIG_HIGH_RES_TIMERS=y
8CONFIG_IKCONFIG=y 9CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y 10CONFIG_IKCONFIG_PROC=y
@@ -26,7 +27,6 @@ CONFIG_ARC_PLAT_SIM=y
26CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700" 27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
27CONFIG_PREEMPT=y 28CONFIG_PREEMPT=y
28# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
29# CONFIG_CROSS_MEMORY_ATTACH is not set
30CONFIG_NET=y 30CONFIG_NET=y
31CONFIG_PACKET=y 31CONFIG_PACKET=y
32CONFIG_UNIX=y 32CONFIG_UNIX=y
@@ -34,6 +34,7 @@ CONFIG_UNIX_DIAG=y
34CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
35CONFIG_INET=y 35CONFIG_INET=y
36# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
37# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
38# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
39# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -51,7 +52,6 @@ CONFIG_SERIAL_ARC=y
51CONFIG_SERIAL_ARC_CONSOLE=y 52CONFIG_SERIAL_ARC_CONSOLE=y
52# CONFIG_HW_RANDOM is not set 53# CONFIG_HW_RANDOM is not set
53# CONFIG_HWMON is not set 54# CONFIG_HWMON is not set
54# CONFIG_VGA_CONSOLE is not set
55# CONFIG_HID is not set 55# CONFIG_HID is not set
56# CONFIG_USB_SUPPORT is not set 56# CONFIG_USB_SUPPORT is not set
57# CONFIG_IOMMU_SUPPORT is not set 57# CONFIG_IOMMU_SUPPORT is not set
@@ -63,4 +63,3 @@ CONFIG_NFS_FS=y
63# CONFIG_ENABLE_WARN_DEPRECATED is not set 63# CONFIG_ENABLE_WARN_DEPRECATED is not set
64# CONFIG_ENABLE_MUST_CHECK is not set 64# CONFIG_ENABLE_MUST_CHECK is not set
65# CONFIG_DEBUG_PREEMPT is not set 65# CONFIG_DEBUG_PREEMPT is not set
66CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f68838e8068a..cfaa33cb5921 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -35,6 +35,7 @@ CONFIG_UNIX_DIAG=y
35CONFIG_NET_KEY=y 35CONFIG_NET_KEY=y
36CONFIG_INET=y 36CONFIG_INET=y
37# CONFIG_IPV6 is not set 37# CONFIG_IPV6 is not set
38CONFIG_DEVTMPFS=y
38# CONFIG_STANDALONE is not set 39# CONFIG_STANDALONE is not set
39# CONFIG_PREVENT_FIRMWARE_BUILD is not set 40# CONFIG_PREVENT_FIRMWARE_BUILD is not set
40# CONFIG_FIRMWARE_IN_KERNEL is not set 41# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +50,6 @@ CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 50CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 51# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 52# CONFIG_HWMON is not set
52# CONFIG_VGA_CONSOLE is not set
53# CONFIG_HID is not set 53# CONFIG_HID is not set
54# CONFIG_USB_SUPPORT is not set 54# CONFIG_USB_SUPPORT is not set
55# CONFIG_IOMMU_SUPPORT is not set 55# CONFIG_IOMMU_SUPPORT is not set
@@ -61,4 +61,3 @@ CONFIG_NFS_FS=y
61# CONFIG_ENABLE_WARN_DEPRECATED is not set 61# CONFIG_ENABLE_WARN_DEPRECATED is not set
62# CONFIG_ENABLE_MUST_CHECK is not set 62# CONFIG_ENABLE_MUST_CHECK is not set
63# CONFIG_DEBUG_PREEMPT is not set 63# CONFIG_DEBUG_PREEMPT is not set
64CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 96bd1c20fb0b..bb2a8dc778b5 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
2# CONFIG_LOCALVERSION_AUTO is not set 2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_DEFAULT_HOSTNAME="ARCLinux" 3CONFIG_DEFAULT_HOSTNAME="ARCLinux"
4# CONFIG_SWAP is not set 4# CONFIG_SWAP is not set
5# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_HIGH_RES_TIMERS=y 6CONFIG_HIGH_RES_TIMERS=y
6CONFIG_IKCONFIG=y 7CONFIG_IKCONFIG=y
7CONFIG_IKCONFIG_PROC=y 8CONFIG_IKCONFIG_PROC=y
@@ -21,13 +22,11 @@ CONFIG_MODULES=y
21# CONFIG_IOSCHED_DEADLINE is not set 22# CONFIG_IOSCHED_DEADLINE is not set
22# CONFIG_IOSCHED_CFQ is not set 23# CONFIG_IOSCHED_CFQ is not set
23CONFIG_ARC_PLAT_SIM=y 24CONFIG_ARC_PLAT_SIM=y
24CONFIG_ARC_BOARD_ML509=y
25CONFIG_ISA_ARCV2=y 25CONFIG_ISA_ARCV2=y
26CONFIG_SMP=y 26CONFIG_SMP=y
27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu" 27CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
28CONFIG_PREEMPT=y 28CONFIG_PREEMPT=y
29# CONFIG_COMPACTION is not set 29# CONFIG_COMPACTION is not set
30# CONFIG_CROSS_MEMORY_ATTACH is not set
31CONFIG_NET=y 30CONFIG_NET=y
32CONFIG_PACKET=y 31CONFIG_PACKET=y
33CONFIG_UNIX=y 32CONFIG_UNIX=y
@@ -35,6 +34,7 @@ CONFIG_UNIX_DIAG=y
35CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
36CONFIG_INET=y 35CONFIG_INET=y
37# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
38# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
39# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
40# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +49,6 @@ CONFIG_SERIAL_ARC=y
49CONFIG_SERIAL_ARC_CONSOLE=y 49CONFIG_SERIAL_ARC_CONSOLE=y
50# CONFIG_HW_RANDOM is not set 50# CONFIG_HW_RANDOM is not set
51# CONFIG_HWMON is not set 51# CONFIG_HWMON is not set
52# CONFIG_VGA_CONSOLE is not set
53# CONFIG_HID is not set 52# CONFIG_HID is not set
54# CONFIG_USB_SUPPORT is not set 53# CONFIG_USB_SUPPORT is not set
55# CONFIG_IOMMU_SUPPORT is not set 54# CONFIG_IOMMU_SUPPORT is not set
@@ -60,4 +59,3 @@ CONFIG_TMPFS=y
60CONFIG_NFS_FS=y 59CONFIG_NFS_FS=y
61# CONFIG_ENABLE_WARN_DEPRECATED is not set 60# CONFIG_ENABLE_WARN_DEPRECATED is not set
62# CONFIG_ENABLE_MUST_CHECK is not set 61# CONFIG_ENABLE_MUST_CHECK is not set
63CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 31e1d95764ff..646182e93753 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -33,6 +33,7 @@ CONFIG_UNIX_DIAG=y
33CONFIG_NET_KEY=y 33CONFIG_NET_KEY=y
34CONFIG_INET=y 34CONFIG_INET=y
35# CONFIG_IPV6 is not set 35# CONFIG_IPV6 is not set
36CONFIG_DEVTMPFS=y
36# CONFIG_STANDALONE is not set 37# CONFIG_STANDALONE is not set
37# CONFIG_PREVENT_FIRMWARE_BUILD is not set 38# CONFIG_PREVENT_FIRMWARE_BUILD is not set
38# CONFIG_FIRMWARE_IN_KERNEL is not set 39# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -58,7 +59,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_HW_RANDOM is not set 59# CONFIG_HW_RANDOM is not set
59# CONFIG_HWMON is not set 60# CONFIG_HWMON is not set
60CONFIG_FB=y 61CONFIG_FB=y
61# CONFIG_VGA_CONSOLE is not set
62CONFIG_FRAMEBUFFER_CONSOLE=y 62CONFIG_FRAMEBUFFER_CONSOLE=y
63CONFIG_LOGO=y 63CONFIG_LOGO=y
64# CONFIG_HID is not set 64# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index fcae66683ca0..ceca2541950d 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -34,12 +34,12 @@ CONFIG_UNIX_DIAG=y
34CONFIG_NET_KEY=y 34CONFIG_NET_KEY=y
35CONFIG_INET=y 35CONFIG_INET=y
36# CONFIG_IPV6 is not set 36# CONFIG_IPV6 is not set
37CONFIG_DEVTMPFS=y
37# CONFIG_STANDALONE is not set 38# CONFIG_STANDALONE is not set
38# CONFIG_PREVENT_FIRMWARE_BUILD is not set 39# CONFIG_PREVENT_FIRMWARE_BUILD is not set
39# CONFIG_FIRMWARE_IN_KERNEL is not set 40# CONFIG_FIRMWARE_IN_KERNEL is not set
40# CONFIG_BLK_DEV is not set 41# CONFIG_BLK_DEV is not set
41CONFIG_NETDEVICES=y 42CONFIG_NETDEVICES=y
42CONFIG_NET_OSCI_LAN=y
43CONFIG_INPUT_EVDEV=y 43CONFIG_INPUT_EVDEV=y
44# CONFIG_MOUSE_PS2_ALPS is not set 44# CONFIG_MOUSE_PS2_ALPS is not set
45# CONFIG_MOUSE_PS2_LOGIPS2PP is not set 45# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
@@ -58,7 +58,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
58# CONFIG_HW_RANDOM is not set 58# CONFIG_HW_RANDOM is not set
59# CONFIG_HWMON is not set 59# CONFIG_HWMON is not set
60CONFIG_FB=y 60CONFIG_FB=y
61# CONFIG_VGA_CONSOLE is not set
62CONFIG_FRAMEBUFFER_CONSOLE=y 61CONFIG_FRAMEBUFFER_CONSOLE=y
63CONFIG_LOGO=y 62CONFIG_LOGO=y
64# CONFIG_HID is not set 63# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index b01b659168ea..4b6da90f6f26 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -2,6 +2,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
2CONFIG_DEFAULT_HOSTNAME="ARCLinux" 2CONFIG_DEFAULT_HOSTNAME="ARCLinux"
3# CONFIG_SWAP is not set 3# CONFIG_SWAP is not set
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5# CONFIG_CROSS_MEMORY_ATTACH is not set
5CONFIG_NO_HZ=y 6CONFIG_NO_HZ=y
6CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
7CONFIG_IKCONFIG=y 8CONFIG_IKCONFIG=y
@@ -18,15 +19,11 @@ CONFIG_MODULES=y
18# CONFIG_IOSCHED_DEADLINE is not set 19# CONFIG_IOSCHED_DEADLINE is not set
19# CONFIG_IOSCHED_CFQ is not set 20# CONFIG_IOSCHED_CFQ is not set
20CONFIG_ARC_PLAT_SIM=y 21CONFIG_ARC_PLAT_SIM=y
21CONFIG_ARC_BOARD_ML509=y
22CONFIG_ISA_ARCV2=y 22CONFIG_ISA_ARCV2=y
23CONFIG_SMP=y 23CONFIG_SMP=y
24CONFIG_ARC_HAS_LL64=y
25# CONFIG_ARC_HAS_RTSC is not set
26CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu" 24CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
27CONFIG_PREEMPT=y 25CONFIG_PREEMPT=y
28# CONFIG_COMPACTION is not set 26# CONFIG_COMPACTION is not set
29# CONFIG_CROSS_MEMORY_ATTACH is not set
30CONFIG_NET=y 27CONFIG_NET=y
31CONFIG_PACKET=y 28CONFIG_PACKET=y
32CONFIG_PACKET_DIAG=y 29CONFIG_PACKET_DIAG=y
@@ -40,6 +37,7 @@ CONFIG_INET=y
40# CONFIG_INET_LRO is not set 37# CONFIG_INET_LRO is not set
41# CONFIG_IPV6 is not set 38# CONFIG_IPV6 is not set
42# CONFIG_WIRELESS is not set 39# CONFIG_WIRELESS is not set
40CONFIG_DEVTMPFS=y
43# CONFIG_STANDALONE is not set 41# CONFIG_STANDALONE is not set
44# CONFIG_PREVENT_FIRMWARE_BUILD is not set 42# CONFIG_PREVENT_FIRMWARE_BUILD is not set
45# CONFIG_FIRMWARE_IN_KERNEL is not set 43# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -56,14 +54,11 @@ CONFIG_NETDEVICES=y
56# CONFIG_NET_VENDOR_STMICRO is not set 54# CONFIG_NET_VENDOR_STMICRO is not set
57# CONFIG_NET_VENDOR_VIA is not set 55# CONFIG_NET_VENDOR_VIA is not set
58# CONFIG_NET_VENDOR_WIZNET is not set 56# CONFIG_NET_VENDOR_WIZNET is not set
59CONFIG_NET_OSCI_LAN=y
60# CONFIG_WLAN is not set 57# CONFIG_WLAN is not set
61CONFIG_INPUT_EVDEV=y 58CONFIG_INPUT_EVDEV=y
62CONFIG_MOUSE_PS2_TOUCHKIT=y 59CONFIG_MOUSE_PS2_TOUCHKIT=y
63# CONFIG_SERIO_SERPORT is not set 60# CONFIG_SERIO_SERPORT is not set
64CONFIG_SERIO_LIBPS2=y
65CONFIG_SERIO_ARC_PS2=y 61CONFIG_SERIO_ARC_PS2=y
66CONFIG_VT_HW_CONSOLE_BINDING=y
67# CONFIG_LEGACY_PTYS is not set 62# CONFIG_LEGACY_PTYS is not set
68# CONFIG_DEVKMEM is not set 63# CONFIG_DEVKMEM is not set
69CONFIG_SERIAL_8250=y 64CONFIG_SERIAL_8250=y
@@ -75,9 +70,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
75# CONFIG_HW_RANDOM is not set 70# CONFIG_HW_RANDOM is not set
76# CONFIG_HWMON is not set 71# CONFIG_HWMON is not set
77CONFIG_FB=y 72CONFIG_FB=y
78CONFIG_ARCPGU_RGB888=y
79CONFIG_ARCPGU_DISPTYPE=0
80# CONFIG_VGA_CONSOLE is not set
81CONFIG_FRAMEBUFFER_CONSOLE=y 73CONFIG_FRAMEBUFFER_CONSOLE=y
82CONFIG_LOGO=y 74CONFIG_LOGO=y
83# CONFIG_HID is not set 75# CONFIG_HID is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3b4dc9cebcf1..9b342eaf95ae 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -3,6 +3,7 @@ CONFIG_CROSS_COMPILE="arc-linux-"
3CONFIG_DEFAULT_HOSTNAME="tb10x" 3CONFIG_DEFAULT_HOSTNAME="tb10x"
4CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
6# CONFIG_CROSS_MEMORY_ATTACH is not set
6CONFIG_HIGH_RES_TIMERS=y 7CONFIG_HIGH_RES_TIMERS=y
7CONFIG_BSD_PROCESS_ACCT=y 8CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y 9CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -26,12 +27,10 @@ CONFIG_MODULE_UNLOAD=y
26# CONFIG_BLOCK is not set 27# CONFIG_BLOCK is not set
27CONFIG_ARC_PLAT_TB10X=y 28CONFIG_ARC_PLAT_TB10X=y
28CONFIG_ARC_CACHE_LINE_SHIFT=5 29CONFIG_ARC_CACHE_LINE_SHIFT=5
29CONFIG_ARC_STACK_NONEXEC=y
30CONFIG_HZ=250 30CONFIG_HZ=250
31CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" 31CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
32CONFIG_PREEMPT_VOLUNTARY=y 32CONFIG_PREEMPT_VOLUNTARY=y
33# CONFIG_COMPACTION is not set 33# CONFIG_COMPACTION is not set
34# CONFIG_CROSS_MEMORY_ATTACH is not set
35CONFIG_NET=y 34CONFIG_NET=y
36CONFIG_PACKET=y 35CONFIG_PACKET=y
37CONFIG_UNIX=y 36CONFIG_UNIX=y
@@ -44,8 +43,8 @@ CONFIG_IP_MULTICAST=y
44# CONFIG_INET_DIAG is not set 43# CONFIG_INET_DIAG is not set
45# CONFIG_IPV6 is not set 44# CONFIG_IPV6 is not set
46# CONFIG_WIRELESS is not set 45# CONFIG_WIRELESS is not set
46CONFIG_DEVTMPFS=y
47# CONFIG_FIRMWARE_IN_KERNEL is not set 47# CONFIG_FIRMWARE_IN_KERNEL is not set
48CONFIG_PROC_DEVICETREE=y
49CONFIG_NETDEVICES=y 48CONFIG_NETDEVICES=y
50# CONFIG_NET_CADENCE is not set 49# CONFIG_NET_CADENCE is not set
51# CONFIG_NET_VENDOR_BROADCOM is not set 50# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -55,9 +54,6 @@ CONFIG_NETDEVICES=y
55# CONFIG_NET_VENDOR_NATSEMI is not set 54# CONFIG_NET_VENDOR_NATSEMI is not set
56# CONFIG_NET_VENDOR_SEEQ is not set 55# CONFIG_NET_VENDOR_SEEQ is not set
57CONFIG_STMMAC_ETH=y 56CONFIG_STMMAC_ETH=y
58CONFIG_STMMAC_DEBUG_FS=y
59CONFIG_STMMAC_DA=y
60CONFIG_STMMAC_CHAINED=y
61# CONFIG_NET_VENDOR_WIZNET is not set 57# CONFIG_NET_VENDOR_WIZNET is not set
62# CONFIG_WLAN is not set 58# CONFIG_WLAN is not set
63# CONFIG_INPUT is not set 59# CONFIG_INPUT is not set
@@ -91,7 +87,6 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
91CONFIG_LEDS_TRIGGER_TRANSIENT=y 87CONFIG_LEDS_TRIGGER_TRANSIENT=y
92CONFIG_DMADEVICES=y 88CONFIG_DMADEVICES=y
93CONFIG_DW_DMAC=y 89CONFIG_DW_DMAC=y
94CONFIG_NET_DMA=y
95CONFIG_ASYNC_TX_DMA=y 90CONFIG_ASYNC_TX_DMA=y
96# CONFIG_IOMMU_SUPPORT is not set 91# CONFIG_IOMMU_SUPPORT is not set
97# CONFIG_DNOTIFY is not set 92# CONFIG_DNOTIFY is not set
@@ -100,17 +95,16 @@ CONFIG_TMPFS=y
100CONFIG_CONFIGFS_FS=y 95CONFIG_CONFIGFS_FS=y
101# CONFIG_MISC_FILESYSTEMS is not set 96# CONFIG_MISC_FILESYSTEMS is not set
102# CONFIG_NETWORK_FILESYSTEMS is not set 97# CONFIG_NETWORK_FILESYSTEMS is not set
98CONFIG_DEBUG_INFO=y
103# CONFIG_ENABLE_WARN_DEPRECATED is not set 99# CONFIG_ENABLE_WARN_DEPRECATED is not set
104CONFIG_MAGIC_SYSRQ=y
105CONFIG_STRIP_ASM_SYMS=y 100CONFIG_STRIP_ASM_SYMS=y
106CONFIG_DEBUG_FS=y 101CONFIG_DEBUG_FS=y
107CONFIG_HEADERS_CHECK=y 102CONFIG_HEADERS_CHECK=y
108CONFIG_DEBUG_SECTION_MISMATCH=y 103CONFIG_DEBUG_SECTION_MISMATCH=y
104CONFIG_MAGIC_SYSRQ=y
105CONFIG_DEBUG_MEMORY_INIT=y
106CONFIG_DEBUG_STACKOVERFLOW=y
109CONFIG_DETECT_HUNG_TASK=y 107CONFIG_DETECT_HUNG_TASK=y
110CONFIG_SCHEDSTATS=y 108CONFIG_SCHEDSTATS=y
111CONFIG_TIMER_STATS=y 109CONFIG_TIMER_STATS=y
112CONFIG_DEBUG_INFO=y
113CONFIG_DEBUG_MEMORY_INIT=y
114CONFIG_DEBUG_STACKOVERFLOW=y
115# CONFIG_CRYPTO_ANSI_CPRNG is not set
116# CONFIG_CRYPTO_HW is not set 110# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index f36c047b33ca..735985974a31 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y
16CONFIG_AXS103=y 16CONFIG_AXS103=y
17CONFIG_ISA_ARCV2=y 17CONFIG_ISA_ARCV2=y
18CONFIG_SMP=y 18CONFIG_SMP=y
19# CONFIG_ARC_HAS_GRTC is not set 19# CONFIG_ARC_HAS_GFRC is not set
20CONFIG_ARC_UBOOT_SUPPORT=y 20CONFIG_ARC_UBOOT_SUPPORT=y
21CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" 21CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
22CONFIG_PREEMPT=y 22CONFIG_PREEMPT=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fac7d85ed6a..f9f4c6f59fdb 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -10,7 +10,8 @@
10#define _ASM_ARC_ARCREGS_H 10#define _ASM_ARC_ARCREGS_H
11 11
12/* Build Configuration Registers */ 12/* Build Configuration Registers */
13#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */ 13#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
14#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
14#define ARC_REG_CRC_BCR 0x62 15#define ARC_REG_CRC_BCR 0x62
15#define ARC_REG_VECBASE_BCR 0x68 16#define ARC_REG_VECBASE_BCR 0x68
16#define ARC_REG_PERIBASE_BCR 0x69 17#define ARC_REG_PERIBASE_BCR 0x69
@@ -18,10 +19,10 @@
18#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */ 19#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
19#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */ 20#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
20#define ARC_REG_SLC_BCR 0xce 21#define ARC_REG_SLC_BCR 0xce
21#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ 22#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
22#define ARC_REG_TIMERS_BCR 0x75 23#define ARC_REG_TIMERS_BCR 0x75
23#define ARC_REG_AP_BCR 0x76 24#define ARC_REG_AP_BCR 0x76
24#define ARC_REG_ICCM_BCR 0x78 25#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
25#define ARC_REG_XY_MEM_BCR 0x79 26#define ARC_REG_XY_MEM_BCR 0x79
26#define ARC_REG_MAC_BCR 0x7a 27#define ARC_REG_MAC_BCR 0x7a
27#define ARC_REG_MUL_BCR 0x7b 28#define ARC_REG_MUL_BCR 0x7b
@@ -36,6 +37,7 @@
36#define ARC_REG_IRQ_BCR 0xF3 37#define ARC_REG_IRQ_BCR 0xF3
37#define ARC_REG_SMART_BCR 0xFF 38#define ARC_REG_SMART_BCR 0xFF
38#define ARC_REG_CLUSTER_BCR 0xcf 39#define ARC_REG_CLUSTER_BCR 0xcf
40#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
39 41
40/* status32 Bits Positions */ 42/* status32 Bits Positions */
41#define STATUS_AE_BIT 5 /* Exception active */ 43#define STATUS_AE_BIT 5 /* Exception active */
@@ -246,7 +248,7 @@ struct bcr_perip {
246#endif 248#endif
247}; 249};
248 250
249struct bcr_iccm { 251struct bcr_iccm_arcompact {
250#ifdef CONFIG_CPU_BIG_ENDIAN 252#ifdef CONFIG_CPU_BIG_ENDIAN
251 unsigned int base:16, pad:5, sz:3, ver:8; 253 unsigned int base:16, pad:5, sz:3, ver:8;
252#else 254#else
@@ -254,17 +256,15 @@ struct bcr_iccm {
254#endif 256#endif
255}; 257};
256 258
257/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */ 259struct bcr_iccm_arcv2 {
258struct bcr_dccm_base {
259#ifdef CONFIG_CPU_BIG_ENDIAN 260#ifdef CONFIG_CPU_BIG_ENDIAN
260 unsigned int addr:24, ver:8; 261 unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
261#else 262#else
262 unsigned int ver:8, addr:24; 263 unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
263#endif 264#endif
264}; 265};
265 266
266/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */ 267struct bcr_dccm_arcompact {
267struct bcr_dccm {
268#ifdef CONFIG_CPU_BIG_ENDIAN 268#ifdef CONFIG_CPU_BIG_ENDIAN
269 unsigned int res:21, sz:3, ver:8; 269 unsigned int res:21, sz:3, ver:8;
270#else 270#else
@@ -272,6 +272,14 @@ struct bcr_dccm {
272#endif 272#endif
273}; 273};
274 274
275struct bcr_dccm_arcv2 {
276#ifdef CONFIG_CPU_BIG_ENDIAN
277 unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
278#else
279 unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
280#endif
281};
282
275/* ARCompact: Both SP and DP FPU BCRs have same format */ 283/* ARCompact: Both SP and DP FPU BCRs have same format */
276struct bcr_fp_arcompact { 284struct bcr_fp_arcompact {
277#ifdef CONFIG_CPU_BIG_ENDIAN 285#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -315,9 +323,9 @@ struct bcr_bpu_arcv2 {
315 323
316struct bcr_generic { 324struct bcr_generic {
317#ifdef CONFIG_CPU_BIG_ENDIAN 325#ifdef CONFIG_CPU_BIG_ENDIAN
318 unsigned int pad:24, ver:8; 326 unsigned int info:24, ver:8;
319#else 327#else
320 unsigned int ver:8, pad:24; 328 unsigned int ver:8, info:24;
321#endif 329#endif
322}; 330};
323 331
@@ -349,14 +357,13 @@ struct cpuinfo_arc {
349 struct cpuinfo_arc_bpu bpu; 357 struct cpuinfo_arc_bpu bpu;
350 struct bcr_identity core; 358 struct bcr_identity core;
351 struct bcr_isa isa; 359 struct bcr_isa isa;
352 struct bcr_timer timers;
353 unsigned int vec_base; 360 unsigned int vec_base;
354 struct cpuinfo_arc_ccm iccm, dccm; 361 struct cpuinfo_arc_ccm iccm, dccm;
355 struct { 362 struct {
356 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, 363 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
357 fpu_sp:1, fpu_dp:1, pad2:6, 364 fpu_sp:1, fpu_dp:1, pad2:6,
358 debug:1, ap:1, smart:1, rtt:1, pad3:4, 365 debug:1, ap:1, smart:1, rtt:1, pad3:4,
359 pad4:8; 366 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
360 } extn; 367 } extn;
361 struct bcr_mpy extn_mpy; 368 struct bcr_mpy extn_mpy;
362 struct bcr_extn_xymem extn_xymem; 369 struct bcr_extn_xymem extn_xymem;
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4fd7d62a6e30..49014f0ef36d 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,11 +16,9 @@
16#ifdef CONFIG_ISA_ARCOMPACT 16#ifdef CONFIG_ISA_ARCOMPACT
17#define TIMER0_IRQ 3 17#define TIMER0_IRQ 3
18#define TIMER1_IRQ 4 18#define TIMER1_IRQ 4
19#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
20#else 19#else
21#define TIMER0_IRQ 16 20#define TIMER0_IRQ 16
22#define TIMER1_IRQ 17 21#define TIMER1_IRQ 17
23#define IPI_IRQ 19
24#endif 22#endif
25 23
26#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 258b0e5ad332..37c2f751eebf 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
22#define AUX_IRQ_CTRL 0x00E 22#define AUX_IRQ_CTRL 0x00E
23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ 23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
24#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */ 24#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
25#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
25#define AUX_IRQ_PRIORITY 0x206 26#define AUX_IRQ_PRIORITY 0x206
26#define ICAUSE 0x40a 27#define ICAUSE 0x40a
27#define AUX_IRQ_SELECT 0x40b 28#define AUX_IRQ_SELECT 0x40b
@@ -30,8 +31,11 @@
30/* Was Intr taken in User Mode */ 31/* Was Intr taken in User Mode */
31#define AUX_IRQ_ACT_BIT_U 31 32#define AUX_IRQ_ACT_BIT_U 31
32 33
33/* 0 is highest level, but taken by FIRQs, if present in design */ 34/*
34#define ARCV2_IRQ_DEF_PRIO 0 35 * User space should be interruptable even by lowest prio interrupt
36 * Safe even if actual interrupt priorities is fewer or even one
37 */
38#define ARCV2_IRQ_DEF_PRIO 15
35 39
36/* seed value for status register */ 40/* seed value for status register */
37#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ 41#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
@@ -112,6 +116,16 @@ static inline int arch_irqs_disabled(void)
112 return arch_irqs_disabled_flags(arch_local_save_flags()); 116 return arch_irqs_disabled_flags(arch_local_save_flags());
113} 117}
114 118
119static inline void arc_softirq_trigger(int irq)
120{
121 write_aux_reg(AUX_IRQ_HINT, irq);
122}
123
124static inline void arc_softirq_clear(int irq)
125{
126 write_aux_reg(AUX_IRQ_HINT, 0);
127}
128
115#else 129#else
116 130
117.macro IRQ_DISABLE scratch 131.macro IRQ_DISABLE scratch
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 46f4e5351b2a..847e3bbe387f 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -39,8 +39,8 @@ struct mcip_cmd {
39#define CMD_DEBUG_SET_MASK 0x34 39#define CMD_DEBUG_SET_MASK 0x34
40#define CMD_DEBUG_SET_SELECT 0x36 40#define CMD_DEBUG_SET_SELECT 0x36
41 41
42#define CMD_GRTC_READ_LO 0x42 42#define CMD_GFRC_READ_LO 0x42
43#define CMD_GRTC_READ_HI 0x43 43#define CMD_GFRC_READ_HI 0x43
44 44
45#define CMD_IDU_ENABLE 0x71 45#define CMD_IDU_ENABLE 0x71
46#define CMD_IDU_DISABLE 0x72 46#define CMD_IDU_DISABLE 0x72
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 57af2f05ae84..d426d4215513 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -179,37 +179,44 @@
179#define __S111 PAGE_U_X_W_R 179#define __S111 PAGE_U_X_W_R
180 180
181/**************************************************************** 181/****************************************************************
182 * Page Table Lookup split 182 * 2 tier (PGD:PTE) software page walker
183 * 183 *
184 * We implement 2 tier paging and since this is all software, we are free 184 * [31] 32 bit virtual address [0]
185 * to customize the span of a PGD / PTE entry to suit us
186 *
187 * 32 bit virtual address
188 * ------------------------------------------------------- 185 * -------------------------------------------------------
189 * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE | 186 * | | <------------ PGDIR_SHIFT ----------> |
187 * | | |
188 * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> |
190 * ------------------------------------------------------- 189 * -------------------------------------------------------
191 * | | | 190 * | | |
192 * | | --> off in page frame 191 * | | --> off in page frame
193 * | |
194 * | ---> index into Page Table 192 * | ---> index into Page Table
195 * |
196 * ----> index into Page Directory 193 * ----> index into Page Directory
194 *
195 * In a single page size configuration, only PAGE_SHIFT is fixed
196 * So both PGD and PTE sizing can be tweaked
197 * e.g. 8K page (PAGE_SHIFT 13) can have
198 * - PGDIR_SHIFT 21 -> 11:8:13 address split
199 * - PGDIR_SHIFT 24 -> 8:11:13 address split
200 *
201 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
202 * so the sizing flexibility is gone.
197 */ 203 */
198 204
199#define BITS_IN_PAGE PAGE_SHIFT 205#if defined(CONFIG_ARC_HUGEPAGE_16M)
200 206#define PGDIR_SHIFT 24
201/* Optimal Sizing of Pg Tbl - based on MMU page size */ 207#elif defined(CONFIG_ARC_HUGEPAGE_2M)
202#if defined(CONFIG_ARC_PAGE_SIZE_8K) 208#define PGDIR_SHIFT 21
203#define BITS_FOR_PTE 8 /* 11:8:13 */ 209#else
204#elif defined(CONFIG_ARC_PAGE_SIZE_16K) 210/*
205#define BITS_FOR_PTE 8 /* 10:8:14 */ 211 * Only Normal page support so "hackable" (see comment above)
206#elif defined(CONFIG_ARC_PAGE_SIZE_4K) 212 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
207#define BITS_FOR_PTE 9 /* 11:9:12 */ 213 */
214#define PGDIR_SHIFT 21
208#endif 215#endif
209 216
210#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE) 217#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
218#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
211 219
212#define PGDIR_SHIFT (32 - BITS_FOR_PGD)
213#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */ 220#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
214#define PGDIR_MASK (~(PGDIR_SIZE-1)) 221#define PGDIR_MASK (~(PGDIR_SIZE-1))
215 222
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cbfec79137bf..c1264607bbff 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
45VECTOR handle_interrupt ; (16) Timer0 45VECTOR handle_interrupt ; (16) Timer0
46VECTOR handle_interrupt ; unused (Timer1) 46VECTOR handle_interrupt ; unused (Timer1)
47VECTOR handle_interrupt ; unused (WDT) 47VECTOR handle_interrupt ; unused (WDT)
48VECTOR handle_interrupt ; (19) ICI (inter core interrupt) 48VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
49VECTOR handle_interrupt 49VECTOR handle_interrupt ; (20) perf Interrupt
50VECTOR handle_interrupt 50VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
51VECTOR handle_interrupt 51VECTOR handle_interrupt ; unused
52VECTOR handle_interrupt ; (23) End of fixed IRQs 52VECTOR handle_interrupt ; (23) unused
53# End of fixed IRQs
53 54
54.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8 55.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
55 VECTOR handle_interrupt 56 VECTOR handle_interrupt
@@ -211,7 +212,11 @@ debug_marker_syscall:
211; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig 212; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
212; entry was via Exception in DS which got preempted in kernel). 213; entry was via Exception in DS which got preempted in kernel).
213; 214;
214; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling 215; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
216;
217; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
218; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
219
215.Lintr_ret_to_delay_slot: 220.Lintr_ret_to_delay_slot:
216debug_marker_ds: 221debug_marker_ds:
217 222
@@ -222,18 +227,23 @@ debug_marker_ds:
222 ld r2, [sp, PT_ret] 227 ld r2, [sp, PT_ret]
223 ld r3, [sp, PT_status32] 228 ld r3, [sp, PT_status32]
224 229
230 ; STAT32 for Int return created from scratch
231 ; (No delay dlot, disable Further intr in trampoline)
232
225 bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK 233 bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
226 st r0, [sp, PT_status32] 234 st r0, [sp, PT_status32]
227 235
228 mov r1, .Lintr_ret_to_delay_slot_2 236 mov r1, .Lintr_ret_to_delay_slot_2
229 st r1, [sp, PT_ret] 237 st r1, [sp, PT_ret]
230 238
239 ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
231 st r2, [sp, 0] 240 st r2, [sp, 0]
232 st r3, [sp, 4] 241 st r3, [sp, 4]
233 242
234 b .Lisr_ret_fast_path 243 b .Lisr_ret_fast_path
235 244
236.Lintr_ret_to_delay_slot_2: 245.Lintr_ret_to_delay_slot_2:
246 ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
237 sub sp, sp, SZ_PT_REGS 247 sub sp, sp, SZ_PT_REGS
238 st r9, [sp, -4] 248 st r9, [sp, -4]
239 249
@@ -243,11 +253,19 @@ debug_marker_ds:
243 ld r9, [sp, 4] 253 ld r9, [sp, 4]
244 sr r9, [erstatus] 254 sr r9, [erstatus]
245 255
256 ; restore AUX_USER_SP if returning to U mode
257 bbit0 r9, STATUS_U_BIT, 1f
258 ld r9, [sp, PT_sp]
259 sr r9, [AUX_USER_SP]
260
2611:
246 ld r9, [sp, 8] 262 ld r9, [sp, 8]
247 sr r9, [erbta] 263 sr r9, [erbta]
248 264
249 ld r9, [sp, -4] 265 ld r9, [sp, -4]
250 add sp, sp, SZ_PT_REGS 266 add sp, sp, SZ_PT_REGS
267
268 ; return from pure kernel mode to delay slot
251 rtie 269 rtie
252 270
253END(ret_from_exception) 271END(ret_from_exception)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 0394f9f61b46..942526322ae7 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,6 +14,8 @@
14#include <linux/irqchip.h> 14#include <linux/irqchip.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16 16
17static int irq_prio;
18
17/* 19/*
18 * Early Hardware specific Interrupt setup 20 * Early Hardware specific Interrupt setup
19 * -Called very early (start_kernel -> setup_arch -> setup_processor) 21 * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@ void arc_init_IRQ(void)
24{ 26{
25 unsigned int tmp; 27 unsigned int tmp;
26 28
29 struct irq_build {
30#ifdef CONFIG_CPU_BIG_ENDIAN
31 unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
32#else
33 unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
34#endif
35 } irq_bcr;
36
27 struct aux_irq_ctrl { 37 struct aux_irq_ctrl {
28#ifdef CONFIG_CPU_BIG_ENDIAN 38#ifdef CONFIG_CPU_BIG_ENDIAN
29 unsigned int res3:18, save_idx_regs:1, res2:1, 39 unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@ void arc_init_IRQ(void)
46 56
47 WRITE_AUX(AUX_IRQ_CTRL, ictrl); 57 WRITE_AUX(AUX_IRQ_CTRL, ictrl);
48 58
49 /* setup status32, don't enable intr yet as kernel doesn't want */
50 tmp = read_aux_reg(0xa);
51 tmp |= ISA_INIT_STATUS_BITS;
52 tmp &= ~STATUS_IE_MASK;
53 asm volatile("flag %0 \n"::"r"(tmp));
54
55 /* 59 /*
56 * ARCv2 core intc provides multiple interrupt priorities (upto 16). 60 * ARCv2 core intc provides multiple interrupt priorities (upto 16).
57 * Typical builds though have only two levels (0-high, 1-low) 61 * Typical builds though have only two levels (0-high, 1-low)
58 * Linux by default uses lower prio 1 for most irqs, reserving 0 for 62 * Linux by default uses lower prio 1 for most irqs, reserving 0 for
59 * NMI style interrupts in future (say perf) 63 * NMI style interrupts in future (say perf)
60 *
61 * Read the intc BCR to confirm that Linux default priority is avail
62 * in h/w
63 *
64 * Note:
65 * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
66 * is 0 based.
67 */ 64 */
68 tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF; 65
69 if (ARCV2_IRQ_DEF_PRIO > tmp) 66 READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
70 panic("Linux default irq prio incorrect\n"); 67
68 irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
69 pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
70 irq_prio + 1, irq_prio,
71 irq_bcr.firq ? " FIRQ (not used)":"");
72
73 /* setup status32, don't enable intr yet as kernel doesn't want */
74 tmp = read_aux_reg(0xa);
75 tmp |= STATUS_AD_MASK | (irq_prio << 1);
76 tmp &= ~STATUS_IE_MASK;
77 asm volatile("flag %0 \n"::"r"(tmp));
71} 78}
72 79
73static void arcv2_irq_mask(struct irq_data *data) 80static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
86{ 93{
87 /* set default priority */ 94 /* set default priority */
88 write_aux_reg(AUX_IRQ_SELECT, data->irq); 95 write_aux_reg(AUX_IRQ_SELECT, data->irq);
89 write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); 96 write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
90 97
91 /* 98 /*
92 * hw auto enables (linux unmask) all by default 99 * hw auto enables (linux unmask) all by default
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 06bcedf19b62..224d1c3aa9c4 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -81,9 +81,6 @@ static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
81{ 81{
82 switch (irq) { 82 switch (irq) {
83 case TIMER0_IRQ: 83 case TIMER0_IRQ:
84#ifdef CONFIG_SMP
85 case IPI_IRQ:
86#endif
87 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq); 84 irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
88 break; 85 break;
89 default: 86 default:
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bd237acdf4f2..c41c364b926c 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,13 @@
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <asm/irqflags-arcv2.h>
14#include <asm/mcip.h> 15#include <asm/mcip.h>
15#include <asm/setup.h> 16#include <asm/setup.h>
16 17
18#define IPI_IRQ 19
19#define SOFTIRQ_IRQ 21
20
17static char smp_cpuinfo_buf[128]; 21static char smp_cpuinfo_buf[128];
18static int idu_detected; 22static int idu_detected;
19 23
@@ -22,6 +26,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
22static void mcip_setup_per_cpu(int cpu) 26static void mcip_setup_per_cpu(int cpu)
23{ 27{
24 smp_ipi_irq_setup(cpu, IPI_IRQ); 28 smp_ipi_irq_setup(cpu, IPI_IRQ);
29 smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
25} 30}
26 31
27static void mcip_ipi_send(int cpu) 32static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@ static void mcip_ipi_send(int cpu)
29 unsigned long flags; 34 unsigned long flags;
30 int ipi_was_pending; 35 int ipi_was_pending;
31 36
37 /* ARConnect can only send IPI to others */
38 if (unlikely(cpu == raw_smp_processor_id())) {
39 arc_softirq_trigger(SOFTIRQ_IRQ);
40 return;
41 }
42
43 raw_spin_lock_irqsave(&mcip_lock, flags);
44
32 /* 45 /*
33 * NOTE: We must spin here if the other cpu hasn't yet 46 * If receiver already has a pending interrupt, elide sending this one.
34 * serviced a previous message. This can burn lots 47 * Linux cross core calling works well with concurrent IPIs
35 * of time, but we MUST follows this protocol or 48 * coalesced into one
36 * ipi messages can be lost!!! 49 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
37 * Also, we must release the lock in this loop because
38 * the other side may get to this same loop and not
39 * be able to ack -- thus causing deadlock.
40 */ 50 */
51 __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
52 ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
53 if (!ipi_was_pending)
54 __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
41 55
42 do {
43 raw_spin_lock_irqsave(&mcip_lock, flags);
44 __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
45 ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
46 if (ipi_was_pending == 0)
47 break; /* break out but keep lock */
48 raw_spin_unlock_irqrestore(&mcip_lock, flags);
49 } while (1);
50
51 __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
52 raw_spin_unlock_irqrestore(&mcip_lock, flags); 56 raw_spin_unlock_irqrestore(&mcip_lock, flags);
53
54#ifdef CONFIG_ARC_IPI_DBG
55 if (ipi_was_pending)
56 pr_info("IPI ACK delayed from cpu %d\n", cpu);
57#endif
58} 57}
59 58
60static void mcip_ipi_clear(int irq) 59static void mcip_ipi_clear(int irq)
61{ 60{
62 unsigned int cpu, c; 61 unsigned int cpu, c;
63 unsigned long flags; 62 unsigned long flags;
64 unsigned int __maybe_unused copy; 63
64 if (unlikely(irq == SOFTIRQ_IRQ)) {
65 arc_softirq_clear(irq);
66 return;
67 }
65 68
66 raw_spin_lock_irqsave(&mcip_lock, flags); 69 raw_spin_lock_irqsave(&mcip_lock, flags);
67 70
68 /* Who sent the IPI */ 71 /* Who sent the IPI */
69 __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0); 72 __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
70 73
71 copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */ 74 cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
72 75
73 /* 76 /*
74 * In rare case, multiple concurrent IPIs sent to same target can 77 * In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@ static void mcip_ipi_clear(int irq)
82 } while (cpu); 85 } while (cpu);
83 86
84 raw_spin_unlock_irqrestore(&mcip_lock, flags); 87 raw_spin_unlock_irqrestore(&mcip_lock, flags);
85
86#ifdef CONFIG_ARC_IPI_DBG
87 if (c != __ffs(copy))
88 pr_info("IPIs from %x coalesced to %x\n",
89 copy, raw_smp_processor_id());
90#endif
91} 88}
92 89
93static void mcip_probe_n_setup(void) 90static void mcip_probe_n_setup(void)
@@ -96,13 +93,13 @@ static void mcip_probe_n_setup(void)
96#ifdef CONFIG_CPU_BIG_ENDIAN 93#ifdef CONFIG_CPU_BIG_ENDIAN
97 unsigned int pad3:8, 94 unsigned int pad3:8,
98 idu:1, llm:1, num_cores:6, 95 idu:1, llm:1, num_cores:6,
99 iocoh:1, grtc:1, dbg:1, pad2:1, 96 iocoh:1, gfrc:1, dbg:1, pad2:1,
100 msg:1, sem:1, ipi:1, pad:1, 97 msg:1, sem:1, ipi:1, pad:1,
101 ver:8; 98 ver:8;
102#else 99#else
103 unsigned int ver:8, 100 unsigned int ver:8,
104 pad:1, ipi:1, sem:1, msg:1, 101 pad:1, ipi:1, sem:1, msg:1,
105 pad2:1, dbg:1, grtc:1, iocoh:1, 102 pad2:1, dbg:1, gfrc:1, iocoh:1,
106 num_cores:6, llm:1, idu:1, 103 num_cores:6, llm:1, idu:1,
107 pad3:8; 104 pad3:8;
108#endif 105#endif
@@ -111,12 +108,13 @@ static void mcip_probe_n_setup(void)
111 READ_BCR(ARC_REG_MCIP_BCR, mp); 108 READ_BCR(ARC_REG_MCIP_BCR, mp);
112 109
113 sprintf(smp_cpuinfo_buf, 110 sprintf(smp_cpuinfo_buf,
114 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n", 111 "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
115 mp.ver, mp.num_cores, 112 mp.ver, mp.num_cores,
116 IS_AVAIL1(mp.ipi, "IPI "), 113 IS_AVAIL1(mp.ipi, "IPI "),
117 IS_AVAIL1(mp.idu, "IDU "), 114 IS_AVAIL1(mp.idu, "IDU "),
115 IS_AVAIL1(mp.llm, "LLM "),
118 IS_AVAIL1(mp.dbg, "DEBUG "), 116 IS_AVAIL1(mp.dbg, "DEBUG "),
119 IS_AVAIL1(mp.grtc, "GRTC")); 117 IS_AVAIL1(mp.gfrc, "GFRC"));
120 118
121 idu_detected = mp.idu; 119 idu_detected = mp.idu;
122 120
@@ -125,8 +123,8 @@ static void mcip_probe_n_setup(void)
125 __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); 123 __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
126 } 124 }
127 125
128 if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc) 126 if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
129 panic("kernel trying to use non-existent GRTC\n"); 127 panic("kernel trying to use non-existent GFRC\n");
130} 128}
131 129
132struct plat_smp_ops plat_smp_ops = { 130struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index e1b87444ea9a..cdc821df1809 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -42,9 +42,57 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
42 42
43struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 43struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
44 44
45static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
46{
47 if (is_isa_arcompact()) {
48 struct bcr_iccm_arcompact iccm;
49 struct bcr_dccm_arcompact dccm;
50
51 READ_BCR(ARC_REG_ICCM_BUILD, iccm);
52 if (iccm.ver) {
53 cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
54 cpu->iccm.base_addr = iccm.base << 16;
55 }
56
57 READ_BCR(ARC_REG_DCCM_BUILD, dccm);
58 if (dccm.ver) {
59 unsigned long base;
60 cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
61
62 base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
63 cpu->dccm.base_addr = base & ~0xF;
64 }
65 } else {
66 struct bcr_iccm_arcv2 iccm;
67 struct bcr_dccm_arcv2 dccm;
68 unsigned long region;
69
70 READ_BCR(ARC_REG_ICCM_BUILD, iccm);
71 if (iccm.ver) {
72 cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
73 if (iccm.sz00 == 0xF && iccm.sz01 > 0)
74 cpu->iccm.sz <<= iccm.sz01;
75
76 region = read_aux_reg(ARC_REG_AUX_ICCM);
77 cpu->iccm.base_addr = region & 0xF0000000;
78 }
79
80 READ_BCR(ARC_REG_DCCM_BUILD, dccm);
81 if (dccm.ver) {
82 cpu->dccm.sz = 256 << dccm.sz0;
83 if (dccm.sz0 == 0xF && dccm.sz1 > 0)
84 cpu->dccm.sz <<= dccm.sz1;
85
86 region = read_aux_reg(ARC_REG_AUX_DCCM);
87 cpu->dccm.base_addr = region & 0xF0000000;
88 }
89 }
90}
91
45static void read_arc_build_cfg_regs(void) 92static void read_arc_build_cfg_regs(void)
46{ 93{
47 struct bcr_perip uncached_space; 94 struct bcr_perip uncached_space;
95 struct bcr_timer timer;
48 struct bcr_generic bcr; 96 struct bcr_generic bcr;
49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 97 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
50 unsigned long perip_space; 98 unsigned long perip_space;
@@ -53,7 +101,11 @@ static void read_arc_build_cfg_regs(void)
53 READ_BCR(AUX_IDENTITY, cpu->core); 101 READ_BCR(AUX_IDENTITY, cpu->core);
54 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); 102 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
55 103
56 READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); 104 READ_BCR(ARC_REG_TIMERS_BCR, timer);
105 cpu->extn.timer0 = timer.t0;
106 cpu->extn.timer1 = timer.t1;
107 cpu->extn.rtc = timer.rtc;
108
57 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 109 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
58 110
59 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 111 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -71,36 +123,11 @@ static void read_arc_build_cfg_regs(void)
71 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */ 123 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
72 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0; 124 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
73 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */ 125 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
74
75 /* Note that we read the CCM BCRs independent of kernel config
76 * This is to catch the cases where user doesn't know that
77 * CCMs are present in hardware build
78 */
79 {
80 struct bcr_iccm iccm;
81 struct bcr_dccm dccm;
82 struct bcr_dccm_base dccm_base;
83 unsigned int bcr_32bit_val;
84
85 bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
86 if (bcr_32bit_val) {
87 iccm = *((struct bcr_iccm *)&bcr_32bit_val);
88 cpu->iccm.base_addr = iccm.base << 16;
89 cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
90 }
91
92 bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
93 if (bcr_32bit_val) {
94 dccm = *((struct bcr_dccm *)&bcr_32bit_val);
95 cpu->dccm.sz = 0x800 << (dccm.sz);
96
97 READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
98 cpu->dccm.base_addr = dccm_base.addr << 8;
99 }
100 }
101
102 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem); 126 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
103 127
128 /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
129 read_decode_ccm_bcr(cpu);
130
104 read_decode_mmu_bcr(); 131 read_decode_mmu_bcr();
105 read_decode_cache_bcr(); 132 read_decode_cache_bcr();
106 133
@@ -208,9 +235,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
208 (unsigned int)(arc_get_core_freq() / 10000) % 100); 235 (unsigned int)(arc_get_core_freq() / 10000) % 100);
209 236
210 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", 237 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
211 IS_AVAIL1(cpu->timers.t0, "Timer0 "), 238 IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
212 IS_AVAIL1(cpu->timers.t1, "Timer1 "), 239 IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
213 IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ", 240 IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
214 CONFIG_ARC_HAS_RTC)); 241 CONFIG_ARC_HAS_RTC));
215 242
216 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", 243 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -232,8 +259,6 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
232 259
233 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); 260 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
234 } 261 }
235 n += scnprintf(buf + n, len - n, "%s",
236 IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
237 } 262 }
238 263
239 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", 264 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -293,13 +318,13 @@ static void arc_chk_core_config(void)
293 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 318 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
294 int fpu_enabled; 319 int fpu_enabled;
295 320
296 if (!cpu->timers.t0) 321 if (!cpu->extn.timer0)
297 panic("Timer0 is not present!\n"); 322 panic("Timer0 is not present!\n");
298 323
299 if (!cpu->timers.t1) 324 if (!cpu->extn.timer1)
300 panic("Timer1 is not present!\n"); 325 panic("Timer1 is not present!\n");
301 326
302 if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc) 327 if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
303 panic("RTC is not present\n"); 328 panic("RTC is not present\n");
304 329
305#ifdef CONFIG_ARC_HAS_DCCM 330#ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +359,7 @@ static void arc_chk_core_config(void)
334 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); 359 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
335 360
336 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && 361 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
362 IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
337 !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) 363 !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
338 panic("llock/scond livelock workaround missing\n"); 364 panic("llock/scond livelock workaround missing\n");
339} 365}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ef6e9e15b82a..424e937da5c8 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -336,11 +336,8 @@ irqreturn_t do_IPI(int irq, void *dev_id)
336 int rc; 336 int rc;
337 337
338 rc = __do_IPI(msg); 338 rc = __do_IPI(msg);
339#ifdef CONFIG_ARC_IPI_DBG
340 /* IPI received but no valid @msg */
341 if (rc) 339 if (rc)
342 pr_info("IPI with bogus msg %ld in %ld\n", msg, copy); 340 pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
343#endif
344 pending &= ~(1U << msg); 341 pending &= ~(1U << msg);
345 } while (pending); 342 } while (pending);
346 343
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index dfad287f1db1..156d9833ff84 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -62,7 +62,7 @@
62 62
63/********** Clock Source Device *********/ 63/********** Clock Source Device *********/
64 64
65#ifdef CONFIG_ARC_HAS_GRTC 65#ifdef CONFIG_ARC_HAS_GFRC
66 66
67static int arc_counter_setup(void) 67static int arc_counter_setup(void)
68{ 68{
@@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
83 83
84 local_irq_save(flags); 84 local_irq_save(flags);
85 85
86 __mcip_cmd(CMD_GRTC_READ_LO, 0); 86 __mcip_cmd(CMD_GFRC_READ_LO, 0);
87 stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK); 87 stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
88 88
89 __mcip_cmd(CMD_GRTC_READ_HI, 0); 89 __mcip_cmd(CMD_GFRC_READ_HI, 0);
90 stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK); 90 stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
91 91
92 local_irq_restore(flags); 92 local_irq_restore(flags);
@@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
95} 95}
96 96
97static struct clocksource arc_counter = { 97static struct clocksource arc_counter = {
98 .name = "ARConnect GRTC", 98 .name = "ARConnect GFRC",
99 .rating = 400, 99 .rating = 400,
100 .read = arc_counter_read, 100 .read = arc_counter_read,
101 .mask = CLOCKSOURCE_MASK(64), 101 .mask = CLOCKSOURCE_MASK(64),
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f3db13d2d90e..0cc150b87b86 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -285,8 +285,10 @@
285 }; 285 };
286}; 286};
287 287
288
289/include/ "tps65217.dtsi"
290
288&tps { 291&tps {
289 compatible = "ti,tps65217";
290 /* 292 /*
291 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only 293 * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
292 * mode") at poweroff. Most BeagleBone versions do not support RTC-only 294 * mode") at poweroff. Most BeagleBone versions do not support RTC-only
@@ -307,17 +309,12 @@
307 ti,pmic-shutdown-controller; 309 ti,pmic-shutdown-controller;
308 310
309 regulators { 311 regulators {
310 #address-cells = <1>;
311 #size-cells = <0>;
312
313 dcdc1_reg: regulator@0 { 312 dcdc1_reg: regulator@0 {
314 reg = <0>;
315 regulator-name = "vdds_dpr"; 313 regulator-name = "vdds_dpr";
316 regulator-always-on; 314 regulator-always-on;
317 }; 315 };
318 316
319 dcdc2_reg: regulator@1 { 317 dcdc2_reg: regulator@1 {
320 reg = <1>;
321 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 318 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
322 regulator-name = "vdd_mpu"; 319 regulator-name = "vdd_mpu";
323 regulator-min-microvolt = <925000>; 320 regulator-min-microvolt = <925000>;
@@ -327,7 +324,6 @@
327 }; 324 };
328 325
329 dcdc3_reg: regulator@2 { 326 dcdc3_reg: regulator@2 {
330 reg = <2>;
331 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 327 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
332 regulator-name = "vdd_core"; 328 regulator-name = "vdd_core";
333 regulator-min-microvolt = <925000>; 329 regulator-min-microvolt = <925000>;
@@ -337,25 +333,21 @@
337 }; 333 };
338 334
339 ldo1_reg: regulator@3 { 335 ldo1_reg: regulator@3 {
340 reg = <3>;
341 regulator-name = "vio,vrtc,vdds"; 336 regulator-name = "vio,vrtc,vdds";
342 regulator-always-on; 337 regulator-always-on;
343 }; 338 };
344 339
345 ldo2_reg: regulator@4 { 340 ldo2_reg: regulator@4 {
346 reg = <4>;
347 regulator-name = "vdd_3v3aux"; 341 regulator-name = "vdd_3v3aux";
348 regulator-always-on; 342 regulator-always-on;
349 }; 343 };
350 344
351 ldo3_reg: regulator@5 { 345 ldo3_reg: regulator@5 {
352 reg = <5>;
353 regulator-name = "vdd_1v8"; 346 regulator-name = "vdd_1v8";
354 regulator-always-on; 347 regulator-always-on;
355 }; 348 };
356 349
357 ldo4_reg: regulator@6 { 350 ldo4_reg: regulator@6 {
358 reg = <6>;
359 regulator-name = "vdd_3v3a"; 351 regulator-name = "vdd_3v3a";
360 regulator-always-on; 352 regulator-always-on;
361 }; 353 };
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index fda457b07e15..857d9894103a 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -128,21 +128,16 @@
128 128
129}; 129};
130 130
131&tps { 131/include/ "tps65217.dtsi"
132 compatible = "ti,tps65217";
133 132
133&tps {
134 regulators { 134 regulators {
135 #address-cells = <1>;
136 #size-cells = <0>;
137
138 dcdc1_reg: regulator@0 { 135 dcdc1_reg: regulator@0 {
139 reg = <0>;
140 regulator-name = "vdds_dpr"; 136 regulator-name = "vdds_dpr";
141 regulator-always-on; 137 regulator-always-on;
142 }; 138 };
143 139
144 dcdc2_reg: regulator@1 { 140 dcdc2_reg: regulator@1 {
145 reg = <1>;
146 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 141 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
147 regulator-name = "vdd_mpu"; 142 regulator-name = "vdd_mpu";
148 regulator-min-microvolt = <925000>; 143 regulator-min-microvolt = <925000>;
@@ -152,7 +147,6 @@
152 }; 147 };
153 148
154 dcdc3_reg: regulator@2 { 149 dcdc3_reg: regulator@2 {
155 reg = <2>;
156 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 150 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
157 regulator-name = "vdd_core"; 151 regulator-name = "vdd_core";
158 regulator-min-microvolt = <925000>; 152 regulator-min-microvolt = <925000>;
@@ -162,28 +156,24 @@
162 }; 156 };
163 157
164 ldo1_reg: regulator@3 { 158 ldo1_reg: regulator@3 {
165 reg = <3>;
166 regulator-name = "vio,vrtc,vdds"; 159 regulator-name = "vio,vrtc,vdds";
167 regulator-boot-on; 160 regulator-boot-on;
168 regulator-always-on; 161 regulator-always-on;
169 }; 162 };
170 163
171 ldo2_reg: regulator@4 { 164 ldo2_reg: regulator@4 {
172 reg = <4>;
173 regulator-name = "vdd_3v3aux"; 165 regulator-name = "vdd_3v3aux";
174 regulator-boot-on; 166 regulator-boot-on;
175 regulator-always-on; 167 regulator-always-on;
176 }; 168 };
177 169
178 ldo3_reg: regulator@5 { 170 ldo3_reg: regulator@5 {
179 reg = <5>;
180 regulator-name = "vdd_1v8"; 171 regulator-name = "vdd_1v8";
181 regulator-boot-on; 172 regulator-boot-on;
182 regulator-always-on; 173 regulator-always-on;
183 }; 174 };
184 175
185 ldo4_reg: regulator@6 { 176 ldo4_reg: regulator@6 {
186 reg = <6>;
187 regulator-name = "vdd_3v3d"; 177 regulator-name = "vdd_3v3d";
188 regulator-boot-on; 178 regulator-boot-on;
189 regulator-always-on; 179 regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 77559a1ded60..f313999c503e 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -375,15 +375,11 @@
375 wp-gpios = <&gpio3 18 0>; 375 wp-gpios = <&gpio3 18 0>;
376}; 376};
377 377
378&tps { 378#include "tps65217.dtsi"
379 compatible = "ti,tps65217";
380 379
380&tps {
381 regulators { 381 regulators {
382 #address-cells = <1>;
383 #size-cells = <0>;
384
385 dcdc1_reg: regulator@0 { 382 dcdc1_reg: regulator@0 {
386 reg = <0>;
387 /* +1.5V voltage with ±4% tolerance */ 383 /* +1.5V voltage with ±4% tolerance */
388 regulator-min-microvolt = <1450000>; 384 regulator-min-microvolt = <1450000>;
389 regulator-max-microvolt = <1550000>; 385 regulator-max-microvolt = <1550000>;
@@ -392,7 +388,6 @@
392 }; 388 };
393 389
394 dcdc2_reg: regulator@1 { 390 dcdc2_reg: regulator@1 {
395 reg = <1>;
396 /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */ 391 /* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
397 regulator-name = "vdd_mpu"; 392 regulator-name = "vdd_mpu";
398 regulator-min-microvolt = <915000>; 393 regulator-min-microvolt = <915000>;
@@ -402,7 +397,6 @@
402 }; 397 };
403 398
404 dcdc3_reg: regulator@2 { 399 dcdc3_reg: regulator@2 {
405 reg = <2>;
406 /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */ 400 /* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
407 regulator-name = "vdd_core"; 401 regulator-name = "vdd_core";
408 regulator-min-microvolt = <915000>; 402 regulator-min-microvolt = <915000>;
@@ -412,7 +406,6 @@
412 }; 406 };
413 407
414 ldo1_reg: regulator@3 { 408 ldo1_reg: regulator@3 {
415 reg = <3>;
416 /* +1.8V voltage with ±4% tolerance */ 409 /* +1.8V voltage with ±4% tolerance */
417 regulator-min-microvolt = <1750000>; 410 regulator-min-microvolt = <1750000>;
418 regulator-max-microvolt = <1870000>; 411 regulator-max-microvolt = <1870000>;
@@ -421,7 +414,6 @@
421 }; 414 };
422 415
423 ldo2_reg: regulator@4 { 416 ldo2_reg: regulator@4 {
424 reg = <4>;
425 /* +3.3V voltage with ±4% tolerance */ 417 /* +3.3V voltage with ±4% tolerance */
426 regulator-min-microvolt = <3175000>; 418 regulator-min-microvolt = <3175000>;
427 regulator-max-microvolt = <3430000>; 419 regulator-max-microvolt = <3430000>;
@@ -430,7 +422,6 @@
430 }; 422 };
431 423
432 ldo3_reg: regulator@5 { 424 ldo3_reg: regulator@5 {
433 reg = <5>;
434 /* +1.8V voltage with ±4% tolerance */ 425 /* +1.8V voltage with ±4% tolerance */
435 regulator-min-microvolt = <1750000>; 426 regulator-min-microvolt = <1750000>;
436 regulator-max-microvolt = <1870000>; 427 regulator-max-microvolt = <1870000>;
@@ -439,7 +430,6 @@
439 }; 430 };
440 431
441 ldo4_reg: regulator@6 { 432 ldo4_reg: regulator@6 {
442 reg = <6>;
443 /* +3.3V voltage with ±4% tolerance */ 433 /* +3.3V voltage with ±4% tolerance */
444 regulator-min-microvolt = <3175000>; 434 regulator-min-microvolt = <3175000>;
445 regulator-max-microvolt = <3430000>; 435 regulator-max-microvolt = <3430000>;
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 471a3a70ea1f..8867aaaec54d 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -420,9 +420,9 @@
420 vin-supply = <&vbat>; 420 vin-supply = <&vbat>;
421}; 421};
422 422
423&tps { 423/include/ "tps65217.dtsi"
424 compatible = "ti,tps65217";
425 424
425&tps {
426 backlight { 426 backlight {
427 isel = <1>; /* ISET1 */ 427 isel = <1>; /* ISET1 */
428 fdim = <200>; /* TPS65217_BL_FDIM_200HZ */ 428 fdim = <200>; /* TPS65217_BL_FDIM_200HZ */
@@ -430,17 +430,12 @@
430 }; 430 };
431 431
432 regulators { 432 regulators {
433 #address-cells = <1>;
434 #size-cells = <0>;
435
436 dcdc1_reg: regulator@0 { 433 dcdc1_reg: regulator@0 {
437 reg = <0>;
438 /* VDD_1V8 system supply */ 434 /* VDD_1V8 system supply */
439 regulator-always-on; 435 regulator-always-on;
440 }; 436 };
441 437
442 dcdc2_reg: regulator@1 { 438 dcdc2_reg: regulator@1 {
443 reg = <1>;
444 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ 439 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
445 regulator-name = "vdd_core"; 440 regulator-name = "vdd_core";
446 regulator-min-microvolt = <925000>; 441 regulator-min-microvolt = <925000>;
@@ -450,7 +445,6 @@
450 }; 445 };
451 446
452 dcdc3_reg: regulator@2 { 447 dcdc3_reg: regulator@2 {
453 reg = <2>;
454 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ 448 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
455 regulator-name = "vdd_mpu"; 449 regulator-name = "vdd_mpu";
456 regulator-min-microvolt = <925000>; 450 regulator-min-microvolt = <925000>;
@@ -460,21 +454,18 @@
460 }; 454 };
461 455
462 ldo1_reg: regulator@3 { 456 ldo1_reg: regulator@3 {
463 reg = <3>;
464 /* VRTC 1.8V always-on supply */ 457 /* VRTC 1.8V always-on supply */
465 regulator-name = "vrtc,vdds"; 458 regulator-name = "vrtc,vdds";
466 regulator-always-on; 459 regulator-always-on;
467 }; 460 };
468 461
469 ldo2_reg: regulator@4 { 462 ldo2_reg: regulator@4 {
470 reg = <4>;
471 /* 3.3V rail */ 463 /* 3.3V rail */
472 regulator-name = "vdd_3v3aux"; 464 regulator-name = "vdd_3v3aux";
473 regulator-always-on; 465 regulator-always-on;
474 }; 466 };
475 467
476 ldo3_reg: regulator@5 { 468 ldo3_reg: regulator@5 {
477 reg = <5>;
478 /* VDD_3V3A 3.3V rail */ 469 /* VDD_3V3A 3.3V rail */
479 regulator-name = "vdd_3v3a"; 470 regulator-name = "vdd_3v3a";
480 regulator-min-microvolt = <3300000>; 471 regulator-min-microvolt = <3300000>;
@@ -482,7 +473,6 @@
482 }; 473 };
483 474
484 ldo4_reg: regulator@6 { 475 ldo4_reg: regulator@6 {
485 reg = <6>;
486 /* VDD_3V3B 3.3V rail */ 476 /* VDD_3V3B 3.3V rail */
487 regulator-name = "vdd_3v3b"; 477 regulator-name = "vdd_3v3b";
488 regulator-always-on; 478 regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 1b5b044fcd91..865de8500f1c 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -46,7 +46,7 @@
46 gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; 46 gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
47 linux,code = <KEY_BACK>; 47 linux,code = <KEY_BACK>;
48 debounce-interval = <1000>; 48 debounce-interval = <1000>;
49 gpio-key,wakeup; 49 wakeup-source;
50 }; 50 };
51 51
52 front_button { 52 front_button {
@@ -54,7 +54,7 @@
54 gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>; 54 gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
55 linux,code = <KEY_FRONT>; 55 linux,code = <KEY_FRONT>;
56 debounce-interval = <1000>; 56 debounce-interval = <1000>;
57 gpio-key,wakeup; 57 wakeup-source;
58 }; 58 };
59 }; 59 };
60 60
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index d38edfa53bb9..3303c281697b 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -375,19 +375,16 @@
375 pinctrl-0 = <&uart4_pins>; 375 pinctrl-0 = <&uart4_pins>;
376}; 376};
377 377
378#include "tps65217.dtsi"
379
378&tps { 380&tps {
379 compatible = "ti,tps65217";
380 ti,pmic-shutdown-controller; 381 ti,pmic-shutdown-controller;
381 382
382 interrupt-parent = <&intc>; 383 interrupt-parent = <&intc>;
383 interrupts = <7>; /* NNMI */ 384 interrupts = <7>; /* NNMI */
384 385
385 regulators { 386 regulators {
386 #address-cells = <1>;
387 #size-cells = <0>;
388
389 dcdc1_reg: regulator@0 { 387 dcdc1_reg: regulator@0 {
390 reg = <0>;
391 /* VDDS_DDR */ 388 /* VDDS_DDR */
392 regulator-min-microvolt = <1500000>; 389 regulator-min-microvolt = <1500000>;
393 regulator-max-microvolt = <1500000>; 390 regulator-max-microvolt = <1500000>;
@@ -395,7 +392,6 @@
395 }; 392 };
396 393
397 dcdc2_reg: regulator@1 { 394 dcdc2_reg: regulator@1 {
398 reg = <1>;
399 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */ 395 /* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
400 regulator-name = "vdd_mpu"; 396 regulator-name = "vdd_mpu";
401 regulator-min-microvolt = <925000>; 397 regulator-min-microvolt = <925000>;
@@ -405,7 +401,6 @@
405 }; 401 };
406 402
407 dcdc3_reg: regulator@2 { 403 dcdc3_reg: regulator@2 {
408 reg = <2>;
409 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */ 404 /* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
410 regulator-name = "vdd_core"; 405 regulator-name = "vdd_core";
411 regulator-min-microvolt = <925000>; 406 regulator-min-microvolt = <925000>;
@@ -415,7 +410,6 @@
415 }; 410 };
416 411
417 ldo1_reg: regulator@3 { 412 ldo1_reg: regulator@3 {
418 reg = <3>;
419 /* VRTC / VIO / VDDS*/ 413 /* VRTC / VIO / VDDS*/
420 regulator-always-on; 414 regulator-always-on;
421 regulator-min-microvolt = <1800000>; 415 regulator-min-microvolt = <1800000>;
@@ -423,7 +417,6 @@
423 }; 417 };
424 418
425 ldo2_reg: regulator@4 { 419 ldo2_reg: regulator@4 {
426 reg = <4>;
427 /* VDD_3V3AUX */ 420 /* VDD_3V3AUX */
428 regulator-always-on; 421 regulator-always-on;
429 regulator-min-microvolt = <3300000>; 422 regulator-min-microvolt = <3300000>;
@@ -431,7 +424,6 @@
431 }; 424 };
432 425
433 ldo3_reg: regulator@5 { 426 ldo3_reg: regulator@5 {
434 reg = <5>;
435 /* VDD_1V8 */ 427 /* VDD_1V8 */
436 regulator-min-microvolt = <1800000>; 428 regulator-min-microvolt = <1800000>;
437 regulator-max-microvolt = <1800000>; 429 regulator-max-microvolt = <1800000>;
@@ -439,7 +431,6 @@
439 }; 431 };
440 432
441 ldo4_reg: regulator@6 { 433 ldo4_reg: regulator@6 {
442 reg = <6>;
443 /* VDD_3V3A */ 434 /* VDD_3V3A */
444 regulator-min-microvolt = <3300000>; 435 regulator-min-microvolt = <3300000>;
445 regulator-max-microvolt = <3300000>; 436 regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 36c0fa6c362a..a0986c65be0c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -173,6 +173,8 @@
173 173
174 sound0_master: simple-audio-card,codec { 174 sound0_master: simple-audio-card,codec {
175 sound-dai = <&tlv320aic3104>; 175 sound-dai = <&tlv320aic3104>;
176 assigned-clocks = <&clkoutmux2_clk_mux>;
177 assigned-clock-parents = <&sys_clk2_dclk_div>;
176 clocks = <&clkout2_clk>; 178 clocks = <&clkout2_clk>;
177 }; 179 };
178 }; 180 };
@@ -796,6 +798,8 @@
796 pinctrl-names = "default", "sleep"; 798 pinctrl-names = "default", "sleep";
797 pinctrl-0 = <&mcasp3_pins_default>; 799 pinctrl-0 = <&mcasp3_pins_default>;
798 pinctrl-1 = <&mcasp3_pins_sleep>; 800 pinctrl-1 = <&mcasp3_pins_sleep>;
801 assigned-clocks = <&mcasp3_ahclkx_mux>;
802 assigned-clock-parents = <&sys_clkin2>;
799 status = "okay"; 803 status = "okay";
800 804
801 op-mode = <0>; /* MCASP_IIS_MODE */ 805 op-mode = <0>; /* MCASP_IIS_MODE */
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 8d93882dc8d5..1c06cb76da07 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -545,7 +545,7 @@
545 ti,debounce-tol = /bits/ 16 <10>; 545 ti,debounce-tol = /bits/ 16 <10>;
546 ti,debounce-rep = /bits/ 16 <1>; 546 ti,debounce-rep = /bits/ 16 <1>;
547 547
548 linux,wakeup; 548 wakeup-source;
549 }; 549 };
550}; 550};
551 551
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4f6ae921656f..f74d3db4846d 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -896,7 +896,6 @@
896 #size-cells = <1>; 896 #size-cells = <1>;
897 reg = <0x2100000 0x10000>; 897 reg = <0x2100000 0x10000>;
898 ranges = <0 0x2100000 0x10000>; 898 ranges = <0 0x2100000 0x10000>;
899 interrupt-parent = <&intc>;
900 clocks = <&clks IMX6QDL_CLK_CAAM_MEM>, 899 clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
901 <&clks IMX6QDL_CLK_CAAM_ACLK>, 900 <&clks IMX6QDL_CLK_CAAM_ACLK>,
902 <&clks IMX6QDL_CLK_CAAM_IPG>, 901 <&clks IMX6QDL_CLK_CAAM_IPG>,
diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts
index bf4143c6cb8f..b84af3da8c84 100644
--- a/arch/arm/boot/dts/kirkwood-ds112.dts
+++ b/arch/arm/boot/dts/kirkwood-ds112.dts
@@ -14,7 +14,7 @@
14#include "kirkwood-synology.dtsi" 14#include "kirkwood-synology.dtsi"
15 15
16/ { 16/ {
17 model = "Synology DS111"; 17 model = "Synology DS112";
18 compatible = "synology,ds111", "marvell,kirkwood"; 18 compatible = "synology,ds111", "marvell,kirkwood";
19 19
20 memory { 20 memory {
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
index 420788229e6f..aae8a7aceab7 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -228,6 +228,37 @@
228 }; 228 };
229}; 229};
230 230
231&devbus_bootcs {
232 status = "okay";
233 devbus,keep-config;
234
235 flash@0 {
236 compatible = "jedec-flash";
237 reg = <0 0x40000>;
238 bank-width = <1>;
239
240 partitions {
241 compatible = "fixed-partitions";
242 #address-cells = <1>;
243 #size-cells = <1>;
244
245 header@0 {
246 reg = <0 0x30000>;
247 read-only;
248 };
249
250 uboot@30000 {
251 reg = <0x30000 0xF000>;
252 read-only;
253 };
254
255 uboot_env@3F000 {
256 reg = <0x3F000 0x1000>;
257 };
258 };
259 };
260};
261
231&mdio { 262&mdio {
232 status = "okay"; 263 status = "okay";
233 264
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1afe24629d1f..b0c912feaa2f 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
90#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2) 90#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
91#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1) 91#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
92#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2) 92#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
93#define PIN_PA15 14 93#define PIN_PA15 15
94#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0) 94#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
95#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1) 95#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
96#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1) 96#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
new file mode 100644
index 000000000000..a63272422d76
--- /dev/null
+++ b/arch/arm/boot/dts/tps65217.dtsi
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Integrated Power Management Chip
11 * http://www.ti.com/lit/ds/symlink/tps65217.pdf
12 */
13
14&tps {
15 compatible = "ti,tps65217";
16
17 regulators {
18 #address-cells = <1>;
19 #size-cells = <0>;
20
21 dcdc1_reg: regulator@0 {
22 reg = <0>;
23 regulator-compatible = "dcdc1";
24 };
25
26 dcdc2_reg: regulator@1 {
27 reg = <1>;
28 regulator-compatible = "dcdc2";
29 };
30
31 dcdc3_reg: regulator@2 {
32 reg = <2>;
33 regulator-compatible = "dcdc3";
34 };
35
36 ldo1_reg: regulator@3 {
37 reg = <3>;
38 regulator-compatible = "ldo1";
39 };
40
41 ldo2_reg: regulator@4 {
42 reg = <4>;
43 regulator-compatible = "ldo2";
44 };
45
46 ldo3_reg: regulator@5 {
47 reg = <5>;
48 regulator-compatible = "ldo3";
49 };
50
51 ldo4_reg: regulator@6 {
52 reg = <6>;
53 regulator-compatible = "ldo4";
54 };
55 };
56};
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 2dc6da70ae59..d7ed252708c5 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -16,7 +16,7 @@
16 */ 16 */
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19 19#include <asm/div64.h>
20#include <asm/hardware/icst.h> 20#include <asm/hardware/icst.h>
21 21
22/* 22/*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
29 29
30unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco) 30unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
31{ 31{
32 return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]); 32 u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
33 u32 divisor = (vco.r + 2) * p->s2div[vco.s];
34
35 do_div(dividend, divisor);
36 return (unsigned long)dividend;
33} 37}
34 38
35EXPORT_SYMBOL(icst_hz); 39EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
58 62
59 if (f > p->vco_min && f <= p->vco_max) 63 if (f > p->vco_min && f <= p->vco_max)
60 break; 64 break;
65 i++;
61 } while (i < 8); 66 } while (i < 8);
62 67
63 if (i >= 8) 68 if (i >= 8)
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a7151744b85c..d18d6b42fcf5 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -292,24 +292,23 @@ CONFIG_FB=y
292CONFIG_FIRMWARE_EDID=y 292CONFIG_FIRMWARE_EDID=y
293CONFIG_FB_MODE_HELPERS=y 293CONFIG_FB_MODE_HELPERS=y
294CONFIG_FB_TILEBLITTING=y 294CONFIG_FB_TILEBLITTING=y
295CONFIG_OMAP2_DSS=m 295CONFIG_FB_OMAP5_DSS_HDMI=y
296CONFIG_OMAP5_DSS_HDMI=y 296CONFIG_FB_OMAP2_DSS_SDI=y
297CONFIG_OMAP2_DSS_SDI=y 297CONFIG_FB_OMAP2_DSS_DSI=y
298CONFIG_OMAP2_DSS_DSI=y
299CONFIG_FB_OMAP2=m 298CONFIG_FB_OMAP2=m
300CONFIG_DISPLAY_ENCODER_TFP410=m 299CONFIG_FB_OMAP2_ENCODER_TFP410=m
301CONFIG_DISPLAY_ENCODER_TPD12S015=m 300CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
302CONFIG_DISPLAY_CONNECTOR_DVI=m 301CONFIG_FB_OMAP2_CONNECTOR_DVI=m
303CONFIG_DISPLAY_CONNECTOR_HDMI=m 302CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
304CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m 303CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
305CONFIG_DISPLAY_PANEL_DPI=m 304CONFIG_FB_OMAP2_PANEL_DPI=m
306CONFIG_DISPLAY_PANEL_DSI_CM=m 305CONFIG_FB_OMAP2_PANEL_DSI_CM=m
307CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m 306CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
308CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m 307CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
309CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m 308CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
310CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m 309CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
311CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m 310CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
312CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m 311CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
313CONFIG_BACKLIGHT_LCD_SUPPORT=y 312CONFIG_BACKLIGHT_LCD_SUPPORT=y
314CONFIG_LCD_CLASS_DEVICE=y 313CONFIG_LCD_CLASS_DEVICE=y
315CONFIG_LCD_PLATFORM=y 314CONFIG_LCD_PLATFORM=y
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b445a5d56f43..89a3a3e592d6 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -364,7 +364,7 @@ static struct crypto_alg aes_algs[] = { {
364 .cra_blkcipher = { 364 .cra_blkcipher = {
365 .min_keysize = AES_MIN_KEY_SIZE, 365 .min_keysize = AES_MIN_KEY_SIZE,
366 .max_keysize = AES_MAX_KEY_SIZE, 366 .max_keysize = AES_MAX_KEY_SIZE,
367 .ivsize = AES_BLOCK_SIZE, 367 .ivsize = 0,
368 .setkey = ce_aes_setkey, 368 .setkey = ce_aes_setkey,
369 .encrypt = ecb_encrypt, 369 .encrypt = ecb_encrypt,
370 .decrypt = ecb_decrypt, 370 .decrypt = ecb_decrypt,
@@ -441,7 +441,7 @@ static struct crypto_alg aes_algs[] = { {
441 .cra_ablkcipher = { 441 .cra_ablkcipher = {
442 .min_keysize = AES_MIN_KEY_SIZE, 442 .min_keysize = AES_MIN_KEY_SIZE,
443 .max_keysize = AES_MAX_KEY_SIZE, 443 .max_keysize = AES_MAX_KEY_SIZE,
444 .ivsize = AES_BLOCK_SIZE, 444 .ivsize = 0,
445 .setkey = ablk_set_key, 445 .setkey = ablk_set_key,
446 .encrypt = ablk_encrypt, 446 .encrypt = ablk_encrypt,
447 .decrypt = ablk_decrypt, 447 .decrypt = ablk_decrypt,
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503c0591..e08d15184056 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
117 u32 irqstat; 117 u32 irqstat;
118 118
119 asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat)); 119 asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
120 dsb(sy);
120 return irqstat; 121 return irqstat;
121} 122}
122 123
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 0375c8caa061..9408a994cc91 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
35 dma_addr_t dev_addr, unsigned long offset, size_t size, 35 dma_addr_t dev_addr, unsigned long offset, size_t size,
36 enum dma_data_direction dir, struct dma_attrs *attrs) 36 enum dma_data_direction dir, struct dma_attrs *attrs)
37{ 37{
38 bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page); 38 unsigned long page_pfn = page_to_xen_pfn(page);
39 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
40 unsigned long compound_pages =
41 (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
42 bool local = (page_pfn <= dev_pfn) &&
43 (dev_pfn - page_pfn < compound_pages);
44
39 /* 45 /*
40 * Dom0 is mapped 1:1, while the Linux page can be spanned accross 46 * Dom0 is mapped 1:1, while the Linux page can span across
41 * multiple Xen page, it's not possible to have a mix of local and 47 * multiple Xen pages, it's not possible for it to contain a
42 * foreign Xen page. So if the first xen_pfn == mfn the page is local 48 * mix of local and foreign Xen pages. So if the first xen_pfn
43 * otherwise it's a foreign page grant-mapped in dom0. If the page is 49 * == mfn the page is local otherwise it's a foreign page
44 * local we can safely call the native dma_ops function, otherwise we 50 * grant-mapped in dom0. If the page is local we can safely
45 * call the xen specific function. 51 * call the native dma_ops function, otherwise we call the xen
52 * specific function.
46 */ 53 */
47 if (local) 54 if (local)
48 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 55 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 7f33b2056ae6..0f6600f05137 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -206,7 +206,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
206 run->mmio.is_write = is_write; 206 run->mmio.is_write = is_write;
207 run->mmio.phys_addr = fault_ipa; 207 run->mmio.phys_addr = fault_ipa;
208 run->mmio.len = len; 208 run->mmio.len = len;
209 memcpy(run->mmio.data, data_buf, len); 209 if (is_write)
210 memcpy(run->mmio.data, data_buf, len);
210 211
211 if (!ret) { 212 if (!ret) {
212 /* We handled the access successfully in the kernel. */ 213 /* We handled the access successfully in the kernel. */
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 809827265fb3..bab814d2f37d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -18,6 +18,7 @@
18 18
19#include <asm/setup.h> 19#include <asm/setup.h>
20#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
21#include <asm/system_info.h>
21 22
22#include "common.h" 23#include "common.h"
23 24
@@ -77,12 +78,31 @@ static const char *const n900_boards_compat[] __initconst = {
77 NULL, 78 NULL,
78}; 79};
79 80
81/* Set system_rev from atags */
82static void __init rx51_set_system_rev(const struct tag *tags)
83{
84 const struct tag *tag;
85
86 if (tags->hdr.tag != ATAG_CORE)
87 return;
88
89 for_each_tag(tag, tags) {
90 if (tag->hdr.tag == ATAG_REVISION) {
91 system_rev = tag->u.revision.rev;
92 break;
93 }
94 }
95}
96
80/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags, 97/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
81 * save them while the data is still not overwritten 98 * save them while the data is still not overwritten
82 */ 99 */
83static void __init rx51_reserve(void) 100static void __init rx51_reserve(void)
84{ 101{
85 save_atags((const struct tag *)(PAGE_OFFSET + 0x100)); 102 const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);
103
104 save_atags(tags);
105 rx51_set_system_rev(tags);
86 omap_reserve(); 106 omap_reserve();
87} 107}
88 108
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7b76ce01c21d..8633c703546a 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
101 101
102static void set_onenand_cfg(void __iomem *onenand_base) 102static void set_onenand_cfg(void __iomem *onenand_base)
103{ 103{
104 u32 reg; 104 u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
105 105
106 reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
107 reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
108 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) | 106 reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
109 ONENAND_SYS_CFG1_BL_16; 107 ONENAND_SYS_CFG1_BL_16;
110 if (onenand_flags & ONENAND_FLAG_SYNCREAD) 108 if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
123 reg |= ONENAND_SYS_CFG1_VHF; 121 reg |= ONENAND_SYS_CFG1_VHF;
124 else 122 else
125 reg &= ~ONENAND_SYS_CFG1_VHF; 123 reg &= ~ONENAND_SYS_CFG1_VHF;
124
126 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1); 125 writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
127} 126}
128 127
@@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
289 } 288 }
290 } 289 }
291 290
291 onenand_async.sync_write = true;
292 omap2_onenand_calc_async_timings(&t); 292 omap2_onenand_calc_async_timings(&t);
293 293
294 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async); 294 ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 0437537751bc..f7ff3b9dad87 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -191,12 +191,22 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
191{ 191{
192 struct platform_device *pdev = to_platform_device(dev); 192 struct platform_device *pdev = to_platform_device(dev);
193 struct omap_device *od; 193 struct omap_device *od;
194 int err;
194 195
195 switch (event) { 196 switch (event) {
196 case BUS_NOTIFY_DEL_DEVICE: 197 case BUS_NOTIFY_DEL_DEVICE:
197 if (pdev->archdata.od) 198 if (pdev->archdata.od)
198 omap_device_delete(pdev->archdata.od); 199 omap_device_delete(pdev->archdata.od);
199 break; 200 break;
201 case BUS_NOTIFY_UNBOUND_DRIVER:
202 od = to_omap_device(pdev);
203 if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
204 dev_info(dev, "enabled after unload, idling\n");
205 err = omap_device_idle(pdev);
206 if (err)
207 dev_err(dev, "failed to idle\n");
208 }
209 break;
200 case BUS_NOTIFY_ADD_DEVICE: 210 case BUS_NOTIFY_ADD_DEVICE:
201 if (pdev->dev.of_node) 211 if (pdev->dev.of_node)
202 omap_device_build_from_dt(pdev); 212 omap_device_build_from_dt(pdev);
@@ -602,8 +612,10 @@ static int _od_runtime_resume(struct device *dev)
602 int ret; 612 int ret;
603 613
604 ret = omap_device_enable(pdev); 614 ret = omap_device_enable(pdev);
605 if (ret) 615 if (ret) {
616 dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
606 return ret; 617 return ret;
618 }
607 619
608 return pm_generic_runtime_resume(dev); 620 return pm_generic_runtime_resume(dev);
609} 621}
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 9cb11215ceba..b3a4ed5289ec 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -4,7 +4,6 @@
4extern void shmobile_init_delay(void); 4extern void shmobile_init_delay(void);
5extern void shmobile_boot_vector(void); 5extern void shmobile_boot_vector(void);
6extern unsigned long shmobile_boot_fn; 6extern unsigned long shmobile_boot_fn;
7extern unsigned long shmobile_boot_arg;
8extern unsigned long shmobile_boot_size; 7extern unsigned long shmobile_boot_size;
9extern void shmobile_smp_boot(void); 8extern void shmobile_smp_boot(void);
10extern void shmobile_smp_sleep(void); 9extern void shmobile_smp_sleep(void);
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index fa5248c52399..5e503d91ad70 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
38 38
39 b secondary_startup 39 b secondary_startup
40ENDPROC(shmobile_boot_scu) 40ENDPROC(shmobile_boot_scu)
41
42 .text
43 .align 2
44 .globl shmobile_scu_base
45shmobile_scu_base:
46 .space 4
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 330c1fc63197..32e0bf6e3ccb 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,7 +24,6 @@
24 .arm 24 .arm
25 .align 12 25 .align 12
26ENTRY(shmobile_boot_vector) 26ENTRY(shmobile_boot_vector)
27 ldr r0, 2f
28 ldr r1, 1f 27 ldr r1, 1f
29 bx r1 28 bx r1
30 29
@@ -34,9 +33,6 @@ ENDPROC(shmobile_boot_vector)
34 .globl shmobile_boot_fn 33 .globl shmobile_boot_fn
35shmobile_boot_fn: 34shmobile_boot_fn:
361: .space 4 351: .space 4
37 .globl shmobile_boot_arg
38shmobile_boot_arg:
392: .space 4
40 .globl shmobile_boot_size 36 .globl shmobile_boot_size
41shmobile_boot_size: 37shmobile_boot_size:
42 .long . - shmobile_boot_vector 38 .long . - shmobile_boot_vector
@@ -46,13 +42,15 @@ shmobile_boot_size:
46 */ 42 */
47 43
48ENTRY(shmobile_smp_boot) 44ENTRY(shmobile_smp_boot)
49 @ r0 = MPIDR_HWID_BITMASK
50 mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR 45 mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
51 and r0, r1, r0 @ r0 = cpu_logical_map() value 46 and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
47 @ r0 = cpu_logical_map() value
52 mov r1, #0 @ r1 = CPU index 48 mov r1, #0 @ r1 = CPU index
53 adr r5, 1f @ array of per-cpu mpidr values 49 adr r2, 1f
54 adr r6, 2f @ array of per-cpu functions 50 ldmia r2, {r5, r6, r7}
55 adr r7, 3f @ array of per-cpu arguments 51 add r5, r5, r2 @ array of per-cpu mpidr values
52 add r6, r6, r2 @ array of per-cpu functions
53 add r7, r7, r2 @ array of per-cpu arguments
56 54
57shmobile_smp_boot_find_mpidr: 55shmobile_smp_boot_find_mpidr:
58 ldr r8, [r5, r1, lsl #2] 56 ldr r8, [r5, r1, lsl #2]
@@ -80,12 +78,18 @@ ENTRY(shmobile_smp_sleep)
80 b shmobile_smp_boot 78 b shmobile_smp_boot
81ENDPROC(shmobile_smp_sleep) 79ENDPROC(shmobile_smp_sleep)
82 80
81 .align 2
821: .long shmobile_smp_mpidr - .
83 .long shmobile_smp_fn - 1b
84 .long shmobile_smp_arg - 1b
85
86 .bss
83 .globl shmobile_smp_mpidr 87 .globl shmobile_smp_mpidr
84shmobile_smp_mpidr: 88shmobile_smp_mpidr:
851: .space NR_CPUS * 4 89 .space NR_CPUS * 4
86 .globl shmobile_smp_fn 90 .globl shmobile_smp_fn
87shmobile_smp_fn: 91shmobile_smp_fn:
882: .space NR_CPUS * 4 92 .space NR_CPUS * 4
89 .globl shmobile_smp_arg 93 .globl shmobile_smp_arg
90shmobile_smp_arg: 94shmobile_smp_arg:
913: .space NR_CPUS * 4 95 .space NR_CPUS * 4
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index 911884f7e28b..aba75c89f9c1 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -123,7 +123,6 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
123{ 123{
124 /* install boot code shared by all CPUs */ 124 /* install boot code shared by all CPUs */
125 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 125 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
126 shmobile_boot_arg = MPIDR_HWID_BITMASK;
127 126
128 /* perform per-cpu setup */ 127 /* perform per-cpu setup */
129 apmu_parse_cfg(apmu_init_cpu, apmu_config, num); 128 apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index 64663110ab6c..081a097c9219 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -17,6 +17,9 @@
17#include <asm/smp_scu.h> 17#include <asm/smp_scu.h>
18#include "common.h" 18#include "common.h"
19 19
20
21void __iomem *shmobile_scu_base;
22
20static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb, 23static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
21 unsigned long action, void *hcpu) 24 unsigned long action, void *hcpu)
22{ 25{
@@ -41,7 +44,6 @@ void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
41{ 44{
42 /* install boot code shared by all CPUs */ 45 /* install boot code shared by all CPUs */
43 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 46 shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
44 shmobile_boot_arg = MPIDR_HWID_BITMASK;
45 47
46 /* enable SCU and cache coherency on booting CPU */ 48 /* enable SCU and cache coherency on booting CPU */
47 scu_enable(shmobile_scu_base); 49 scu_enable(shmobile_scu_base);
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b854fe2095ad..0b024a9dbd43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -92,8 +92,6 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
92{ 92{
93 /* Map the reset vector (in headsmp-scu.S, headsmp.S) */ 93 /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
94 __raw_writel(__pa(shmobile_boot_vector), AVECR); 94 __raw_writel(__pa(shmobile_boot_vector), AVECR);
95 shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
96 shmobile_boot_arg = (unsigned long)shmobile_scu_base;
97 95
98 /* setup r8a7779 specific SCU bits */ 96 /* setup r8a7779 specific SCU bits */
99 shmobile_scu_base = IOMEM(R8A7779_SCU_BASE); 97 shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4b4058db0781..66353caa35b9 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,7 +173,7 @@ unsigned long arch_mmap_rnd(void)
173{ 173{
174 unsigned long rnd; 174 unsigned long rnd;
175 175
176 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 176 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
177 177
178 return rnd << PAGE_SHIFT; 178 return rnd << PAGE_SHIFT;
179} 179}
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 307237cfe728..b5e3f6d42b88 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -88,7 +88,7 @@ Image: vmlinux
88Image.%: vmlinux 88Image.%: vmlinux
89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 89 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
90 90
91zinstall install: vmlinux 91zinstall install:
92 $(Q)$(MAKE) $(build)=$(boot) $@ 92 $(Q)$(MAKE) $(build)=$(boot) $@
93 93
94%.dtb: scripts 94%.dtb: scripts
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abcbba2f01ba..305c552b5ec1 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -34,10 +34,10 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
34$(obj)/Image.lzo: $(obj)/Image FORCE 34$(obj)/Image.lzo: $(obj)/Image FORCE
35 $(call if_changed,lzo) 35 $(call if_changed,lzo)
36 36
37install: $(obj)/Image 37install:
38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 38 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
39 $(obj)/Image System.map "$(INSTALL_PATH)" 39 $(obj)/Image System.map "$(INSTALL_PATH)"
40 40
41zinstall: $(obj)/Image.gz 41zinstall:
42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ 42 $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
43 $(obj)/Image.gz System.map "$(INSTALL_PATH)" 43 $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 12ed78aa6f0c..d91e1f022573 100644
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -20,6 +20,20 @@
20# $4 - default install path (blank if root directory) 20# $4 - default install path (blank if root directory)
21# 21#
22 22
23verify () {
24 if [ ! -f "$1" ]; then
25 echo "" 1>&2
26 echo " *** Missing file: $1" 1>&2
27 echo ' *** You need to run "make" before "make install".' 1>&2
28 echo "" 1>&2
29 exit 1
30 fi
31}
32
33# Make sure the files actually exist
34verify "$2"
35verify "$3"
36
23# User may have a custom install script 37# User may have a custom install script
24if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 38if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
25if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 39if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 05d9e16c0dfd..7a3d22a46faf 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@ static struct crypto_alg aes_algs[] = { {
294 .cra_blkcipher = { 294 .cra_blkcipher = {
295 .min_keysize = AES_MIN_KEY_SIZE, 295 .min_keysize = AES_MIN_KEY_SIZE,
296 .max_keysize = AES_MAX_KEY_SIZE, 296 .max_keysize = AES_MAX_KEY_SIZE,
297 .ivsize = AES_BLOCK_SIZE, 297 .ivsize = 0,
298 .setkey = aes_setkey, 298 .setkey = aes_setkey,
299 .encrypt = ecb_encrypt, 299 .encrypt = ecb_encrypt,
300 .decrypt = ecb_decrypt, 300 .decrypt = ecb_decrypt,
@@ -371,7 +371,7 @@ static struct crypto_alg aes_algs[] = { {
371 .cra_ablkcipher = { 371 .cra_ablkcipher = {
372 .min_keysize = AES_MIN_KEY_SIZE, 372 .min_keysize = AES_MIN_KEY_SIZE,
373 .max_keysize = AES_MAX_KEY_SIZE, 373 .max_keysize = AES_MAX_KEY_SIZE,
374 .ivsize = AES_BLOCK_SIZE, 374 .ivsize = 0,
375 .setkey = ablk_set_key, 375 .setkey = ablk_set_key,
376 .encrypt = ablk_encrypt, 376 .encrypt = ablk_encrypt,
377 .decrypt = ablk_decrypt, 377 .decrypt = ablk_decrypt,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 2731d3b25ed2..8ec88e5b290f 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
103 u64 irqstat; 103 u64 irqstat;
104 104
105 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat)); 105 asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
106 dsb(sy);
106 return irqstat; 107 return irqstat;
107} 108}
108 109
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 738a95f93e49..d201d4b396d1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,8 +107,6 @@
107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \ 107#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ) 108 TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
109 109
110#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
111
112/* VTCR_EL2 Registers bits */ 110/* VTCR_EL2 Registers bits */
113#define VTCR_EL2_RES1 (1 << 31) 111#define VTCR_EL2_RES1 (1 << 31)
114#define VTCR_EL2_PS_MASK (7 << 16) 112#define VTCR_EL2_PS_MASK (7 << 16)
@@ -182,6 +180,7 @@
182#define CPTR_EL2_TCPAC (1 << 31) 180#define CPTR_EL2_TCPAC (1 << 31)
183#define CPTR_EL2_TTA (1 << 20) 181#define CPTR_EL2_TTA (1 << 20)
184#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) 182#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
183#define CPTR_EL2_DEFAULT 0x000033ff
185 184
186/* Hyp Debug Configuration Register bits */ 185/* Hyp Debug Configuration Register bits */
187#define MDCR_EL2_TDRA (1 << 11) 186#define MDCR_EL2_TDRA (1 << 11)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3066328cd86b..779a5872a2c5 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
127 127
128static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) 128static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
129{ 129{
130 u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; 130 u32 mode;
131 131
132 if (vcpu_mode_is_32bit(vcpu)) 132 if (vcpu_mode_is_32bit(vcpu)) {
133 mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
133 return mode > COMPAT_PSR_MODE_USR; 134 return mode > COMPAT_PSR_MODE_USR;
135 }
136
137 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
134 138
135 return mode != PSR_MODE_EL0t; 139 return mode != PSR_MODE_EL0t;
136} 140}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3aeec3e6..c536c9e307b9 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,11 +226,28 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
226 return retval; 226 return retval;
227} 227}
228 228
229static void send_user_sigtrap(int si_code)
230{
231 struct pt_regs *regs = current_pt_regs();
232 siginfo_t info = {
233 .si_signo = SIGTRAP,
234 .si_errno = 0,
235 .si_code = si_code,
236 .si_addr = (void __user *)instruction_pointer(regs),
237 };
238
239 if (WARN_ON(!user_mode(regs)))
240 return;
241
242 if (interrupts_enabled(regs))
243 local_irq_enable();
244
245 force_sig_info(SIGTRAP, &info, current);
246}
247
229static int single_step_handler(unsigned long addr, unsigned int esr, 248static int single_step_handler(unsigned long addr, unsigned int esr,
230 struct pt_regs *regs) 249 struct pt_regs *regs)
231{ 250{
232 siginfo_t info;
233
234 /* 251 /*
235 * If we are stepping a pending breakpoint, call the hw_breakpoint 252 * If we are stepping a pending breakpoint, call the hw_breakpoint
236 * handler first. 253 * handler first.
@@ -239,11 +256,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
239 return 0; 256 return 0;
240 257
241 if (user_mode(regs)) { 258 if (user_mode(regs)) {
242 info.si_signo = SIGTRAP; 259 send_user_sigtrap(TRAP_HWBKPT);
243 info.si_errno = 0;
244 info.si_code = TRAP_HWBKPT;
245 info.si_addr = (void __user *)instruction_pointer(regs);
246 force_sig_info(SIGTRAP, &info, current);
247 260
248 /* 261 /*
249 * ptrace will disable single step unless explicitly 262 * ptrace will disable single step unless explicitly
@@ -307,17 +320,8 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
307static int brk_handler(unsigned long addr, unsigned int esr, 320static int brk_handler(unsigned long addr, unsigned int esr,
308 struct pt_regs *regs) 321 struct pt_regs *regs)
309{ 322{
310 siginfo_t info;
311
312 if (user_mode(regs)) { 323 if (user_mode(regs)) {
313 info = (siginfo_t) { 324 send_user_sigtrap(TRAP_BRKPT);
314 .si_signo = SIGTRAP,
315 .si_errno = 0,
316 .si_code = TRAP_BRKPT,
317 .si_addr = (void __user *)instruction_pointer(regs),
318 };
319
320 force_sig_info(SIGTRAP, &info, current);
321 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { 325 } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
322 pr_warning("Unexpected kernel BRK exception at EL1\n"); 326 pr_warning("Unexpected kernel BRK exception at EL1\n");
323 return -EFAULT; 327 return -EFAULT;
@@ -328,7 +332,6 @@ static int brk_handler(unsigned long addr, unsigned int esr,
328 332
329int aarch32_break_handler(struct pt_regs *regs) 333int aarch32_break_handler(struct pt_regs *regs)
330{ 334{
331 siginfo_t info;
332 u32 arm_instr; 335 u32 arm_instr;
333 u16 thumb_instr; 336 u16 thumb_instr;
334 bool bp = false; 337 bool bp = false;
@@ -359,14 +362,7 @@ int aarch32_break_handler(struct pt_regs *regs)
359 if (!bp) 362 if (!bp)
360 return -EFAULT; 363 return -EFAULT;
361 364
362 info = (siginfo_t) { 365 send_user_sigtrap(TRAP_BRKPT);
363 .si_signo = SIGTRAP,
364 .si_errno = 0,
365 .si_code = TRAP_BRKPT,
366 .si_addr = pc,
367 };
368
369 force_sig_info(SIGTRAP, &info, current);
370 return 0; 366 return 0;
371} 367}
372 368
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 999633bd7294..352f7abd91c9 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -89,6 +89,7 @@ __efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); 89__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
90__efistub_memset = KALLSYMS_HIDE(__pi_memset); 90__efistub_memset = KALLSYMS_HIDE(__pi_memset);
91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); 91__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
92__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
92__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); 93__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
93__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); 94__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
94__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); 95__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4fad9787ab46..d9751a4769e7 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,14 +44,13 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
44 unsigned long irq_stack_ptr; 44 unsigned long irq_stack_ptr;
45 45
46 /* 46 /*
47 * Use raw_smp_processor_id() to avoid false-positives from 47 * Switching between stacks is valid when tracing current and in
48 * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping 48 * non-preemptible context.
49 * task stacks, we can be pre-empted in this case, so
50 * {raw_,}smp_processor_id() may give us the wrong value. Sleeping
51 * tasks can't ever be on an interrupt stack, so regardless of cpu,
52 * the checks will always fail.
53 */ 49 */
54 irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); 50 if (tsk == current && !preemptible())
51 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
52 else
53 irq_stack_ptr = 0;
55 54
56 low = frame->sp; 55 low = frame->sp;
57 /* irq stacks are not THREAD_SIZE aligned */ 56 /* irq stacks are not THREAD_SIZE aligned */
@@ -64,8 +63,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
64 return -EINVAL; 63 return -EINVAL;
65 64
66 frame->sp = fp + 0x10; 65 frame->sp = fp + 0x10;
67 frame->fp = *(unsigned long *)(fp); 66 frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
68 frame->pc = *(unsigned long *)(fp + 8); 67 frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
69 68
70#ifdef CONFIG_FUNCTION_GRAPH_TRACER 69#ifdef CONFIG_FUNCTION_GRAPH_TRACER
71 if (tsk && tsk->ret_stack && 70 if (tsk && tsk->ret_stack &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cbedd724f48e..c5392081b49b 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -146,9 +146,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) 146static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
147{ 147{
148 struct stackframe frame; 148 struct stackframe frame;
149 unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id()); 149 unsigned long irq_stack_ptr;
150 int skip; 150 int skip;
151 151
152 /*
153 * Switching between stacks is valid when tracing current and in
154 * non-preemptible context.
155 */
156 if (tsk == current && !preemptible())
157 irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
158 else
159 irq_stack_ptr = 0;
160
152 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 161 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
153 162
154 if (!tsk) 163 if (!tsk)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dcd907b..d073b5a216f7 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@ __do_hyp_init:
64 mrs x4, tcr_el1 64 mrs x4, tcr_el1
65 ldr x5, =TCR_EL2_MASK 65 ldr x5, =TCR_EL2_MASK
66 and x4, x4, x5 66 and x4, x4, x5
67 ldr x5, =TCR_EL2_FLAGS 67 mov x5, #TCR_EL2_RES1
68 orr x4, x4, x5 68 orr x4, x4, x5
69 69
70#ifndef CONFIG_ARM64_VA_BITS_48 70#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,15 +85,17 @@ __do_hyp_init:
85 ldr_l x5, idmap_t0sz 85 ldr_l x5, idmap_t0sz
86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH 86 bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
87#endif 87#endif
88 msr tcr_el2, x4
89
90 ldr x4, =VTCR_EL2_FLAGS
91 /* 88 /*
92 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in 89 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
93 * VTCR_EL2. 90 * TCR_EL2 and VTCR_EL2.
94 */ 91 */
95 mrs x5, ID_AA64MMFR0_EL1 92 mrs x5, ID_AA64MMFR0_EL1
96 bfi x4, x5, #16, #3 93 bfi x4, x5, #16, #3
94
95 msr tcr_el2, x4
96
97 ldr x4, =VTCR_EL2_FLAGS
98 bfi x4, x5, #16, #3
97 /* 99 /*
98 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in 100 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
99 * VTCR_EL2. 101 * VTCR_EL2.
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca8f5a5e2f96..f0e7bdfae134 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
36 write_sysreg(val, hcr_el2); 36 write_sysreg(val, hcr_el2);
37 /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ 37 /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
38 write_sysreg(1 << 15, hstr_el2); 38 write_sysreg(1 << 15, hstr_el2);
39 write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); 39
40 val = CPTR_EL2_DEFAULT;
41 val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
42 write_sysreg(val, cptr_el2);
43
40 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); 44 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
41} 45}
42 46
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
45 write_sysreg(HCR_RW, hcr_el2); 49 write_sysreg(HCR_RW, hcr_el2);
46 write_sysreg(0, hstr_el2); 50 write_sysreg(0, hstr_el2);
47 write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); 51 write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
48 write_sysreg(0, cptr_el2); 52 write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
49} 53}
50 54
51static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) 55static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 9142e082f5f3..5dd2a26444ec 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -149,16 +149,6 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
149 149
150 switch (nr_pri_bits) { 150 switch (nr_pri_bits) {
151 case 7: 151 case 7:
152 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
153 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
154 case 6:
155 write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
156 default:
157 write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
158 }
159
160 switch (nr_pri_bits) {
161 case 7:
162 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2); 152 write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
163 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2); 153 write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
164 case 6: 154 case 6:
@@ -167,6 +157,16 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
167 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2); 157 write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
168 } 158 }
169 159
160 switch (nr_pri_bits) {
161 case 7:
162 write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
163 write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
164 case 6:
165 write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
166 default:
167 write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
168 }
169
170 switch (max_lr_idx) { 170 switch (max_lr_idx) {
171 case 15: 171 case 15:
172 write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2); 172 write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 648112e90ed5..4d1ac81870d2 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -27,7 +27,11 @@
27 27
28#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ 28#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
29 PSR_I_BIT | PSR_D_BIT) 29 PSR_I_BIT | PSR_D_BIT)
30#define EL1_EXCEPT_SYNC_OFFSET 0x200 30
31#define CURRENT_EL_SP_EL0_VECTOR 0x0
32#define CURRENT_EL_SP_ELx_VECTOR 0x200
33#define LOWER_EL_AArch64_VECTOR 0x400
34#define LOWER_EL_AArch32_VECTOR 0x600
31 35
32static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) 36static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
33{ 37{
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
97 *fsr = 0x14; 101 *fsr = 0x14;
98} 102}
99 103
104enum exception_type {
105 except_type_sync = 0,
106 except_type_irq = 0x80,
107 except_type_fiq = 0x100,
108 except_type_serror = 0x180,
109};
110
111static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
112{
113 u64 exc_offset;
114
115 switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
116 case PSR_MODE_EL1t:
117 exc_offset = CURRENT_EL_SP_EL0_VECTOR;
118 break;
119 case PSR_MODE_EL1h:
120 exc_offset = CURRENT_EL_SP_ELx_VECTOR;
121 break;
122 case PSR_MODE_EL0t:
123 exc_offset = LOWER_EL_AArch64_VECTOR;
124 break;
125 default:
126 exc_offset = LOWER_EL_AArch32_VECTOR;
127 }
128
129 return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
130}
131
100static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) 132static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
101{ 133{
102 unsigned long cpsr = *vcpu_cpsr(vcpu); 134 unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
108 *vcpu_spsr(vcpu) = cpsr; 140 *vcpu_spsr(vcpu) = cpsr;
109 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); 141 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
110 142
143 *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
111 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; 144 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
112 *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
113 145
114 vcpu_sys_reg(vcpu, FAR_EL1) = addr; 146 vcpu_sys_reg(vcpu, FAR_EL1) = addr;
115 147
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
143 *vcpu_spsr(vcpu) = cpsr; 175 *vcpu_spsr(vcpu) = cpsr;
144 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); 176 *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
145 177
178 *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
146 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; 179 *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
147 *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
148 180
149 /* 181 /*
150 * Build an unknown exception, depending on the instruction 182 * Build an unknown exception, depending on the instruction
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index eec3598b4184..2e90371cfb37 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
1007 if (likely(r->access(vcpu, params, r))) { 1007 if (likely(r->access(vcpu, params, r))) {
1008 /* Skip instruction, since it was emulated */ 1008 /* Skip instruction, since it was emulated */
1009 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 1009 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1010 /* Handled */
1011 return 0;
1010 } 1012 }
1011
1012 /* Handled */
1013 return 0;
1014 } 1013 }
1015 1014
1016 /* Not handled */ 1015 /* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
1043} 1042}
1044 1043
1045/** 1044/**
1046 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access 1045 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
1047 * @vcpu: The VCPU pointer 1046 * @vcpu: The VCPU pointer
1048 * @run: The kvm_run struct 1047 * @run: The kvm_run struct
1049 */ 1048 */
@@ -1095,7 +1094,7 @@ out:
1095} 1094}
1096 1095
1097/** 1096/**
1098 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access 1097 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
1099 * @vcpu: The VCPU pointer 1098 * @vcpu: The VCPU pointer
1100 * @run: The kvm_run struct 1099 * @run: The kvm_run struct
1101 */ 1100 */
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 2ca665711bf2..eae38da6e0bb 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -168,4 +168,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
168.Lhit_limit: 168.Lhit_limit:
169 mov len, limit 169 mov len, limit
170 ret 170 ret
171ENDPROC(strnlen) 171ENDPIPROC(strnlen)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 331c4ca6205c..a6e757cbab77 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@ static int __init __iommu_dma_init(void)
933 ret = register_iommu_dma_ops_notifier(&platform_bus_type); 933 ret = register_iommu_dma_ops_notifier(&platform_bus_type);
934 if (!ret) 934 if (!ret)
935 ret = register_iommu_dma_ops_notifier(&amba_bustype); 935 ret = register_iommu_dma_ops_notifier(&amba_bustype);
936
937 /* handle devices queued before this arch_initcall */
938 if (!ret)
939 __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
936 return ret; 940 return ret;
937} 941}
938arch_initcall(__iommu_dma_init); 942arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 92ddac1e8ca2..abe2a9542b3a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,6 +371,13 @@ static int __kprobes do_translation_fault(unsigned long addr,
371 return 0; 371 return 0;
372} 372}
373 373
374static int do_alignment_fault(unsigned long addr, unsigned int esr,
375 struct pt_regs *regs)
376{
377 do_bad_area(addr, esr, regs);
378 return 0;
379}
380
374/* 381/*
375 * This abort handler always returns "fault". 382 * This abort handler always returns "fault".
376 */ 383 */
@@ -418,7 +425,7 @@ static struct fault_info {
418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 425 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, 426 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
420 { do_bad, SIGBUS, 0, "unknown 32" }, 427 { do_bad, SIGBUS, 0, "unknown 32" },
421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, 428 { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
422 { do_bad, SIGBUS, 0, "unknown 34" }, 429 { do_bad, SIGBUS, 0, "unknown 34" },
423 { do_bad, SIGBUS, 0, "unknown 35" }, 430 { do_bad, SIGBUS, 0, "unknown 35" },
424 { do_bad, SIGBUS, 0, "unknown 36" }, 431 { do_bad, SIGBUS, 0, "unknown 36" },
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 4c893b5189dd..232f787a088a 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -53,10 +53,10 @@ unsigned long arch_mmap_rnd(void)
53 53
54#ifdef CONFIG_COMPAT 54#ifdef CONFIG_COMPAT
55 if (test_thread_flag(TIF_32BIT)) 55 if (test_thread_flag(TIF_32BIT))
56 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); 56 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
57 else 57 else
58#endif 58#endif
59 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 59 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
60 return rnd << PAGE_SHIFT; 60 return rnd << PAGE_SHIFT;
61} 61}
62 62
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index fc96e814188e..d1fc4796025e 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m
108CONFIG_NFT_QUEUE=m 108CONFIG_NFT_QUEUE=m
109CONFIG_NFT_REJECT=m 109CONFIG_NFT_REJECT=m
110CONFIG_NFT_COMPAT=m 110CONFIG_NFT_COMPAT=m
111CONFIG_NFT_DUP_NETDEV=m
112CONFIG_NFT_FWD_NETDEV=m
111CONFIG_NETFILTER_XT_SET=m 113CONFIG_NETFILTER_XT_SET=m
112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 114CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 115CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_L2TP=m
266CONFIG_BRIDGE=m 268CONFIG_BRIDGE=m
267CONFIG_ATALK=m 269CONFIG_ATALK=m
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@ CONFIG_ARIADNE=y
366# CONFIG_NET_VENDOR_INTEL is not set 374# CONFIG_NET_VENDOR_INTEL is not set
367# CONFIG_NET_VENDOR_MARVELL is not set 375# CONFIG_NET_VENDOR_MARVELL is not set
368# CONFIG_NET_VENDOR_MICREL is not set 376# CONFIG_NET_VENDOR_MICREL is not set
377# CONFIG_NET_VENDOR_NETRONOME is not set
369CONFIG_HYDRA=y 378CONFIG_HYDRA=y
370CONFIG_APNE=y 379CONFIG_APNE=y
371CONFIG_ZORRO8390=y 380CONFIG_ZORRO8390=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 05c904f08d9d..9bfe8be3658c 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@ CONFIG_VETH=m
344# CONFIG_NET_VENDOR_MARVELL is not set 352# CONFIG_NET_VENDOR_MARVELL is not set
345# CONFIG_NET_VENDOR_MICREL is not set 353# CONFIG_NET_VENDOR_MICREL is not set
346# CONFIG_NET_VENDOR_NATSEMI is not set 354# CONFIG_NET_VENDOR_NATSEMI is not set
355# CONFIG_NET_VENDOR_NETRONOME is not set
347# CONFIG_NET_VENDOR_QUALCOMM is not set 356# CONFIG_NET_VENDOR_QUALCOMM is not set
348# CONFIG_NET_VENDOR_RENESAS is not set 357# CONFIG_NET_VENDOR_RENESAS is not set
349# CONFIG_NET_VENDOR_ROCKER is not set 358# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index d572b731c510..ebdcfae55580 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y
353# CONFIG_NET_VENDOR_INTEL is not set 361# CONFIG_NET_VENDOR_INTEL is not set
354# CONFIG_NET_VENDOR_MARVELL is not set 362# CONFIG_NET_VENDOR_MARVELL is not set
355# CONFIG_NET_VENDOR_MICREL is not set 363# CONFIG_NET_VENDOR_MICREL is not set
364# CONFIG_NET_VENDOR_NETRONOME is not set
356CONFIG_NE2000=y 365CONFIG_NE2000=y
357# CONFIG_NET_VENDOR_QUALCOMM is not set 366# CONFIG_NET_VENDOR_QUALCOMM is not set
358# CONFIG_NET_VENDOR_RENESAS is not set 367# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 11a30c65ad44..8acc65e54995 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6630a5154b9d..0c6a3d52b26e 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
106CONFIG_NFT_QUEUE=m 106CONFIG_NFT_QUEUE=m
107CONFIG_NFT_REJECT=m 107CONFIG_NFT_REJECT=m
108CONFIG_NFT_COMPAT=m 108CONFIG_NFT_COMPAT=m
109CONFIG_NFT_DUP_NETDEV=m
110CONFIG_NFT_FWD_NETDEV=m
109CONFIG_NETFILTER_XT_SET=m 111CONFIG_NETFILTER_XT_SET=m
110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 112CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 113CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
264CONFIG_BRIDGE=m 266CONFIG_BRIDGE=m
265CONFIG_ATALK=m 267CONFIG_ATALK=m
266CONFIG_6LOWPAN=m 268CONFIG_6LOWPAN=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
270CONFIG_6LOWPAN_GHC_UDP=m
271CONFIG_6LOWPAN_GHC_ICMPV6=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
273CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
267CONFIG_DNS_RESOLVER=y 275CONFIG_DNS_RESOLVER=y
268CONFIG_BATMAN_ADV=m 276CONFIG_BATMAN_ADV=m
269CONFIG_BATMAN_ADV_DAT=y 277CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@ CONFIG_HPLANCE=y
345# CONFIG_NET_VENDOR_MARVELL is not set 353# CONFIG_NET_VENDOR_MARVELL is not set
346# CONFIG_NET_VENDOR_MICREL is not set 354# CONFIG_NET_VENDOR_MICREL is not set
347# CONFIG_NET_VENDOR_NATSEMI is not set 355# CONFIG_NET_VENDOR_NATSEMI is not set
356# CONFIG_NET_VENDOR_NETRONOME is not set
348# CONFIG_NET_VENDOR_QUALCOMM is not set 357# CONFIG_NET_VENDOR_QUALCOMM is not set
349# CONFIG_NET_VENDOR_RENESAS is not set 358# CONFIG_NET_VENDOR_RENESAS is not set
350# CONFIG_NET_VENDOR_ROCKER is not set 359# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1d90b71d0903..12a8a6cb32f4 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m
105CONFIG_NFT_QUEUE=m 105CONFIG_NFT_QUEUE=m
106CONFIG_NFT_REJECT=m 106CONFIG_NFT_REJECT=m
107CONFIG_NFT_COMPAT=m 107CONFIG_NFT_COMPAT=m
108CONFIG_NFT_DUP_NETDEV=m
109CONFIG_NFT_FWD_NETDEV=m
108CONFIG_NETFILTER_XT_SET=m 110CONFIG_NETFILTER_XT_SET=m
109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 111CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 112CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m
266CONFIG_IPDDP=m 268CONFIG_IPDDP=m
267CONFIG_IPDDP_ENCAP=y 269CONFIG_IPDDP_ENCAP=y
268CONFIG_6LOWPAN=m 270CONFIG_6LOWPAN=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
272CONFIG_6LOWPAN_GHC_UDP=m
273CONFIG_6LOWPAN_GHC_ICMPV6=m
274CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
275CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
276CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
269CONFIG_DNS_RESOLVER=y 277CONFIG_DNS_RESOLVER=y
270CONFIG_BATMAN_ADV=m 278CONFIG_BATMAN_ADV=m
271CONFIG_BATMAN_ADV_DAT=y 279CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@ CONFIG_MAC89x0=y
362# CONFIG_NET_VENDOR_MARVELL is not set 370# CONFIG_NET_VENDOR_MARVELL is not set
363# CONFIG_NET_VENDOR_MICREL is not set 371# CONFIG_NET_VENDOR_MICREL is not set
364CONFIG_MACSONIC=y 372CONFIG_MACSONIC=y
373# CONFIG_NET_VENDOR_NETRONOME is not set
365CONFIG_MAC8390=y 374CONFIG_MAC8390=y
366# CONFIG_NET_VENDOR_QUALCOMM is not set 375# CONFIG_NET_VENDOR_QUALCOMM is not set
367# CONFIG_NET_VENDOR_RENESAS is not set 376# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1fd21c1ca87f..64ff2dcb34c8 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m
115CONFIG_NFT_QUEUE=m 115CONFIG_NFT_QUEUE=m
116CONFIG_NFT_REJECT=m 116CONFIG_NFT_REJECT=m
117CONFIG_NFT_COMPAT=m 117CONFIG_NFT_COMPAT=m
118CONFIG_NFT_DUP_NETDEV=m
119CONFIG_NFT_FWD_NETDEV=m
118CONFIG_NETFILTER_XT_SET=m 120CONFIG_NETFILTER_XT_SET=m
119CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 121CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
120CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 122CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m
276CONFIG_IPDDP=m 278CONFIG_IPDDP=m
277CONFIG_IPDDP_ENCAP=y 279CONFIG_IPDDP_ENCAP=y
278CONFIG_6LOWPAN=m 280CONFIG_6LOWPAN=m
281CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
282CONFIG_6LOWPAN_GHC_UDP=m
283CONFIG_6LOWPAN_GHC_ICMPV6=m
284CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
285CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
286CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
279CONFIG_DNS_RESOLVER=y 287CONFIG_DNS_RESOLVER=y
280CONFIG_BATMAN_ADV=m 288CONFIG_BATMAN_ADV=m
281CONFIG_BATMAN_ADV_DAT=y 289CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y
404# CONFIG_NET_VENDOR_MARVELL is not set 412# CONFIG_NET_VENDOR_MARVELL is not set
405# CONFIG_NET_VENDOR_MICREL is not set 413# CONFIG_NET_VENDOR_MICREL is not set
406CONFIG_MACSONIC=y 414CONFIG_MACSONIC=y
415# CONFIG_NET_VENDOR_NETRONOME is not set
407CONFIG_HYDRA=y 416CONFIG_HYDRA=y
408CONFIG_MAC8390=y 417CONFIG_MAC8390=y
409CONFIG_NE2000=y 418CONFIG_NE2000=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 74e10f79d7b1..07fc6abcfe0c 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m
103CONFIG_NFT_QUEUE=m 103CONFIG_NFT_QUEUE=m
104CONFIG_NFT_REJECT=m 104CONFIG_NFT_REJECT=m
105CONFIG_NFT_COMPAT=m 105CONFIG_NFT_COMPAT=m
106CONFIG_NFT_DUP_NETDEV=m
107CONFIG_NFT_FWD_NETDEV=m
106CONFIG_NETFILTER_XT_SET=m 108CONFIG_NETFILTER_XT_SET=m
107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 109CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 110CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@ CONFIG_L2TP=m
261CONFIG_BRIDGE=m 263CONFIG_BRIDGE=m
262CONFIG_ATALK=m 264CONFIG_ATALK=m
263CONFIG_6LOWPAN=m 265CONFIG_6LOWPAN=m
266CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
267CONFIG_6LOWPAN_GHC_UDP=m
268CONFIG_6LOWPAN_GHC_ICMPV6=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
264CONFIG_DNS_RESOLVER=y 272CONFIG_DNS_RESOLVER=y
265CONFIG_BATMAN_ADV=m 273CONFIG_BATMAN_ADV=m
266CONFIG_BATMAN_ADV_DAT=y 274CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7034e716f166..69903ded88f7 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y
343# CONFIG_NET_VENDOR_MARVELL is not set 351# CONFIG_NET_VENDOR_MARVELL is not set
344# CONFIG_NET_VENDOR_MICREL is not set 352# CONFIG_NET_VENDOR_MICREL is not set
345# CONFIG_NET_VENDOR_NATSEMI is not set 353# CONFIG_NET_VENDOR_NATSEMI is not set
354# CONFIG_NET_VENDOR_NETRONOME is not set
346# CONFIG_NET_VENDOR_QUALCOMM is not set 355# CONFIG_NET_VENDOR_QUALCOMM is not set
347# CONFIG_NET_VENDOR_RENESAS is not set 356# CONFIG_NET_VENDOR_RENESAS is not set
348# CONFIG_NET_VENDOR_ROCKER is not set 357# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f7deb5f702a6..bd8401686dde 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
104CONFIG_NFT_QUEUE=m 104CONFIG_NFT_QUEUE=m
105CONFIG_NFT_REJECT=m 105CONFIG_NFT_REJECT=m
106CONFIG_NFT_COMPAT=m 106CONFIG_NFT_COMPAT=m
107CONFIG_NFT_DUP_NETDEV=m
108CONFIG_NFT_FWD_NETDEV=m
107CONFIG_NETFILTER_XT_SET=m 109CONFIG_NETFILTER_XT_SET=m
108CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 110CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
262CONFIG_BRIDGE=m 264CONFIG_BRIDGE=m
263CONFIG_ATALK=m 265CONFIG_ATALK=m
264CONFIG_6LOWPAN=m 266CONFIG_6LOWPAN=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
268CONFIG_6LOWPAN_GHC_UDP=m
269CONFIG_6LOWPAN_GHC_ICMPV6=m
270CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
271CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
272CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
265CONFIG_DNS_RESOLVER=y 273CONFIG_DNS_RESOLVER=y
266CONFIG_BATMAN_ADV=m 274CONFIG_BATMAN_ADV=m
267CONFIG_BATMAN_ADV_DAT=y 275CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@ CONFIG_VETH=m
352# CONFIG_NET_VENDOR_INTEL is not set 360# CONFIG_NET_VENDOR_INTEL is not set
353# CONFIG_NET_VENDOR_MARVELL is not set 361# CONFIG_NET_VENDOR_MARVELL is not set
354# CONFIG_NET_VENDOR_MICREL is not set 362# CONFIG_NET_VENDOR_MICREL is not set
363# CONFIG_NET_VENDOR_NETRONOME is not set
355CONFIG_NE2000=y 364CONFIG_NE2000=y
356# CONFIG_NET_VENDOR_QUALCOMM is not set 365# CONFIG_NET_VENDOR_QUALCOMM is not set
357# CONFIG_NET_VENDOR_RENESAS is not set 366# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 0ce79eb0d805..5f9fb3ab9636 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y
340# CONFIG_NET_VENDOR_MARVELL is not set 348# CONFIG_NET_VENDOR_MARVELL is not set
341# CONFIG_NET_VENDOR_MICREL is not set 349# CONFIG_NET_VENDOR_MICREL is not set
342# CONFIG_NET_VENDOR_NATSEMI is not set 350# CONFIG_NET_VENDOR_NATSEMI is not set
351# CONFIG_NET_VENDOR_NETRONOME is not set
343# CONFIG_NET_VENDOR_QUALCOMM is not set 352# CONFIG_NET_VENDOR_QUALCOMM is not set
344# CONFIG_NET_VENDOR_RENESAS is not set 353# CONFIG_NET_VENDOR_RENESAS is not set
345# CONFIG_NET_VENDOR_ROCKER is not set 354# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 4cb787e4991f..5d1c674530e2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
101CONFIG_NFT_QUEUE=m 101CONFIG_NFT_QUEUE=m
102CONFIG_NFT_REJECT=m 102CONFIG_NFT_REJECT=m
103CONFIG_NFT_COMPAT=m 103CONFIG_NFT_COMPAT=m
104CONFIG_NFT_DUP_NETDEV=m
105CONFIG_NFT_FWD_NETDEV=m
104CONFIG_NETFILTER_XT_SET=m 106CONFIG_NETFILTER_XT_SET=m
105CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m 107CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
106CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 108CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
259CONFIG_BRIDGE=m 261CONFIG_BRIDGE=m
260CONFIG_ATALK=m 262CONFIG_ATALK=m
261CONFIG_6LOWPAN=m 263CONFIG_6LOWPAN=m
264CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
265CONFIG_6LOWPAN_GHC_UDP=m
266CONFIG_6LOWPAN_GHC_ICMPV6=m
267CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
268CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
269CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
262CONFIG_DNS_RESOLVER=y 270CONFIG_DNS_RESOLVER=y
263CONFIG_BATMAN_ADV=m 271CONFIG_BATMAN_ADV=m
264CONFIG_BATMAN_ADV_DAT=y 272CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y
341# CONFIG_NET_VENDOR_MARVELL is not set 349# CONFIG_NET_VENDOR_MARVELL is not set
342# CONFIG_NET_VENDOR_MICREL is not set 350# CONFIG_NET_VENDOR_MICREL is not set
343# CONFIG_NET_VENDOR_NATSEMI is not set 351# CONFIG_NET_VENDOR_NATSEMI is not set
352# CONFIG_NET_VENDOR_NETRONOME is not set
344# CONFIG_NET_VENDOR_QUALCOMM is not set 353# CONFIG_NET_VENDOR_QUALCOMM is not set
345# CONFIG_NET_VENDOR_RENESAS is not set 354# CONFIG_NET_VENDOR_RENESAS is not set
346# CONFIG_NET_VENDOR_ROCKER is not set 355# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f9d96bf86910..bafaff6dcd7b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 376 7#define NR_syscalls 377
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 36cf129de663..0ca729665f29 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -381,5 +381,6 @@
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375 383#define __NR_mlock2 375
384#define __NR_copy_file_range 376
384 385
385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 386#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 282cd903f4c4..8bb94261ff97 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -396,3 +396,4 @@ ENTRY(sys_call_table)
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */ 398 .long sys_mlock2 /* 375 */
399 .long sys_copy_file_range
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 57a945e832f4..74a3db92da1b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2085,7 +2085,7 @@ config PAGE_SIZE_32KB
2085 2085
2086config PAGE_SIZE_64KB 2086config PAGE_SIZE_64KB
2087 bool "64kB" 2087 bool "64kB"
2088 depends on !CPU_R3000 && !CPU_TX39XX 2088 depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000
2089 help 2089 help
2090 Using 64kB page size will result in higher performance kernel at 2090 Using 64kB page size will result in higher performance kernel at
2091 the price of higher memory consumption. This option is available on 2091 the price of higher memory consumption. This option is available on
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 459b9b252c3b..d61b1616b604 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -74,6 +74,7 @@
74 timer: timer@10000040 { 74 timer: timer@10000040 {
75 compatible = "syscon"; 75 compatible = "syscon";
76 reg = <0x10000040 0x2c>; 76 reg = <0x10000040 0x2c>;
77 little-endian;
77 }; 78 };
78 79
79 reboot { 80 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 4fc7ecee273c..1a7efa883c5e 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -98,6 +98,7 @@
98 sun_top_ctrl: syscon@404000 { 98 sun_top_ctrl: syscon@404000 {
99 compatible = "brcm,bcm7125-sun-top-ctrl", "syscon"; 99 compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
100 reg = <0x404000 0x60c>; 100 reg = <0x404000 0x60c>;
101 little-endian;
101 }; 102 };
102 103
103 reboot { 104 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index a3039bb53477..d4bf52cfcf17 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -118,6 +118,7 @@
118 sun_top_ctrl: syscon@404000 { 118 sun_top_ctrl: syscon@404000 {
119 compatible = "brcm,bcm7346-sun-top-ctrl", "syscon"; 119 compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
120 reg = <0x404000 0x51c>; 120 reg = <0x404000 0x51c>;
121 little-endian;
121 }; 122 };
122 123
123 reboot { 124 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index 4274ff41ec21..8e2501694d03 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -112,6 +112,7 @@
112 sun_top_ctrl: syscon@404000 { 112 sun_top_ctrl: syscon@404000 {
113 compatible = "brcm,bcm7358-sun-top-ctrl", "syscon"; 113 compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
114 reg = <0x404000 0x51c>; 114 reg = <0x404000 0x51c>;
115 little-endian;
115 }; 116 };
116 117
117 reboot { 118 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index 0dcc9163c27b..7e5f76040fb8 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -112,6 +112,7 @@
112 sun_top_ctrl: syscon@404000 { 112 sun_top_ctrl: syscon@404000 {
113 compatible = "brcm,bcm7360-sun-top-ctrl", "syscon"; 113 compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
114 reg = <0x404000 0x51c>; 114 reg = <0x404000 0x51c>;
115 little-endian;
115 }; 116 };
116 117
117 reboot { 118 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index 2f3f9fc2c478..c739ea77acb0 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -118,6 +118,7 @@
118 sun_top_ctrl: syscon@404000 { 118 sun_top_ctrl: syscon@404000 {
119 compatible = "brcm,bcm7362-sun-top-ctrl", "syscon"; 119 compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
120 reg = <0x404000 0x51c>; 120 reg = <0x404000 0x51c>;
121 little-endian;
121 }; 122 };
122 123
123 reboot { 124 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index bee221b3b568..5f55d0a50a28 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -99,6 +99,7 @@
99 sun_top_ctrl: syscon@404000 { 99 sun_top_ctrl: syscon@404000 {
100 compatible = "brcm,bcm7420-sun-top-ctrl", "syscon"; 100 compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
101 reg = <0x404000 0x60c>; 101 reg = <0x404000 0x60c>;
102 little-endian;
102 }; 103 };
103 104
104 reboot { 105 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 571f30f52e3f..e24d41ab4e30 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -100,6 +100,7 @@
100 sun_top_ctrl: syscon@404000 { 100 sun_top_ctrl: syscon@404000 {
101 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; 101 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
102 reg = <0x404000 0x51c>; 102 reg = <0x404000 0x51c>;
103 little-endian;
103 }; 104 };
104 105
105 reboot { 106 reboot {
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index 614ee211f71a..8b9432cc062b 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -114,6 +114,7 @@
114 sun_top_ctrl: syscon@404000 { 114 sun_top_ctrl: syscon@404000 {
115 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; 115 compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
116 reg = <0x404000 0x51c>; 116 reg = <0x404000 0x51c>;
117 little-endian;
117 }; 118 };
118 119
119 reboot { 120 reboot {
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index cefb7a596878..e090fc388e02 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -227,7 +227,7 @@ struct mips_elf_abiflags_v0 {
227 int __res = 1; \ 227 int __res = 1; \
228 struct elfhdr *__h = (hdr); \ 228 struct elfhdr *__h = (hdr); \
229 \ 229 \
230 if (__h->e_machine != EM_MIPS) \ 230 if (!mips_elf_check_machine(__h)) \
231 __res = 0; \ 231 __res = 0; \
232 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ 232 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
233 __res = 0; \ 233 __res = 0; \
@@ -258,7 +258,7 @@ struct mips_elf_abiflags_v0 {
258 int __res = 1; \ 258 int __res = 1; \
259 struct elfhdr *__h = (hdr); \ 259 struct elfhdr *__h = (hdr); \
260 \ 260 \
261 if (__h->e_machine != EM_MIPS) \ 261 if (!mips_elf_check_machine(__h)) \
262 __res = 0; \ 262 __res = 0; \
263 if (__h->e_ident[EI_CLASS] != ELFCLASS64) \ 263 if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
264 __res = 0; \ 264 __res = 0; \
@@ -285,6 +285,11 @@ struct mips_elf_abiflags_v0 {
285 285
286#endif /* !defined(ELF_ARCH) */ 286#endif /* !defined(ELF_ARCH) */
287 287
288#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
289
290#define vmcore_elf32_check_arch mips_elf_check_machine
291#define vmcore_elf64_check_arch mips_elf_check_machine
292
288struct mips_abi; 293struct mips_abi;
289 294
290extern struct mips_abi mips_abi; 295extern struct mips_abi mips_abi;
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 9cbf383b8834..f06f97bd62df 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -179,6 +179,10 @@ static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
179 if (save) 179 if (save)
180 _save_fp(tsk); 180 _save_fp(tsk);
181 __disable_fpu(); 181 __disable_fpu();
182 } else {
183 /* FPU should not have been left enabled with no owner */
184 WARN(read_c0_status() & ST0_CU1,
185 "Orphaned FPU left enabled");
182 } 186 }
183 KSTK_STATUS(tsk) &= ~ST0_CU1; 187 KSTK_STATUS(tsk) &= ~ST0_CU1;
184 clear_tsk_thread_flag(tsk, TIF_USEDFPU); 188 clear_tsk_thread_flag(tsk, TIF_USEDFPU);
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8ebd3f579b84..3ed10a8d7865 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -128,7 +128,8 @@ static inline int octeon_has_feature(enum octeon_feature feature)
128 case OCTEON_FEATURE_PCIE: 128 case OCTEON_FEATURE_PCIE:
129 return OCTEON_IS_MODEL(OCTEON_CN56XX) 129 return OCTEON_IS_MODEL(OCTEON_CN56XX)
130 || OCTEON_IS_MODEL(OCTEON_CN52XX) 130 || OCTEON_IS_MODEL(OCTEON_CN52XX)
131 || OCTEON_IS_MODEL(OCTEON_CN6XXX); 131 || OCTEON_IS_MODEL(OCTEON_CN6XXX)
132 || OCTEON_IS_MODEL(OCTEON_CN7XXX);
132 133
133 case OCTEON_FEATURE_SRIO: 134 case OCTEON_FEATURE_SRIO:
134 return OCTEON_IS_MODEL(OCTEON_CN63XX) 135 return OCTEON_IS_MODEL(OCTEON_CN63XX)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3f832c3dd8f5..041153f5cf93 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
45 * User space process size: 2GB. This is hardcoded into a few places, 45 * User space process size: 2GB. This is hardcoded into a few places,
46 * so don't change it unless you know what you are doing. 46 * so don't change it unless you know what you are doing.
47 */ 47 */
48#define TASK_SIZE 0x7fff8000UL 48#define TASK_SIZE 0x80000000UL
49#endif 49#endif
50 50
51#define STACK_TOP_MAX TASK_SIZE 51#define STACK_TOP_MAX TASK_SIZE
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a71da576883c..eebf39549606 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -289,7 +289,7 @@
289 .set reorder 289 .set reorder
290 .set noat 290 .set noat
291 mfc0 a0, CP0_STATUS 291 mfc0 a0, CP0_STATUS
292 li v1, 0xff00 292 li v1, ST0_CU1 | ST0_IM
293 ori a0, STATMASK 293 ori a0, STATMASK
294 xori a0, STATMASK 294 xori a0, STATMASK
295 mtc0 a0, CP0_STATUS 295 mtc0 a0, CP0_STATUS
@@ -330,7 +330,7 @@
330 ori a0, STATMASK 330 ori a0, STATMASK
331 xori a0, STATMASK 331 xori a0, STATMASK
332 mtc0 a0, CP0_STATUS 332 mtc0 a0, CP0_STATUS
333 li v1, 0xff00 333 li v1, ST0_CU1 | ST0_FR | ST0_IM
334 and a0, v1 334 and a0, v1
335 LONG_L v0, PT_STATUS(sp) 335 LONG_L v0, PT_STATUS(sp)
336 nor v1, $0, v1 336 nor v1, $0, v1
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6499d93ae68d..47bc45a67e9b 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
101 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */ 101 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
102 if ((config_enabled(CONFIG_32BIT) || 102 if ((config_enabled(CONFIG_32BIT) ||
103 test_tsk_thread_flag(task, TIF_32BIT_REGS)) && 103 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
104 (regs->regs[2] == __NR_syscall)) { 104 (regs->regs[2] == __NR_syscall))
105 i++; 105 i++;
106 n++;
107 }
108 106
109 while (n--) 107 while (n--)
110 ret |= mips_get_syscall_arg(args++, task, regs, i++); 108 ret |= mips_get_syscall_arg(args++, task, regs, i++);
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 90f03a7da665..3129795de940 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -380,16 +380,17 @@
380#define __NR_userfaultfd (__NR_Linux + 357) 380#define __NR_userfaultfd (__NR_Linux + 357)
381#define __NR_membarrier (__NR_Linux + 358) 381#define __NR_membarrier (__NR_Linux + 358)
382#define __NR_mlock2 (__NR_Linux + 359) 382#define __NR_mlock2 (__NR_Linux + 359)
383#define __NR_copy_file_range (__NR_Linux + 360)
383 384
384/* 385/*
385 * Offset of the last Linux o32 flavoured syscall 386 * Offset of the last Linux o32 flavoured syscall
386 */ 387 */
387#define __NR_Linux_syscalls 359 388#define __NR_Linux_syscalls 360
388 389
389#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 390#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
390 391
391#define __NR_O32_Linux 4000 392#define __NR_O32_Linux 4000
392#define __NR_O32_Linux_syscalls 359 393#define __NR_O32_Linux_syscalls 360
393 394
394#if _MIPS_SIM == _MIPS_SIM_ABI64 395#if _MIPS_SIM == _MIPS_SIM_ABI64
395 396
@@ -717,16 +718,17 @@
717#define __NR_userfaultfd (__NR_Linux + 317) 718#define __NR_userfaultfd (__NR_Linux + 317)
718#define __NR_membarrier (__NR_Linux + 318) 719#define __NR_membarrier (__NR_Linux + 318)
719#define __NR_mlock2 (__NR_Linux + 319) 720#define __NR_mlock2 (__NR_Linux + 319)
721#define __NR_copy_file_range (__NR_Linux + 320)
720 722
721/* 723/*
722 * Offset of the last Linux 64-bit flavoured syscall 724 * Offset of the last Linux 64-bit flavoured syscall
723 */ 725 */
724#define __NR_Linux_syscalls 319 726#define __NR_Linux_syscalls 320
725 727
726#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 728#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
727 729
728#define __NR_64_Linux 5000 730#define __NR_64_Linux 5000
729#define __NR_64_Linux_syscalls 319 731#define __NR_64_Linux_syscalls 320
730 732
731#if _MIPS_SIM == _MIPS_SIM_NABI32 733#if _MIPS_SIM == _MIPS_SIM_NABI32
732 734
@@ -1058,15 +1060,16 @@
1058#define __NR_userfaultfd (__NR_Linux + 321) 1060#define __NR_userfaultfd (__NR_Linux + 321)
1059#define __NR_membarrier (__NR_Linux + 322) 1061#define __NR_membarrier (__NR_Linux + 322)
1060#define __NR_mlock2 (__NR_Linux + 323) 1062#define __NR_mlock2 (__NR_Linux + 323)
1063#define __NR_copy_file_range (__NR_Linux + 324)
1061 1064
1062/* 1065/*
1063 * Offset of the last N32 flavoured syscall 1066 * Offset of the last N32 flavoured syscall
1064 */ 1067 */
1065#define __NR_Linux_syscalls 323 1068#define __NR_Linux_syscalls 324
1066 1069
1067#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1070#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1068 1071
1069#define __NR_N32_Linux 6000 1072#define __NR_N32_Linux 6000
1070#define __NR_N32_Linux_syscalls 323 1073#define __NR_N32_Linux_syscalls 324
1071 1074
1072#endif /* _UAPI_ASM_UNISTD_H */ 1075#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 1188e00bb120..1b992c6e3d8e 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -35,7 +35,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
35 int __res = 1; \ 35 int __res = 1; \
36 struct elfhdr *__h = (hdr); \ 36 struct elfhdr *__h = (hdr); \
37 \ 37 \
38 if (__h->e_machine != EM_MIPS) \ 38 if (!mips_elf_check_machine(__h)) \
39 __res = 0; \ 39 __res = 0; \
40 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ 40 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
41 __res = 0; \ 41 __res = 0; \
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 928767858b86..abd3affe5fb3 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -47,7 +47,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
47 int __res = 1; \ 47 int __res = 1; \
48 struct elfhdr *__h = (hdr); \ 48 struct elfhdr *__h = (hdr); \
49 \ 49 \
50 if (__h->e_machine != EM_MIPS) \ 50 if (!mips_elf_check_machine(__h)) \
51 __res = 0; \ 51 __res = 0; \
52 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \ 52 if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
53 __res = 0; \ 53 __res = 0; \
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index f2975d4d1e44..eddd5fd6fdfa 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -65,12 +65,10 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
65 status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); 65 status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
66 status |= KU_USER; 66 status |= KU_USER;
67 regs->cp0_status = status; 67 regs->cp0_status = status;
68 lose_fpu(0);
69 clear_thread_flag(TIF_MSA_CTX_LIVE);
68 clear_used_math(); 70 clear_used_math();
69 clear_fpu_owner();
70 init_dsp(); 71 init_dsp();
71 clear_thread_flag(TIF_USEDMSA);
72 clear_thread_flag(TIF_MSA_CTX_LIVE);
73 disable_msa();
74 regs->cp0_epc = pc; 72 regs->cp0_epc = pc;
75 regs->regs[29] = sp; 73 regs->regs[29] = sp;
76} 74}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c834ba96..a56317444bda 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -595,3 +595,4 @@ EXPORT(sys_call_table)
595 PTR sys_userfaultfd 595 PTR sys_userfaultfd
596 PTR sys_membarrier 596 PTR sys_membarrier
597 PTR sys_mlock2 597 PTR sys_mlock2
598 PTR sys_copy_file_range /* 4360 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac63315d0e..2b2dc14610d0 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -433,4 +433,5 @@ EXPORT(sys_call_table)
433 PTR sys_userfaultfd 433 PTR sys_userfaultfd
434 PTR sys_membarrier 434 PTR sys_membarrier
435 PTR sys_mlock2 435 PTR sys_mlock2
436 PTR sys_copy_file_range /* 5320 */
436 .size sys_call_table,.-sys_call_table 437 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 5a69eb48d0a8..2bf5c8593d91 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
423 PTR sys_userfaultfd 423 PTR sys_userfaultfd
424 PTR sys_membarrier 424 PTR sys_membarrier
425 PTR sys_mlock2 425 PTR sys_mlock2
426 PTR sys_copy_file_range
426 .size sysn32_call_table,.-sysn32_call_table 427 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e4b6d7c97822..c5b759e584c7 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -578,4 +578,5 @@ EXPORT(sys32_call_table)
578 PTR sys_userfaultfd 578 PTR sys_userfaultfd
579 PTR sys_membarrier 579 PTR sys_membarrier
580 PTR sys_mlock2 580 PTR sys_mlock2
581 PTR sys_copy_file_range /* 4360 */
581 .size sys32_call_table,.-sys32_call_table 582 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 569a7d5242dd..5fdaf8bdcd2e 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -782,6 +782,7 @@ static inline void prefill_possible_map(void) {}
782void __init setup_arch(char **cmdline_p) 782void __init setup_arch(char **cmdline_p)
783{ 783{
784 cpu_probe(); 784 cpu_probe();
785 mips_cm_probe();
785 prom_init(); 786 prom_init();
786 787
787 setup_early_fdc_console(); 788 setup_early_fdc_console();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bafcb7ad5c85..ae790c575d4f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -663,7 +663,7 @@ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
663 return -1; 663 return -1;
664} 664}
665 665
666static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) 666static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
667{ 667{
668 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { 668 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
669 int rd = (opcode & MM_RS) >> 16; 669 int rd = (opcode & MM_RS) >> 16;
@@ -1119,11 +1119,12 @@ no_r2_instr:
1119 if (get_isa16_mode(regs->cp0_epc)) { 1119 if (get_isa16_mode(regs->cp0_epc)) {
1120 unsigned short mmop[2] = { 0 }; 1120 unsigned short mmop[2] = { 0 };
1121 1121
1122 if (unlikely(get_user(mmop[0], epc) < 0)) 1122 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1123 status = SIGSEGV; 1123 status = SIGSEGV;
1124 if (unlikely(get_user(mmop[1], epc) < 0)) 1124 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1125 status = SIGSEGV; 1125 status = SIGSEGV;
1126 opcode = (mmop[0] << 16) | mmop[1]; 1126 opcode = mmop[0];
1127 opcode = (opcode << 16) | mmop[1];
1127 1128
1128 if (status < 0) 1129 if (status < 0)
1129 status = simulate_rdhwr_mm(regs, opcode); 1130 status = simulate_rdhwr_mm(regs, opcode);
@@ -1369,26 +1370,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1369 if (unlikely(compute_return_epc(regs) < 0)) 1370 if (unlikely(compute_return_epc(regs) < 0))
1370 break; 1371 break;
1371 1372
1372 if (get_isa16_mode(regs->cp0_epc)) { 1373 if (!get_isa16_mode(regs->cp0_epc)) {
1373 unsigned short mmop[2] = { 0 };
1374
1375 if (unlikely(get_user(mmop[0], epc) < 0))
1376 status = SIGSEGV;
1377 if (unlikely(get_user(mmop[1], epc) < 0))
1378 status = SIGSEGV;
1379 opcode = (mmop[0] << 16) | mmop[1];
1380
1381 if (status < 0)
1382 status = simulate_rdhwr_mm(regs, opcode);
1383 } else {
1384 if (unlikely(get_user(opcode, epc) < 0)) 1374 if (unlikely(get_user(opcode, epc) < 0))
1385 status = SIGSEGV; 1375 status = SIGSEGV;
1386 1376
1387 if (!cpu_has_llsc && status < 0) 1377 if (!cpu_has_llsc && status < 0)
1388 status = simulate_llsc(regs, opcode); 1378 status = simulate_llsc(regs, opcode);
1389
1390 if (status < 0)
1391 status = simulate_rdhwr_normal(regs, opcode);
1392 } 1379 }
1393 1380
1394 if (status < 0) 1381 if (status < 0)
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 5c81fdd032c3..353037699512 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -146,7 +146,7 @@ unsigned long arch_mmap_rnd(void)
146{ 146{
147 unsigned long rnd; 147 unsigned long rnd;
148 148
149 rnd = (unsigned long)get_random_int(); 149 rnd = get_random_long();
150 rnd <<= PAGE_SHIFT; 150 rnd <<= PAGE_SHIFT;
151 if (TASK_IS_32BIT_ADDR) 151 if (TASK_IS_32BIT_ADDR)
152 rnd &= 0xfffffful; 152 rnd &= 0xfffffful;
@@ -174,7 +174,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
174 174
175static inline unsigned long brk_rnd(void) 175static inline unsigned long brk_rnd(void)
176{ 176{
177 unsigned long rnd = get_random_int(); 177 unsigned long rnd = get_random_long();
178 178
179 rnd = rnd << PAGE_SHIFT; 179 rnd = rnd << PAGE_SHIFT;
180 /* 8MB for 32bit, 256MB for 64bit */ 180 /* 8MB for 32bit, 256MB for 64bit */
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 3bd0597d9c3d..249647578e58 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -181,10 +181,6 @@ static int __init mips_sc_probe_cm3(void)
181 return 1; 181 return 1;
182} 182}
183 183
184void __weak platform_early_l2_init(void)
185{
186}
187
188static inline int __init mips_sc_probe(void) 184static inline int __init mips_sc_probe(void)
189{ 185{
190 struct cpuinfo_mips *c = &current_cpu_data; 186 struct cpuinfo_mips *c = &current_cpu_data;
@@ -194,12 +190,6 @@ static inline int __init mips_sc_probe(void)
194 /* Mark as not present until probe completed */ 190 /* Mark as not present until probe completed */
195 c->scache.flags |= MIPS_CACHE_NOT_PRESENT; 191 c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
196 192
197 /*
198 * Do we need some platform specific probing before
199 * we configure L2?
200 */
201 platform_early_l2_init();
202
203 if (mips_cm_revision() >= CM_REV_CM3) 193 if (mips_cm_revision() >= CM_REV_CM3)
204 return mips_sc_probe_cm3(); 194 return mips_sc_probe_cm3();
205 195
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 571148c5fd0b..dc2c5214809d 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -293,7 +293,6 @@ mips_pci_controller:
293 console_config(); 293 console_config();
294#endif 294#endif
295 /* Early detection of CMP support */ 295 /* Early detection of CMP support */
296 mips_cm_probe();
297 mips_cpc_probe(); 296 mips_cpc_probe();
298 297
299 if (!register_cps_smp_ops()) 298 if (!register_cps_smp_ops())
@@ -304,10 +303,3 @@ mips_pci_controller:
304 return; 303 return;
305 register_up_smp_ops(); 304 register_up_smp_ops();
306} 305}
307
308void platform_early_l2_init(void)
309{
310 /* L2 configuration lives in the CM3 */
311 if (mips_cm_revision() >= CM_REV_CM3)
312 mips_cm_probe();
313}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index a009ee458934..1ae932c2d78b 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -297,12 +297,12 @@ static int mt7620_pci_probe(struct platform_device *pdev)
297 return PTR_ERR(rstpcie0); 297 return PTR_ERR(rstpcie0);
298 298
299 bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res); 299 bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
300 if (!bridge_base) 300 if (IS_ERR(bridge_base))
301 return -ENOMEM; 301 return PTR_ERR(bridge_base);
302 302
303 pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res); 303 pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
304 if (!pcie_base) 304 if (IS_ERR(pcie_base))
305 return -ENOMEM; 305 return PTR_ERR(pcie_base);
306 306
307 iomem_resource.start = 0; 307 iomem_resource.start = 0;
308 iomem_resource.end = ~0; 308 iomem_resource.end = ~0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd04bb7..9faa18c4f3f7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -557,7 +557,7 @@ choice
557 557
558config PPC_4K_PAGES 558config PPC_4K_PAGES
559 bool "4k page size" 559 bool "4k page size"
560 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 560 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
561 561
562config PPC_16K_PAGES 562config PPC_16K_PAGES
563 bool "16k page size" 563 bool "16k page size"
@@ -566,7 +566,7 @@ config PPC_16K_PAGES
566config PPC_64K_PAGES 566config PPC_64K_PAGES
567 bool "64k page size" 567 bool "64k page size"
568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64) 568 depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
569 select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S 569 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
570 570
571config PPC_256K_PAGES 571config PPC_256K_PAGES
572 bool "256k page size" 572 bool "256k page size"
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8d1c41d28318..ac07a30a7934 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -281,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 281extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
282 pmd_t *pmdp); 282 pmd_t *pmdp);
283 283
284#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
285extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
286 unsigned long address, pmd_t *pmdp);
287
284#define pmd_move_must_withdraw pmd_move_must_withdraw 288#define pmd_move_must_withdraw pmd_move_must_withdraw
285struct spinlock; 289struct spinlock;
286static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, 290static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f3d452..867c39b45df6 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@ struct pci_dn;
81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 81#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ 82#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */ 83#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
84#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
84 85
85struct eeh_pe { 86struct eeh_pe {
86 int type; /* PE type: PHB/Bus/Device */ 87 int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 8e86b48d0369..32e36b16773f 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
57extern void hcall_tracepoint_regfunc(void); 57extern void hcall_tracepoint_regfunc(void);
58extern void hcall_tracepoint_unregfunc(void); 58extern void hcall_tracepoint_unregfunc(void);
59 59
60TRACE_EVENT_FN(hcall_entry, 60TRACE_EVENT_FN_COND(hcall_entry,
61 61
62 TP_PROTO(unsigned long opcode, unsigned long *args), 62 TP_PROTO(unsigned long opcode, unsigned long *args),
63 63
64 TP_ARGS(opcode, args), 64 TP_ARGS(opcode, args),
65 65
66 TP_CONDITION(cpu_online(raw_smp_processor_id())),
67
66 TP_STRUCT__entry( 68 TP_STRUCT__entry(
67 __field(unsigned long, opcode) 69 __field(unsigned long, opcode)
68 ), 70 ),
@@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry,
76 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc 78 hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
77); 79);
78 80
79TRACE_EVENT_FN(hcall_exit, 81TRACE_EVENT_FN_COND(hcall_exit,
80 82
81 TP_PROTO(unsigned long opcode, unsigned long retval, 83 TP_PROTO(unsigned long opcode, unsigned long retval,
82 unsigned long *retbuf), 84 unsigned long *retbuf),
83 85
84 TP_ARGS(opcode, retval, retbuf), 86 TP_ARGS(opcode, retval, retbuf),
85 87
88 TP_CONDITION(cpu_online(raw_smp_processor_id())),
89
86 TP_STRUCT__entry( 90 TP_STRUCT__entry(
87 __field(unsigned long, opcode) 91 __field(unsigned long, opcode)
88 __field(unsigned long, retval) 92 __field(unsigned long, retval)
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 938742135ee0..650cfb31ea3d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
418 eeh_pcid_put(dev); 418 eeh_pcid_put(dev);
419 if (driver->err_handler && 419 if (driver->err_handler &&
420 driver->err_handler->error_detected && 420 driver->err_handler->error_detected &&
421 driver->err_handler->slot_reset && 421 driver->err_handler->slot_reset)
422 driver->err_handler->resume)
423 return NULL; 422 return NULL;
424 } 423 }
425 424
@@ -564,6 +563,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
564 */ 563 */
565 eeh_pe_state_mark(pe, EEH_PE_KEEP); 564 eeh_pe_state_mark(pe, EEH_PE_KEEP);
566 if (bus) { 565 if (bus) {
566 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
567 pci_lock_rescan_remove(); 567 pci_lock_rescan_remove();
568 pcibios_remove_pci_devices(bus); 568 pcibios_remove_pci_devices(bus);
569 pci_unlock_rescan_remove(); 569 pci_unlock_rescan_remove();
@@ -803,6 +803,7 @@ perm_error:
803 * the their PCI config any more. 803 * the their PCI config any more.
804 */ 804 */
805 if (frozen_bus) { 805 if (frozen_bus) {
806 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
806 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); 807 eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
807 808
808 pci_lock_rescan_remove(); 809 pci_lock_rescan_remove();
@@ -886,6 +887,7 @@ static void eeh_handle_special_event(void)
886 continue; 887 continue;
887 888
888 /* Notify all devices to be down */ 889 /* Notify all devices to be down */
890 eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
889 bus = eeh_pe_bus_get(phb_pe); 891 bus = eeh_pe_bus_get(phb_pe);
890 eeh_pe_dev_traverse(pe, 892 eeh_pe_dev_traverse(pe,
891 eeh_report_failure, NULL); 893 eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ca9e5371930e..98f81800e00c 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -928,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
928 bus = pe->phb->bus; 928 bus = pe->phb->bus;
929 } else if (pe->type & EEH_PE_BUS || 929 } else if (pe->type & EEH_PE_BUS ||
930 pe->type & EEH_PE_DEVICE) { 930 pe->type & EEH_PE_DEVICE) {
931 if (pe->bus) { 931 if (pe->state & EEH_PE_PRI_BUS) {
932 bus = pe->bus; 932 bus = pe->bus;
933 goto out; 933 goto out;
934 } 934 }
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ac64ffdb52c8..08b7a40de5f8 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -340,7 +340,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
340 if (name[0] == '.') { 340 if (name[0] == '.') {
341 if (strcmp(name+1, "TOC.") == 0) 341 if (strcmp(name+1, "TOC.") == 0)
342 syms[i].st_shndx = SHN_ABS; 342 syms[i].st_shndx = SHN_ABS;
343 memmove(name, name+1, strlen(name)); 343 syms[i].st_name++;
344 } 344 }
345 } 345 }
346 } 346 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e8fee5..3c5736e52a14 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1768,9 +1768,9 @@ static inline unsigned long brk_rnd(void)
1768 1768
1769 /* 8MB for 32bit, 1GB for 64bit */ 1769 /* 8MB for 32bit, 1GB for 64bit */
1770 if (is_32bit_task()) 1770 if (is_32bit_task())
1771 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); 1771 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
1772 else 1772 else
1773 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); 1773 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
1774 1774
1775 return rnd << PAGE_SHIFT; 1775 return rnd << PAGE_SHIFT;
1776} 1776}
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 0762c1e08c88..edb09912f0c9 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -111,7 +111,13 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
111 */ 111 */
112 if (!(old_pte & _PAGE_COMBO)) { 112 if (!(old_pte & _PAGE_COMBO)) {
113 flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags); 113 flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
114 old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND; 114 /*
115 * clear the old slot details from the old and new pte.
116 * On hash insert failure we use old pte value and we don't
117 * want slot information there if we have a insert failure.
118 */
119 old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
120 new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
115 goto htab_insert_hpte; 121 goto htab_insert_hpte;
116 } 122 }
117 /* 123 /*
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 49b152b0f926..eb2accdd76fd 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -78,9 +78,19 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
78 * base page size. This is because demote_segment won't flush 78 * base page size. This is because demote_segment won't flush
79 * hash page table entries. 79 * hash page table entries.
80 */ 80 */
81 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) 81 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
82 flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K, 82 flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
83 ssize, flags); 83 ssize, flags);
84 /*
85 * With THP, we also clear the slot information with
86 * respect to all the 64K hash pte mapping the 16MB
87 * page. They are all invalid now. This make sure we
88 * don't find the slot valid when we fault with 4k
89 * base page size.
90 *
91 */
92 memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
93 }
84 } 94 }
85 95
86 valid = hpte_valid(hpte_slot_array, index); 96 valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 0f0502e12f6c..4087705ba90f 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -59,9 +59,9 @@ unsigned long arch_mmap_rnd(void)
59 59
60 /* 8MB for 32bit, 1GB for 64bit */ 60 /* 8MB for 32bit, 1GB for 64bit */
61 if (is_32bit_task()) 61 if (is_32bit_task())
62 rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT)); 62 rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
63 else 63 else
64 rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT)); 64 rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
65 65
66 return rnd << PAGE_SHIFT; 66 return rnd << PAGE_SHIFT;
67} 67}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20d0fab..cdf2123d46db 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
646 return pgtable; 646 return pgtable;
647} 647}
648 648
649void pmdp_huge_split_prepare(struct vm_area_struct *vma,
650 unsigned long address, pmd_t *pmdp)
651{
652 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
653 VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
654
655 /*
656 * We can't mark the pmd none here, because that will cause a race
657 * against exit_mmap. We need to continue mark pmd TRANS HUGE, while
658 * we spilt, but at the same time we wan't rest of the ppc64 code
659 * not to insert hash pte on this, because we will be modifying
660 * the deposited pgtable in the caller of this function. Hence
661 * clear the _PAGE_USER so that we move the fault handling to
662 * higher level function and that will serialize against ptl.
663 * We need to flush existing hash pte entries here even though,
664 * the translation is still valid, because we will withdraw
665 * pgtable_t after this.
666 */
667 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
668}
669
670
649/* 671/*
650 * set a new huge pmd. We should not be called for updating 672 * set a new huge pmd. We should not be called for updating
651 * an existing pmd entry. That should go via pmd_hugepage_update. 673 * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,20 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
663 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); 685 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
664} 686}
665 687
688/*
689 * We use this to invalidate a pmdp entry before switching from a
690 * hugepte to regular pmd entry.
691 */
666void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 692void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
667 pmd_t *pmdp) 693 pmd_t *pmdp)
668{ 694{
669 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 695 pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
696
697 /*
698 * This ensures that generic code that rely on IRQ disabling
699 * to prevent a parallel THP split work as expected.
700 */
701 kick_all_cpus_sync();
670} 702}
671 703
672/* 704/*
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5f152b95ca0c..87f47e55aab6 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
444 * PCI devices of the PE are expected to be removed prior 444 * PCI devices of the PE are expected to be removed prior
445 * to PE reset. 445 * to PE reset.
446 */ 446 */
447 if (!edev->pe->bus) 447 if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
448 edev->pe->bus = pci_find_bus(hose->global_number, 448 edev->pe->bus = pci_find_bus(hose->global_number,
449 pdn->busno); 449 pdn->busno);
450 if (edev->pe->bus)
451 edev->pe->state |= EEH_PE_PRI_BUS;
452 }
450 453
451 /* 454 /*
452 * Enable EEH explicitly so that we will do EEH check 455 * Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 573ae1994097..f90dc04395bf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
3180 3180
3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { 3181static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
3182 .dma_dev_setup = pnv_pci_dma_dev_setup, 3182 .dma_dev_setup = pnv_pci_dma_dev_setup,
3183 .dma_bus_setup = pnv_pci_dma_bus_setup,
3183#ifdef CONFIG_PCI_MSI 3184#ifdef CONFIG_PCI_MSI
3184 .setup_msi_irqs = pnv_setup_msi_irqs, 3185 .setup_msi_irqs = pnv_setup_msi_irqs,
3185 .teardown_msi_irqs = pnv_teardown_msi_irqs, 3186 .teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2f55c86df703..b1ef84a6c9d1 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -599,6 +599,9 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift; 599 u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
600 long i; 600 long i;
601 601
602 if (proto_tce & TCE_PCI_WRITE)
603 proto_tce |= TCE_PCI_READ;
604
602 for (i = 0; i < npages; i++) { 605 for (i = 0; i < npages; i++) {
603 unsigned long newtce = proto_tce | 606 unsigned long newtce = proto_tce |
604 ((rpn + i) << tbl->it_page_shift); 607 ((rpn + i) << tbl->it_page_shift);
@@ -620,6 +623,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
620 623
621 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl)); 624 BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
622 625
626 if (newtce & TCE_PCI_WRITE)
627 newtce |= TCE_PCI_READ;
628
623 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)); 629 oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
624 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE); 630 *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
625 *direction = iommu_tce_direction(oldtce); 631 *direction = iommu_tce_direction(oldtce);
@@ -760,6 +766,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
760 phb->dma_dev_setup(phb, pdev); 766 phb->dma_dev_setup(phb, pdev);
761} 767}
762 768
769void pnv_pci_dma_bus_setup(struct pci_bus *bus)
770{
771 struct pci_controller *hose = bus->sysdata;
772 struct pnv_phb *phb = hose->private_data;
773 struct pnv_ioda_pe *pe;
774
775 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
776 if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
777 continue;
778
779 if (!pe->pbus)
780 continue;
781
782 if (bus->number == ((pe->rid >> 8) & 0xFF)) {
783 pe->pbus = bus;
784 break;
785 }
786 }
787}
788
763void pnv_pci_shutdown(void) 789void pnv_pci_shutdown(void)
764{ 790{
765 struct pci_controller *hose; 791 struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7f56313e8d72..00691a9b99af 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option); 242extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
243 243
244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev); 244extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
245extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
245extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type); 246extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
246extern void pnv_teardown_msi_irqs(struct pci_dev *pdev); 247extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
247 248
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index ea91ddfe54eb..629c90865a07 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -40,6 +40,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
40static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) 40static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
41{ 41{
42 fpregs->pad = 0; 42 fpregs->pad = 0;
43 fpregs->fpc = fpu->fpc;
43 if (MACHINE_HAS_VX) 44 if (MACHINE_HAS_VX)
44 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); 45 convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
45 else 46 else
@@ -49,6 +50,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
49 50
50static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) 51static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
51{ 52{
53 fpu->fpc = fpregs->fpc;
52 if (MACHINE_HAS_VX) 54 if (MACHINE_HAS_VX)
53 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); 55 convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
54 else 56 else
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 7aa799134a11..a52b6cca873d 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -37,7 +37,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
37 regs->psw.addr = ip; 37 regs->psw.addr = ip;
38} 38}
39#else 39#else
40#error Live patching support is disabled; check CONFIG_LIVEPATCH 40#error Include linux/livepatch.h, not asm/livepatch.h
41#endif 41#endif
42 42
43#endif 43#endif
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 66c94417c0ba..4af60374eba0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
271 271
272 /* Restore high gprs from signal stack */ 272 /* Restore high gprs from signal stack */
273 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high, 273 if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
274 sizeof(&sregs_ext->gprs_high))) 274 sizeof(sregs_ext->gprs_high)))
275 return -EFAULT; 275 return -EFAULT;
276 for (i = 0; i < NUM_GPRS; i++) 276 for (i = 0; i < NUM_GPRS; i++)
277 *(__u32 *)&regs->gprs[i] = gprs_high[i]; 277 *(__u32 *)&regs->gprs[i] = gprs_high[i];
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index cfcba2dd9bb5..0943b11a2f6e 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -260,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
260void perf_callchain_kernel(struct perf_callchain_entry *entry, 260void perf_callchain_kernel(struct perf_callchain_entry *entry,
261 struct pt_regs *regs) 261 struct pt_regs *regs)
262{ 262{
263 unsigned long head; 263 unsigned long head, frame_size;
264 struct stack_frame *head_sf; 264 struct stack_frame *head_sf;
265 265
266 if (user_mode(regs)) 266 if (user_mode(regs))
267 return; 267 return;
268 268
269 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
269 head = regs->gprs[15]; 270 head = regs->gprs[15];
270 head_sf = (struct stack_frame *) head; 271 head_sf = (struct stack_frame *) head;
271 272
@@ -273,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
273 return; 274 return;
274 275
275 head = head_sf->back_chain; 276 head = head_sf->back_chain;
276 head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE, 277 head = __store_trace(entry, head,
277 S390_lowcore.async_stack); 278 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
279 S390_lowcore.async_stack + frame_size);
278 280
279 __store_trace(entry, head, S390_lowcore.thread_info, 281 __store_trace(entry, head, S390_lowcore.thread_info,
280 S390_lowcore.thread_info + THREAD_SIZE); 282 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 5acba3cb7220..8f64ebd63767 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -59,26 +59,32 @@ static unsigned long save_context_stack(struct stack_trace *trace,
59 } 59 }
60} 60}
61 61
62void save_stack_trace(struct stack_trace *trace) 62static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
63{ 63{
64 register unsigned long sp asm ("15"); 64 unsigned long new_sp, frame_size;
65 unsigned long orig_sp, new_sp;
66 65
67 orig_sp = sp; 66 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
68 new_sp = save_context_stack(trace, orig_sp, 67 new_sp = save_context_stack(trace, sp,
69 S390_lowcore.panic_stack - PAGE_SIZE, 68 S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
70 S390_lowcore.panic_stack, 1); 69 S390_lowcore.panic_stack + frame_size, 1);
71 if (new_sp != orig_sp)
72 return;
73 new_sp = save_context_stack(trace, new_sp, 70 new_sp = save_context_stack(trace, new_sp,
74 S390_lowcore.async_stack - ASYNC_SIZE, 71 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
75 S390_lowcore.async_stack, 1); 72 S390_lowcore.async_stack + frame_size, 1);
76 if (new_sp != orig_sp)
77 return;
78 save_context_stack(trace, new_sp, 73 save_context_stack(trace, new_sp,
79 S390_lowcore.thread_info, 74 S390_lowcore.thread_info,
80 S390_lowcore.thread_info + THREAD_SIZE, 1); 75 S390_lowcore.thread_info + THREAD_SIZE, 1);
81} 76}
77
78void save_stack_trace(struct stack_trace *trace)
79{
80 register unsigned long r15 asm ("15");
81 unsigned long sp;
82
83 sp = r15;
84 __save_stack_trace(trace, sp);
85 if (trace->nr_entries < trace->max_entries)
86 trace->entries[trace->nr_entries++] = ULONG_MAX;
87}
82EXPORT_SYMBOL_GPL(save_stack_trace); 88EXPORT_SYMBOL_GPL(save_stack_trace);
83 89
84void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 90void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -86,6 +92,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
86 unsigned long sp, low, high; 92 unsigned long sp, low, high;
87 93
88 sp = tsk->thread.ksp; 94 sp = tsk->thread.ksp;
95 if (tsk == current) {
96 /* Get current stack pointer. */
97 asm volatile("la %0,0(15)" : "=a" (sp));
98 }
89 low = (unsigned long) task_stack_page(tsk); 99 low = (unsigned long) task_stack_page(tsk);
90 high = (unsigned long) task_pt_regs(tsk); 100 high = (unsigned long) task_pt_regs(tsk);
91 save_context_stack(trace, sp, low, high, 0); 101 save_context_stack(trace, sp, low, high, 0);
@@ -93,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
93 trace->entries[trace->nr_entries++] = ULONG_MAX; 103 trace->entries[trace->nr_entries++] = ULONG_MAX;
94} 104}
95EXPORT_SYMBOL_GPL(save_stack_trace_tsk); 105EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
106
107void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
108{
109 unsigned long sp;
110
111 sp = kernel_stack_pointer(regs);
112 __save_stack_trace(trace, sp);
113 if (trace->nr_entries < trace->max_entries)
114 trace->entries[trace->nr_entries++] = ULONG_MAX;
115}
116EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df99552b..dde7654f5c68 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@ void trace_s390_diagnose_norecursion(int diag_nr)
18 unsigned long flags; 18 unsigned long flags;
19 unsigned int *depth; 19 unsigned int *depth;
20 20
21 /* Avoid lockdep recursion. */
22 if (IS_ENABLED(CONFIG_LOCKDEP))
23 return;
21 local_irq_save(flags); 24 local_irq_save(flags);
22 depth = this_cpu_ptr(&diagnose_trace_depth); 25 depth = this_cpu_ptr(&diagnose_trace_depth);
23 if (*depth == 0) { 26 if (*depth == 0) {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index fec59c067d0d..792f9c63fbca 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -93,15 +93,19 @@ static int __memcpy_real(void *dest, void *src, size_t count)
93 */ 93 */
94int memcpy_real(void *dest, void *src, size_t count) 94int memcpy_real(void *dest, void *src, size_t count)
95{ 95{
96 int irqs_disabled, rc;
96 unsigned long flags; 97 unsigned long flags;
97 int rc;
98 98
99 if (!count) 99 if (!count)
100 return 0; 100 return 0;
101 local_irq_save(flags); 101 flags = __arch_local_irq_stnsm(0xf8UL);
102 __arch_local_irq_stnsm(0xfbUL); 102 irqs_disabled = arch_irqs_disabled_flags(flags);
103 if (!irqs_disabled)
104 trace_hardirqs_off();
103 rc = __memcpy_real(dest, src, count); 105 rc = __memcpy_real(dest, src, count);
104 local_irq_restore(flags); 106 if (!irqs_disabled)
107 trace_hardirqs_on();
108 __arch_local_irq_ssm(flags);
105 return rc; 109 return rc;
106} 110}
107 111
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index fe0bfe370c45..1884e1759529 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -54,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp,
54 54
55void s390_backtrace(struct pt_regs * const regs, unsigned int depth) 55void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
56{ 56{
57 unsigned long head; 57 unsigned long head, frame_size;
58 struct stack_frame* head_sf; 58 struct stack_frame* head_sf;
59 59
60 if (user_mode(regs)) 60 if (user_mode(regs))
61 return; 61 return;
62 62
63 frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
63 head = regs->gprs[15]; 64 head = regs->gprs[15];
64 head_sf = (struct stack_frame*)head; 65 head_sf = (struct stack_frame*)head;
65 66
@@ -68,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
68 69
69 head = head_sf->back_chain; 70 head = head_sf->back_chain;
70 71
71 head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE, 72 head = __show_trace(&depth, head,
72 S390_lowcore.async_stack); 73 S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
74 S390_lowcore.async_stack + frame_size);
73 75
74 __show_trace(&depth, head, S390_lowcore.thread_info, 76 __show_trace(&depth, head, S390_lowcore.thread_info,
75 S390_lowcore.thread_info + THREAD_SIZE); 77 S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c690c8e16a96..b489e9759518 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -264,7 +264,7 @@ static unsigned long mmap_rnd(void)
264 unsigned long rnd = 0UL; 264 unsigned long rnd = 0UL;
265 265
266 if (current->flags & PF_RANDOMIZE) { 266 if (current->flags & PF_RANDOMIZE) {
267 unsigned long val = get_random_int(); 267 unsigned long val = get_random_long();
268 if (test_thread_flag(TIF_32BIT)) 268 if (test_thread_flag(TIF_32BIT))
269 rnd = (val % (1UL << (23UL-PAGE_SHIFT))); 269 rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
270 else 270 else
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9af2e6338400..c46662f64c39 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -475,6 +475,7 @@ config X86_UV
475 depends on X86_64 475 depends on X86_64
476 depends on X86_EXTENDED_PLATFORM 476 depends on X86_EXTENDED_PLATFORM
477 depends on NUMA 477 depends on NUMA
478 depends on EFI
478 depends on X86_X2APIC 479 depends on X86_X2APIC
479 depends on PCI 480 depends on PCI
480 ---help--- 481 ---help---
@@ -777,8 +778,8 @@ config HPET_TIMER
777 HPET is the next generation timer replacing legacy 8254s. 778 HPET is the next generation timer replacing legacy 8254s.
778 The HPET provides a stable time base on SMP 779 The HPET provides a stable time base on SMP
779 systems, unlike the TSC, but it is more expensive to access, 780 systems, unlike the TSC, but it is more expensive to access,
780 as it is off-chip. You can find the HPET spec at 781 as it is off-chip. The interface used is documented
781 <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>. 782 in the HPET spec, revision 1.
782 783
783 You can safely choose Y here. However, HPET will only be 784 You can safely choose Y here. However, HPET will only be
784 activated if the platform and the BIOS support this feature. 785 activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 77d8c5112900..bb3e376d0f33 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@ sysenter_past_esp:
294 pushl $__USER_DS /* pt_regs->ss */ 294 pushl $__USER_DS /* pt_regs->ss */
295 pushl %ebp /* pt_regs->sp (stashed in bp) */ 295 pushl %ebp /* pt_regs->sp (stashed in bp) */
296 pushfl /* pt_regs->flags (except IF = 0) */ 296 pushfl /* pt_regs->flags (except IF = 0) */
297 ASM_CLAC /* Clear AC after saving FLAGS */
297 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ 298 orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
298 pushl $__USER_CS /* pt_regs->cs */ 299 pushl $__USER_CS /* pt_regs->cs */
299 pushl $0 /* pt_regs->ip = 0 (placeholder) */ 300 pushl $0 /* pt_regs->ip = 0 (placeholder) */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d61f332..3c990eeee40b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@ ENTRY(entry_INT80_compat)
261 * Interrupts are off on entry. 261 * Interrupts are off on entry.
262 */ 262 */
263 PARAVIRT_ADJUST_EXCEPTION_FRAME 263 PARAVIRT_ADJUST_EXCEPTION_FRAME
264 ASM_CLAC /* Do this early to minimize exposure */
264 SWAPGS 265 SWAPGS
265 266
266 /* 267 /*
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 19c099afa861..e795f5274217 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -41,7 +41,7 @@ static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
41 regs->ip = ip; 41 regs->ip = ip;
42} 42}
43#else 43#else
44#error Live patching support is disabled; check CONFIG_LIVEPATCH 44#error Include linux/livepatch.h, not asm/livepatch.h
45#endif 45#endif
46 46
47#endif /* _ASM_X86_LIVEPATCH_H */ 47#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 46873fbd44e1..d08eacd298c2 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
93extern int (*pcibios_enable_irq)(struct pci_dev *dev); 93extern int (*pcibios_enable_irq)(struct pci_dev *dev);
94extern void (*pcibios_disable_irq)(struct pci_dev *dev); 94extern void (*pcibios_disable_irq)(struct pci_dev *dev);
95 95
96extern bool mp_should_keep_irq(struct device *dev);
97
96struct pci_raw_ops { 98struct pci_raw_ops {
97 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, 99 int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
98 int reg, int len, u32 *val); 100 int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d5a50cb61a2..20c11d1aa4cc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -766,7 +766,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
766 * Return saved PC of a blocked thread. 766 * Return saved PC of a blocked thread.
767 * What is this good for? it will be always the scheduler or ret_from_fork. 767 * What is this good for? it will be always the scheduler or ret_from_fork.
768 */ 768 */
769#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8)) 769#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
770 770
771#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1) 771#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
772extern unsigned long KSTK_ESP(struct task_struct *task); 772extern unsigned long KSTK_ESP(struct task_struct *task);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd..3fe0eac59462 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
48 48
49 switch (n) { 49 switch (n) {
50 case 1: 50 case 1:
51 __uaccess_begin();
51 __put_user_size(*(u8 *)from, (u8 __user *)to, 52 __put_user_size(*(u8 *)from, (u8 __user *)to,
52 1, ret, 1); 53 1, ret, 1);
54 __uaccess_end();
53 return ret; 55 return ret;
54 case 2: 56 case 2:
57 __uaccess_begin();
55 __put_user_size(*(u16 *)from, (u16 __user *)to, 58 __put_user_size(*(u16 *)from, (u16 __user *)to,
56 2, ret, 2); 59 2, ret, 2);
60 __uaccess_end();
57 return ret; 61 return ret;
58 case 4: 62 case 4:
63 __uaccess_begin();
59 __put_user_size(*(u32 *)from, (u32 __user *)to, 64 __put_user_size(*(u32 *)from, (u32 __user *)to,
60 4, ret, 4); 65 4, ret, 4);
66 __uaccess_end();
61 return ret; 67 return ret;
62 case 8: 68 case 8:
69 __uaccess_begin();
63 __put_user_size(*(u64 *)from, (u64 __user *)to, 70 __put_user_size(*(u64 *)from, (u64 __user *)to,
64 8, ret, 8); 71 8, ret, 8);
72 __uaccess_end();
65 return ret; 73 return ret;
66 } 74 }
67 } 75 }
@@ -103,13 +111,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
103 111
104 switch (n) { 112 switch (n) {
105 case 1: 113 case 1:
114 __uaccess_begin();
106 __get_user_size(*(u8 *)to, from, 1, ret, 1); 115 __get_user_size(*(u8 *)to, from, 1, ret, 1);
116 __uaccess_end();
107 return ret; 117 return ret;
108 case 2: 118 case 2:
119 __uaccess_begin();
109 __get_user_size(*(u16 *)to, from, 2, ret, 2); 120 __get_user_size(*(u16 *)to, from, 2, ret, 2);
121 __uaccess_end();
110 return ret; 122 return ret;
111 case 4: 123 case 4:
124 __uaccess_begin();
112 __get_user_size(*(u32 *)to, from, 4, ret, 4); 125 __get_user_size(*(u32 *)to, from, 4, ret, 4);
126 __uaccess_end();
113 return ret; 127 return ret;
114 } 128 }
115 } 129 }
@@ -148,13 +162,19 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
148 162
149 switch (n) { 163 switch (n) {
150 case 1: 164 case 1:
165 __uaccess_begin();
151 __get_user_size(*(u8 *)to, from, 1, ret, 1); 166 __get_user_size(*(u8 *)to, from, 1, ret, 1);
167 __uaccess_end();
152 return ret; 168 return ret;
153 case 2: 169 case 2:
170 __uaccess_begin();
154 __get_user_size(*(u16 *)to, from, 2, ret, 2); 171 __get_user_size(*(u16 *)to, from, 2, ret, 2);
172 __uaccess_end();
155 return ret; 173 return ret;
156 case 4: 174 case 4:
175 __uaccess_begin();
157 __get_user_size(*(u32 *)to, from, 4, ret, 4); 176 __get_user_size(*(u32 *)to, from, 4, ret, 4);
177 __uaccess_end();
158 return ret; 178 return ret;
159 } 179 }
160 } 180 }
@@ -170,13 +190,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
170 190
171 switch (n) { 191 switch (n) {
172 case 1: 192 case 1:
193 __uaccess_begin();
173 __get_user_size(*(u8 *)to, from, 1, ret, 1); 194 __get_user_size(*(u8 *)to, from, 1, ret, 1);
195 __uaccess_end();
174 return ret; 196 return ret;
175 case 2: 197 case 2:
198 __uaccess_begin();
176 __get_user_size(*(u16 *)to, from, 2, ret, 2); 199 __get_user_size(*(u16 *)to, from, 2, ret, 2);
200 __uaccess_end();
177 return ret; 201 return ret;
178 case 4: 202 case 4:
203 __uaccess_begin();
179 __get_user_size(*(u32 *)to, from, 4, ret, 4); 204 __get_user_size(*(u32 *)to, from, 4, ret, 4);
205 __uaccess_end();
180 return ret; 206 return ret;
181 } 207 }
182 } 208 }
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 968d57dd54c9..f320ee32d5a1 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -57,7 +57,7 @@ static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
57{ 57{
58 if (xen_pci_frontend && xen_pci_frontend->enable_msi) 58 if (xen_pci_frontend && xen_pci_frontend->enable_msi)
59 return xen_pci_frontend->enable_msi(dev, vectors); 59 return xen_pci_frontend->enable_msi(dev, vectors);
60 return -ENODEV; 60 return -ENOSYS;
61} 61}
62static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) 62static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
63{ 63{
@@ -69,7 +69,7 @@ static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
69{ 69{
70 if (xen_pci_frontend && xen_pci_frontend->enable_msix) 70 if (xen_pci_frontend && xen_pci_frontend->enable_msix)
71 return xen_pci_frontend->enable_msix(dev, vectors, nvec); 71 return xen_pci_frontend->enable_msix(dev, vectors, nvec);
72 return -ENODEV; 72 return -ENOSYS;
73} 73}
74static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev) 74static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
75{ 75{
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 49742746a6c9..8836fc9fa84b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -323,6 +323,8 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
323 return 0; 323 return 0;
324 324
325fail: 325fail:
326 if (amd_uncore_nb)
327 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
326 kfree(uncore_nb); 328 kfree(uncore_nb);
327 return -ENOMEM; 329 return -ENOMEM;
328} 330}
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1505587d06e9..b9b09fec173b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
650 u16 sel; 650 u16 sel;
651 651
652 la = seg_base(ctxt, addr.seg) + addr.ea; 652 la = seg_base(ctxt, addr.seg) + addr.ea;
653 *linear = la;
654 *max_size = 0; 653 *max_size = 0;
655 switch (mode) { 654 switch (mode) {
656 case X86EMUL_MODE_PROT64: 655 case X86EMUL_MODE_PROT64:
656 *linear = la;
657 if (is_noncanonical_address(la)) 657 if (is_noncanonical_address(la))
658 goto bad; 658 goto bad;
659 659
@@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
662 goto bad; 662 goto bad;
663 break; 663 break;
664 default: 664 default:
665 *linear = la = (u32)la;
665 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 666 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
666 addr.seg); 667 addr.seg);
667 if (!usable) 668 if (!usable)
@@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
689 if (size > *max_size) 690 if (size > *max_size)
690 goto bad; 691 goto bad;
691 } 692 }
692 la &= (u32)-1;
693 break; 693 break;
694 } 694 }
695 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0)) 695 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c9fed957cce..2ce4f05e81d3 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
249 return ret; 249 return ret;
250 250
251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn); 251 kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
252 walker->ptes[level] = pte; 252 walker->ptes[level - 1] = pte;
253 } 253 }
254 return 0; 254 return 0;
255} 255}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4244c2baf57d..f4891f2ece23 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2752,6 +2752,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2752 } 2752 }
2753 2753
2754 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); 2754 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2755 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
2755} 2756}
2756 2757
2757void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 2758void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 982ce34f4a9b..27f89c79a44b 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -232,17 +232,31 @@ ENDPROC(copy_user_enhanced_fast_string)
232 232
233/* 233/*
234 * copy_user_nocache - Uncached memory copy with exception handling 234 * copy_user_nocache - Uncached memory copy with exception handling
235 * This will force destination/source out of cache for more performance. 235 * This will force destination out of cache for more performance.
236 *
237 * Note: Cached memory copy is used when destination or size is not
238 * naturally aligned. That is:
239 * - Require 8-byte alignment when size is 8 bytes or larger.
240 * - Require 4-byte alignment when size is 4 bytes.
236 */ 241 */
237ENTRY(__copy_user_nocache) 242ENTRY(__copy_user_nocache)
238 ASM_STAC 243 ASM_STAC
244
245 /* If size is less than 8 bytes, go to 4-byte copy */
239 cmpl $8,%edx 246 cmpl $8,%edx
240 jb 20f /* less then 8 bytes, go to byte copy loop */ 247 jb .L_4b_nocache_copy_entry
248
249 /* If destination is not 8-byte aligned, "cache" copy to align it */
241 ALIGN_DESTINATION 250 ALIGN_DESTINATION
251
252 /* Set 4x8-byte copy count and remainder */
242 movl %edx,%ecx 253 movl %edx,%ecx
243 andl $63,%edx 254 andl $63,%edx
244 shrl $6,%ecx 255 shrl $6,%ecx
245 jz 17f 256 jz .L_8b_nocache_copy_entry /* jump if count is 0 */
257
258 /* Perform 4x8-byte nocache loop-copy */
259.L_4x8b_nocache_copy_loop:
2461: movq (%rsi),%r8 2601: movq (%rsi),%r8
2472: movq 1*8(%rsi),%r9 2612: movq 1*8(%rsi),%r9
2483: movq 2*8(%rsi),%r10 2623: movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@ ENTRY(__copy_user_nocache)
262 leaq 64(%rsi),%rsi 276 leaq 64(%rsi),%rsi
263 leaq 64(%rdi),%rdi 277 leaq 64(%rdi),%rdi
264 decl %ecx 278 decl %ecx
265 jnz 1b 279 jnz .L_4x8b_nocache_copy_loop
26617: movl %edx,%ecx 280
281 /* Set 8-byte copy count and remainder */
282.L_8b_nocache_copy_entry:
283 movl %edx,%ecx
267 andl $7,%edx 284 andl $7,%edx
268 shrl $3,%ecx 285 shrl $3,%ecx
269 jz 20f 286 jz .L_4b_nocache_copy_entry /* jump if count is 0 */
27018: movq (%rsi),%r8 287
27119: movnti %r8,(%rdi) 288 /* Perform 8-byte nocache loop-copy */
289.L_8b_nocache_copy_loop:
29020: movq (%rsi),%r8
29121: movnti %r8,(%rdi)
272 leaq 8(%rsi),%rsi 292 leaq 8(%rsi),%rsi
273 leaq 8(%rdi),%rdi 293 leaq 8(%rdi),%rdi
274 decl %ecx 294 decl %ecx
275 jnz 18b 295 jnz .L_8b_nocache_copy_loop
27620: andl %edx,%edx 296
277 jz 23f 297 /* If no byte left, we're done */
298.L_4b_nocache_copy_entry:
299 andl %edx,%edx
300 jz .L_finish_copy
301
302 /* If destination is not 4-byte aligned, go to byte copy: */
303 movl %edi,%ecx
304 andl $3,%ecx
305 jnz .L_1b_cache_copy_entry
306
307 /* Set 4-byte copy count (1 or 0) and remainder */
278 movl %edx,%ecx 308 movl %edx,%ecx
27921: movb (%rsi),%al 309 andl $3,%edx
28022: movb %al,(%rdi) 310 shrl $2,%ecx
311 jz .L_1b_cache_copy_entry /* jump if count is 0 */
312
313 /* Perform 4-byte nocache copy: */
31430: movl (%rsi),%r8d
31531: movnti %r8d,(%rdi)
316 leaq 4(%rsi),%rsi
317 leaq 4(%rdi),%rdi
318
319 /* If no bytes left, we're done: */
320 andl %edx,%edx
321 jz .L_finish_copy
322
323 /* Perform byte "cache" loop-copy for the remainder */
324.L_1b_cache_copy_entry:
325 movl %edx,%ecx
326.L_1b_cache_copy_loop:
32740: movb (%rsi),%al
32841: movb %al,(%rdi)
281 incq %rsi 329 incq %rsi
282 incq %rdi 330 incq %rdi
283 decl %ecx 331 decl %ecx
284 jnz 21b 332 jnz .L_1b_cache_copy_loop
28523: xorl %eax,%eax 333
334 /* Finished copying; fence the prior stores */
335.L_finish_copy:
336 xorl %eax,%eax
286 ASM_CLAC 337 ASM_CLAC
287 sfence 338 sfence
288 ret 339 ret
289 340
290 .section .fixup,"ax" 341 .section .fixup,"ax"
29130: shll $6,%ecx 342.L_fixup_4x8b_copy:
343 shll $6,%ecx
292 addl %ecx,%edx 344 addl %ecx,%edx
293 jmp 60f 345 jmp .L_fixup_handle_tail
29440: lea (%rdx,%rcx,8),%rdx 346.L_fixup_8b_copy:
295 jmp 60f 347 lea (%rdx,%rcx,8),%rdx
29650: movl %ecx,%edx 348 jmp .L_fixup_handle_tail
29760: sfence 349.L_fixup_4b_copy:
350 lea (%rdx,%rcx,4),%rdx
351 jmp .L_fixup_handle_tail
352.L_fixup_1b_copy:
353 movl %ecx,%edx
354.L_fixup_handle_tail:
355 sfence
298 jmp copy_user_handle_tail 356 jmp copy_user_handle_tail
299 .previous 357 .previous
300 358
301 _ASM_EXTABLE(1b,30b) 359 _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
302 _ASM_EXTABLE(2b,30b) 360 _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
303 _ASM_EXTABLE(3b,30b) 361 _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
304 _ASM_EXTABLE(4b,30b) 362 _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
305 _ASM_EXTABLE(5b,30b) 363 _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
306 _ASM_EXTABLE(6b,30b) 364 _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
307 _ASM_EXTABLE(7b,30b) 365 _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
308 _ASM_EXTABLE(8b,30b) 366 _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
309 _ASM_EXTABLE(9b,30b) 367 _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
310 _ASM_EXTABLE(10b,30b) 368 _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
311 _ASM_EXTABLE(11b,30b) 369 _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
312 _ASM_EXTABLE(12b,30b) 370 _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
313 _ASM_EXTABLE(13b,30b) 371 _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
314 _ASM_EXTABLE(14b,30b) 372 _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
315 _ASM_EXTABLE(15b,30b) 373 _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
316 _ASM_EXTABLE(16b,30b) 374 _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
317 _ASM_EXTABLE(18b,40b) 375 _ASM_EXTABLE(20b,.L_fixup_8b_copy)
318 _ASM_EXTABLE(19b,40b) 376 _ASM_EXTABLE(21b,.L_fixup_8b_copy)
319 _ASM_EXTABLE(21b,50b) 377 _ASM_EXTABLE(30b,.L_fixup_4b_copy)
320 _ASM_EXTABLE(22b,50b) 378 _ASM_EXTABLE(31b,.L_fixup_4b_copy)
379 _ASM_EXTABLE(40b,.L_fixup_1b_copy)
380 _ASM_EXTABLE(41b,.L_fixup_1b_copy)
321ENDPROC(__copy_user_nocache) 381ENDPROC(__copy_user_nocache)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9a3f77..e830c71a1323 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,6 +287,9 @@ static noinline int vmalloc_fault(unsigned long address)
287 if (!pmd_k) 287 if (!pmd_k)
288 return -1; 288 return -1;
289 289
290 if (pmd_huge(*pmd_k))
291 return 0;
292
290 pte_k = pte_offset_kernel(pmd_k, address); 293 pte_k = pte_offset_kernel(pmd_k, address);
291 if (!pte_present(*pte_k)) 294 if (!pte_present(*pte_k))
292 return -1; 295 return -1;
@@ -360,8 +363,6 @@ void vmalloc_sync_all(void)
360 * 64-bit: 363 * 64-bit:
361 * 364 *
362 * Handle a fault on the vmalloc area 365 * Handle a fault on the vmalloc area
363 *
364 * This assumes no large pages in there.
365 */ 366 */
366static noinline int vmalloc_fault(unsigned long address) 367static noinline int vmalloc_fault(unsigned long address)
367{ 368{
@@ -403,17 +404,23 @@ static noinline int vmalloc_fault(unsigned long address)
403 if (pud_none(*pud_ref)) 404 if (pud_none(*pud_ref))
404 return -1; 405 return -1;
405 406
406 if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) 407 if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
407 BUG(); 408 BUG();
408 409
410 if (pud_huge(*pud))
411 return 0;
412
409 pmd = pmd_offset(pud, address); 413 pmd = pmd_offset(pud, address);
410 pmd_ref = pmd_offset(pud_ref, address); 414 pmd_ref = pmd_offset(pud_ref, address);
411 if (pmd_none(*pmd_ref)) 415 if (pmd_none(*pmd_ref))
412 return -1; 416 return -1;
413 417
414 if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) 418 if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
415 BUG(); 419 BUG();
416 420
421 if (pmd_huge(*pmd))
422 return 0;
423
417 pte_ref = pte_offset_kernel(pmd_ref, address); 424 pte_ref = pte_offset_kernel(pmd_ref, address);
418 if (!pte_present(*pte_ref)) 425 if (!pte_present(*pte_ref))
419 return -1; 426 return -1;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb5900372..d8a798d8bf50 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
102 return 0; 102 return 0;
103 } 103 }
104 104
105 page = pte_page(pte);
106 if (pte_devmap(pte)) { 105 if (pte_devmap(pte)) {
107 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); 106 pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
108 if (unlikely(!pgmap)) { 107 if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
115 return 0; 114 return 0;
116 } 115 }
117 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 116 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
117 page = pte_page(pte);
118 get_page(page); 118 get_page(page);
119 put_dev_pagemap(pgmap); 119 put_dev_pagemap(pgmap);
120 SetPageReferenced(page); 120 SetPageReferenced(page);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 96bd1e2bffaf..72bb52f93c3d 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -71,12 +71,12 @@ unsigned long arch_mmap_rnd(void)
71 71
72 if (mmap_is_ia32()) 72 if (mmap_is_ia32())
73#ifdef CONFIG_COMPAT 73#ifdef CONFIG_COMPAT
74 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1); 74 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
75#else 75#else
76 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 76 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
77#endif 77#endif
78 else 78 else
79 rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1); 79 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
80 80
81 return rnd << PAGE_SHIFT; 81 return rnd << PAGE_SHIFT;
82} 82}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67da1701..ef05755a1900 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
123 break; 123 break;
124 } 124 }
125 125
126 if (regno > nr_registers) { 126 if (regno >= nr_registers) {
127 WARN_ONCE(1, "decoded an instruction with an invalid register"); 127 WARN_ONCE(1, "decoded an instruction with an invalid register");
128 return -EINVAL; 128 return -EINVAL;
129 } 129 }
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c3b3f653ed0c..d04f8094bc23 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -469,7 +469,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
469{ 469{
470 int i, nid; 470 int i, nid;
471 nodemask_t numa_kernel_nodes = NODE_MASK_NONE; 471 nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
472 unsigned long start, end; 472 phys_addr_t start, end;
473 struct memblock_region *r; 473 struct memblock_region *r;
474 474
475 /* 475 /*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814b0069..9cf96d82147a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@ pmd_t *lookup_pmd_address(unsigned long address)
419phys_addr_t slow_virt_to_phys(void *__virt_addr) 419phys_addr_t slow_virt_to_phys(void *__virt_addr)
420{ 420{
421 unsigned long virt_addr = (unsigned long)__virt_addr; 421 unsigned long virt_addr = (unsigned long)__virt_addr;
422 unsigned long phys_addr, offset; 422 phys_addr_t phys_addr;
423 unsigned long offset;
423 enum pg_level level; 424 enum pg_level level;
424 pte_t *pte; 425 pte_t *pte;
425 426
426 pte = lookup_address(virt_addr, &level); 427 pte = lookup_address(virt_addr, &level);
427 BUG_ON(!pte); 428 BUG_ON(!pte);
428 429
430 /*
431 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
432 * before being left-shifted PAGE_SHIFT bits -- this trick is to
433 * make 32-PAE kernel work correctly.
434 */
429 switch (level) { 435 switch (level) {
430 case PG_LEVEL_1G: 436 case PG_LEVEL_1G:
431 phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT; 437 phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
432 offset = virt_addr & ~PUD_PAGE_MASK; 438 offset = virt_addr & ~PUD_PAGE_MASK;
433 break; 439 break;
434 case PG_LEVEL_2M: 440 case PG_LEVEL_2M:
435 phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT; 441 phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
436 offset = virt_addr & ~PMD_PAGE_MASK; 442 offset = virt_addr & ~PMD_PAGE_MASK;
437 break; 443 break;
438 default: 444 default:
439 phys_addr = pte_pfn(*pte) << PAGE_SHIFT; 445 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
440 offset = virt_addr & ~PAGE_MASK; 446 offset = virt_addr & ~PAGE_MASK;
441 } 447 }
442 448
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2879efc73a96..d34b5118b4e8 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -711,28 +711,22 @@ int pcibios_add_device(struct pci_dev *dev)
711 return 0; 711 return 0;
712} 712}
713 713
714int pcibios_alloc_irq(struct pci_dev *dev) 714int pcibios_enable_device(struct pci_dev *dev, int mask)
715{ 715{
716 /* 716 int err;
717 * If the PCI device was already claimed by core code and has
718 * MSI enabled, probing of the pcibios IRQ will overwrite
719 * dev->irq. So bail out if MSI is already enabled.
720 */
721 if (pci_dev_msi_enabled(dev))
722 return -EBUSY;
723 717
724 return pcibios_enable_irq(dev); 718 if ((err = pci_enable_resources(dev, mask)) < 0)
725} 719 return err;
726 720
727void pcibios_free_irq(struct pci_dev *dev) 721 if (!pci_dev_msi_enabled(dev))
728{ 722 return pcibios_enable_irq(dev);
729 if (pcibios_disable_irq) 723 return 0;
730 pcibios_disable_irq(dev);
731} 724}
732 725
733int pcibios_enable_device(struct pci_dev *dev, int mask) 726void pcibios_disable_device (struct pci_dev *dev)
734{ 727{
735 return pci_enable_resources(dev, mask); 728 if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
729 pcibios_disable_irq(dev);
736} 730}
737 731
738int pci_ext_cfg_avail(void) 732int pci_ext_cfg_avail(void)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 0d24e7c10145..8b93e634af84 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
215 int polarity; 215 int polarity;
216 int ret; 216 int ret;
217 217
218 if (pci_has_managed_irq(dev)) 218 if (dev->irq_managed && dev->irq > 0)
219 return 0; 219 return 0;
220 220
221 switch (intel_mid_identify_cpu()) { 221 switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
256 256
257static void intel_mid_pci_irq_disable(struct pci_dev *dev) 257static void intel_mid_pci_irq_disable(struct pci_dev *dev)
258{ 258{
259 if (pci_has_managed_irq(dev)) { 259 if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
260 dev->irq > 0) {
260 mp_unmap_irq(dev->irq); 261 mp_unmap_irq(dev->irq);
261 dev->irq_managed = 0; 262 dev->irq_managed = 0;
262 /*
263 * Don't reset dev->irq here, otherwise
264 * intel_mid_pci_irq_enable() will fail on next call.
265 */
266 } 263 }
267} 264}
268 265
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 32e70343e6fd..9bd115484745 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
1202 struct pci_dev *temp_dev; 1202 struct pci_dev *temp_dev;
1203 int irq; 1203 int irq;
1204 1204
1205 if (pci_has_managed_irq(dev)) 1205 if (dev->irq_managed && dev->irq > 0)
1206 return 0; 1206 return 0;
1207 1207
1208 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, 1208 irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@ static int pirq_enable_irq(struct pci_dev *dev)
1230 } 1230 }
1231 dev = temp_dev; 1231 dev = temp_dev;
1232 if (irq >= 0) { 1232 if (irq >= 0) {
1233 pci_set_managed_irq(dev, irq); 1233 dev->irq_managed = 1;
1234 dev->irq = irq;
1234 dev_info(&dev->dev, "PCI->APIC IRQ transform: " 1235 dev_info(&dev->dev, "PCI->APIC IRQ transform: "
1235 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq); 1236 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
1236 return 0; 1237 return 0;
@@ -1256,10 +1257,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
1256 return 0; 1257 return 0;
1257} 1258}
1258 1259
1260bool mp_should_keep_irq(struct device *dev)
1261{
1262 if (dev->power.is_prepared)
1263 return true;
1264#ifdef CONFIG_PM
1265 if (dev->power.runtime_status == RPM_SUSPENDING)
1266 return true;
1267#endif
1268
1269 return false;
1270}
1271
1259static void pirq_disable_irq(struct pci_dev *dev) 1272static void pirq_disable_irq(struct pci_dev *dev)
1260{ 1273{
1261 if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) { 1274 if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
1275 dev->irq_managed && dev->irq) {
1262 mp_unmap_irq(dev->irq); 1276 mp_unmap_irq(dev->irq);
1263 pci_reset_managed_irq(dev); 1277 dev->irq = 0;
1278 dev->irq_managed = 0;
1264 } 1279 }
1265} 1280}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index ff31ab464213..beac4dfdade6 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -196,7 +196,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
196 return 0; 196 return 0;
197 197
198error: 198error:
199 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); 199 if (ret == -ENOSYS)
200 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
201 else if (ret)
202 dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
200free: 203free:
201 kfree(v); 204 kfree(v);
202 return ret; 205 return ret;
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c332e97..bfadcd0f4944 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
592 end = (unsigned long)__end_rodata - 1; 592 end = (unsigned long)__end_rodata - 1;
593 593
594 /* 594 /*
595 * Setup a locked IMR around the physical extent of the kernel 595 * Setup an unlocked IMR around the physical extent of the kernel
596 * from the beginning of the .text secton to the end of the 596 * from the beginning of the .text secton to the end of the
597 * .rodata section as one physically contiguous block. 597 * .rodata section as one physically contiguous block.
598 * 598 *
599 * We don't round up @size since it is already PAGE_SIZE aligned. 599 * We don't round up @size since it is already PAGE_SIZE aligned.
600 * See vmlinux.lds.S for details. 600 * See vmlinux.lds.S for details.
601 */ 601 */
602 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); 602 ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
603 if (ret < 0) { 603 if (ret < 0) {
604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", 604 pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
605 size / 1024, start, end); 605 size / 1024, start, end);
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..0363cd731320 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,6 +88,19 @@ config BLK_DEV_INTEGRITY
88 T10/SCSI Data Integrity Field or the T13/ATA External Path 88 T10/SCSI Data Integrity Field or the T13/ATA External Path
89 Protection. If in doubt, say N. 89 Protection. If in doubt, say N.
90 90
91config BLK_DEV_DAX
92 bool "Block device DAX support"
93 depends on FS_DAX
94 depends on BROKEN
95 help
96 When DAX support is available (CONFIG_FS_DAX) raw block
97 devices can also support direct userspace access to the
98 storage capacity via MMAP(2) similar to a file on a
99 DAX-enabled filesystem. However, the DAX I/O-path disables
100 some standard I/O-statistics, and the MMAP(2) path has some
101 operational differences due to bypassing the page
102 cache. If in doubt, say N.
103
91config BLK_DEV_THROTTLING 104config BLK_DEV_THROTTLING
92 bool "Block layer bio throttling support" 105 bool "Block layer bio throttling support"
93 depends on BLK_CGROUP=y 106 depends on BLK_CGROUP=y
diff --git a/block/bio.c b/block/bio.c
index dbabd48b1934..cf7591551b17 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,7 +874,7 @@ int submit_bio_wait(int rw, struct bio *bio)
874 bio->bi_private = &ret; 874 bio->bi_private = &ret;
875 bio->bi_end_io = submit_bio_wait_endio; 875 bio->bi_end_io = submit_bio_wait_endio;
876 submit_bio(rw, bio); 876 submit_bio(rw, bio);
877 wait_for_completion(&ret.event); 877 wait_for_completion_io(&ret.event);
878 878
879 return ret.error; 879 return ret.error;
880} 880}
@@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { 1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1091 /* 1091 /*
1092 * if we're in a workqueue, the request is orphaned, so 1092 * if we're in a workqueue, the request is orphaned, so
1093 * don't copy into a random user address space, just free. 1093 * don't copy into a random user address space, just free
1094 * and return -EINTR so user space doesn't expect any data.
1094 */ 1095 */
1095 if (current->mm && bio_data_dir(bio) == READ) 1096 if (!current->mm)
1097 ret = -EINTR;
1098 else if (bio_data_dir(bio) == READ)
1096 ret = bio_copy_to_iter(bio, bmd->iter); 1099 ret = bio_copy_to_iter(bio, bmd->iter);
1097 if (bmd->is_our_pages) 1100 if (bmd->is_our_pages)
1098 bio_free_pages(bio); 1101 bio_free_pages(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a37188b559f..66e6f1aae02e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -788,6 +788,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
788{ 788{
789 struct gendisk *disk; 789 struct gendisk *disk;
790 struct blkcg_gq *blkg; 790 struct blkcg_gq *blkg;
791 struct module *owner;
791 unsigned int major, minor; 792 unsigned int major, minor;
792 int key_len, part, ret; 793 int key_len, part, ret;
793 char *body; 794 char *body;
@@ -804,7 +805,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
804 if (!disk) 805 if (!disk)
805 return -ENODEV; 806 return -ENODEV;
806 if (part) { 807 if (part) {
808 owner = disk->fops->owner;
807 put_disk(disk); 809 put_disk(disk);
810 module_put(owner);
808 return -ENODEV; 811 return -ENODEV;
809 } 812 }
810 813
@@ -820,7 +823,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
820 ret = PTR_ERR(blkg); 823 ret = PTR_ERR(blkg);
821 rcu_read_unlock(); 824 rcu_read_unlock();
822 spin_unlock_irq(disk->queue->queue_lock); 825 spin_unlock_irq(disk->queue->queue_lock);
826 owner = disk->fops->owner;
823 put_disk(disk); 827 put_disk(disk);
828 module_put(owner);
824 /* 829 /*
825 * If queue was bypassing, we should retry. Do so after a 830 * If queue was bypassing, we should retry. Do so after a
826 * short msleep(). It isn't strictly necessary but queue 831 * short msleep(). It isn't strictly necessary but queue
@@ -851,9 +856,13 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
851void blkg_conf_finish(struct blkg_conf_ctx *ctx) 856void blkg_conf_finish(struct blkg_conf_ctx *ctx)
852 __releases(ctx->disk->queue->queue_lock) __releases(rcu) 857 __releases(ctx->disk->queue->queue_lock) __releases(rcu)
853{ 858{
859 struct module *owner;
860
854 spin_unlock_irq(ctx->disk->queue->queue_lock); 861 spin_unlock_irq(ctx->disk->queue->queue_lock);
855 rcu_read_unlock(); 862 rcu_read_unlock();
863 owner = ctx->disk->fops->owner;
856 put_disk(ctx->disk); 864 put_disk(ctx->disk);
865 module_put(owner);
857} 866}
858EXPORT_SYMBOL_GPL(blkg_conf_finish); 867EXPORT_SYMBOL_GPL(blkg_conf_finish);
859 868
diff --git a/block/blk-core.c b/block/blk-core.c
index ab51685988c2..b83d29755b5a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q)
2455 2455
2456 rq = NULL; 2456 rq = NULL;
2457 break; 2457 break;
2458 } else if (ret == BLKPREP_KILL) { 2458 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2459 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
2460
2459 rq->cmd_flags |= REQ_QUIET; 2461 rq->cmd_flags |= REQ_QUIET;
2460 /* 2462 /*
2461 * Mark this request as started so we don't trigger 2463 * Mark this request as started so we don't trigger
2462 * any debug logic in the end I/O path. 2464 * any debug logic in the end I/O path.
2463 */ 2465 */
2464 blk_start_request(rq); 2466 blk_start_request(rq);
2465 __blk_end_request_all(rq, -EIO); 2467 __blk_end_request_all(rq, err);
2466 } else { 2468 } else {
2467 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 2469 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2468 break; 2470 break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c0622fae413..56c0a726b619 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -599,8 +599,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
599 * If a request wasn't started before the queue was 599 * If a request wasn't started before the queue was
600 * marked dying, kill it here or it'll go unnoticed. 600 * marked dying, kill it here or it'll go unnoticed.
601 */ 601 */
602 if (unlikely(blk_queue_dying(rq->q))) 602 if (unlikely(blk_queue_dying(rq->q))) {
603 blk_mq_complete_request(rq, -EIO); 603 rq->errors = -EIO;
604 blk_mq_end_request(rq, rq->errors);
605 }
604 return; 606 return;
605 } 607 }
606 608
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd4973583978..c7bb666aafd1 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 91 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
92 lim->virt_boundary_mask = 0; 92 lim->virt_boundary_mask = 0;
93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; 93 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
94 lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = 94 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
95 BLK_SAFE_MAX_SECTORS; 95 lim->max_dev_sectors = 0;
96 lim->chunk_sectors = 0; 96 lim->chunk_sectors = 0;
97 lim->max_write_same_sectors = 0; 97 lim->max_write_same_sectors = 0;
98 lim->max_discard_sectors = 0; 98 lim->max_discard_sectors = 0;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc487ce1..dd93763057ce 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -147,10 +147,9 @@ static ssize_t queue_discard_granularity_show(struct request_queue *q, char *pag
147 147
148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page) 148static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
149{ 149{
150 unsigned long long val;
151 150
152 val = q->limits.max_hw_discard_sectors << 9; 151 return sprintf(page, "%llu\n",
153 return sprintf(page, "%llu\n", val); 152 (unsigned long long)q->limits.max_hw_discard_sectors << 9);
154} 153}
155 154
156static ssize_t queue_discard_max_show(struct request_queue *q, char *page) 155static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index a753df2b3fc2..d0dd7882d8c7 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -39,7 +39,6 @@ struct deadline_data {
39 */ 39 */
40 struct request *next_rq[2]; 40 struct request *next_rq[2];
41 unsigned int batching; /* number of sequential requests made */ 41 unsigned int batching; /* number of sequential requests made */
42 sector_t last_sector; /* head position */
43 unsigned int starved; /* times reads have starved writes */ 42 unsigned int starved; /* times reads have starved writes */
44 43
45 /* 44 /*
@@ -210,8 +209,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq)
210 dd->next_rq[WRITE] = NULL; 209 dd->next_rq[WRITE] = NULL;
211 dd->next_rq[data_dir] = deadline_latter_request(rq); 210 dd->next_rq[data_dir] = deadline_latter_request(rq);
212 211
213 dd->last_sector = rq_end_sector(rq);
214
215 /* 212 /*
216 * take it off the sort and fifo list, move 213 * take it off the sort and fifo list, move
217 * to dispatch queue 214 * to dispatch queue
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 38c1aa89d3a0..28556fce4267 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -65,18 +65,10 @@ struct skcipher_async_req {
65 struct skcipher_async_rsgl first_sgl; 65 struct skcipher_async_rsgl first_sgl;
66 struct list_head list; 66 struct list_head list;
67 struct scatterlist *tsg; 67 struct scatterlist *tsg;
68 char iv[]; 68 atomic_t *inflight;
69 struct skcipher_request req;
69}; 70};
70 71
71#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
72 crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
73
74#define GET_REQ_SIZE(ctx) \
75 crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
76
77#define GET_IV_SIZE(ctx) \
78 crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
79
80#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ 72#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
81 sizeof(struct scatterlist) - 1) 73 sizeof(struct scatterlist) - 1)
82 74
@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
102 94
103static void skcipher_async_cb(struct crypto_async_request *req, int err) 95static void skcipher_async_cb(struct crypto_async_request *req, int err)
104{ 96{
105 struct sock *sk = req->data; 97 struct skcipher_async_req *sreq = req->data;
106 struct alg_sock *ask = alg_sk(sk);
107 struct skcipher_ctx *ctx = ask->private;
108 struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
109 struct kiocb *iocb = sreq->iocb; 98 struct kiocb *iocb = sreq->iocb;
110 99
111 atomic_dec(&ctx->inflight); 100 atomic_dec(sreq->inflight);
112 skcipher_free_async_sgls(sreq); 101 skcipher_free_async_sgls(sreq);
113 kfree(req); 102 kzfree(sreq);
114 iocb->ki_complete(iocb, err, err); 103 iocb->ki_complete(iocb, err, err);
115} 104}
116 105
@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
306{ 295{
307 struct sock *sk = sock->sk; 296 struct sock *sk = sock->sk;
308 struct alg_sock *ask = alg_sk(sk); 297 struct alg_sock *ask = alg_sk(sk);
298 struct sock *psk = ask->parent;
299 struct alg_sock *pask = alg_sk(psk);
309 struct skcipher_ctx *ctx = ask->private; 300 struct skcipher_ctx *ctx = ask->private;
310 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); 301 struct skcipher_tfm *skc = pask->private;
302 struct crypto_skcipher *tfm = skc->skcipher;
311 unsigned ivsize = crypto_skcipher_ivsize(tfm); 303 unsigned ivsize = crypto_skcipher_ivsize(tfm);
312 struct skcipher_sg_list *sgl; 304 struct skcipher_sg_list *sgl;
313 struct af_alg_control con = {}; 305 struct af_alg_control con = {};
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
509{ 501{
510 struct sock *sk = sock->sk; 502 struct sock *sk = sock->sk;
511 struct alg_sock *ask = alg_sk(sk); 503 struct alg_sock *ask = alg_sk(sk);
504 struct sock *psk = ask->parent;
505 struct alg_sock *pask = alg_sk(psk);
512 struct skcipher_ctx *ctx = ask->private; 506 struct skcipher_ctx *ctx = ask->private;
507 struct skcipher_tfm *skc = pask->private;
508 struct crypto_skcipher *tfm = skc->skcipher;
513 struct skcipher_sg_list *sgl; 509 struct skcipher_sg_list *sgl;
514 struct scatterlist *sg; 510 struct scatterlist *sg;
515 struct skcipher_async_req *sreq; 511 struct skcipher_async_req *sreq;
516 struct skcipher_request *req; 512 struct skcipher_request *req;
517 struct skcipher_async_rsgl *last_rsgl = NULL; 513 struct skcipher_async_rsgl *last_rsgl = NULL;
518 unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); 514 unsigned int txbufs = 0, len = 0, tx_nents;
519 unsigned int reqlen = sizeof(struct skcipher_async_req) + 515 unsigned int reqsize = crypto_skcipher_reqsize(tfm);
520 GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); 516 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
521 int err = -ENOMEM; 517 int err = -ENOMEM;
522 bool mark = false; 518 bool mark = false;
519 char *iv;
523 520
524 lock_sock(sk); 521 sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
525 req = kmalloc(reqlen, GFP_KERNEL); 522 if (unlikely(!sreq))
526 if (unlikely(!req)) 523 goto out;
527 goto unlock;
528 524
529 sreq = GET_SREQ(req, ctx); 525 req = &sreq->req;
526 iv = (char *)(req + 1) + reqsize;
530 sreq->iocb = msg->msg_iocb; 527 sreq->iocb = msg->msg_iocb;
531 memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
532 INIT_LIST_HEAD(&sreq->list); 528 INIT_LIST_HEAD(&sreq->list);
529 sreq->inflight = &ctx->inflight;
530
531 lock_sock(sk);
532 tx_nents = skcipher_all_sg_nents(ctx);
533 sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); 533 sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
534 if (unlikely(!sreq->tsg)) { 534 if (unlikely(!sreq->tsg))
535 kfree(req);
536 goto unlock; 535 goto unlock;
537 }
538 sg_init_table(sreq->tsg, tx_nents); 536 sg_init_table(sreq->tsg, tx_nents);
539 memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); 537 memcpy(iv, ctx->iv, ivsize);
540 skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); 538 skcipher_request_set_tfm(req, tfm);
541 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 539 skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
542 skcipher_async_cb, sk); 540 skcipher_async_cb, sreq);
543 541
544 while (iov_iter_count(&msg->msg_iter)) { 542 while (iov_iter_count(&msg->msg_iter)) {
545 struct skcipher_async_rsgl *rsgl; 543 struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
615 sg_mark_end(sreq->tsg + txbufs - 1); 613 sg_mark_end(sreq->tsg + txbufs - 1);
616 614
617 skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, 615 skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
618 len, sreq->iv); 616 len, iv);
619 err = ctx->enc ? crypto_skcipher_encrypt(req) : 617 err = ctx->enc ? crypto_skcipher_encrypt(req) :
620 crypto_skcipher_decrypt(req); 618 crypto_skcipher_decrypt(req);
621 if (err == -EINPROGRESS) { 619 if (err == -EINPROGRESS) {
622 atomic_inc(&ctx->inflight); 620 atomic_inc(&ctx->inflight);
623 err = -EIOCBQUEUED; 621 err = -EIOCBQUEUED;
622 sreq = NULL;
624 goto unlock; 623 goto unlock;
625 } 624 }
626free: 625free:
627 skcipher_free_async_sgls(sreq); 626 skcipher_free_async_sgls(sreq);
628 kfree(req);
629unlock: 627unlock:
630 skcipher_wmem_wakeup(sk); 628 skcipher_wmem_wakeup(sk);
631 release_sock(sk); 629 release_sock(sk);
630 kzfree(sreq);
631out:
632 return err; 632 return err;
633} 633}
634 634
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
637{ 637{
638 struct sock *sk = sock->sk; 638 struct sock *sk = sock->sk;
639 struct alg_sock *ask = alg_sk(sk); 639 struct alg_sock *ask = alg_sk(sk);
640 struct sock *psk = ask->parent;
641 struct alg_sock *pask = alg_sk(psk);
640 struct skcipher_ctx *ctx = ask->private; 642 struct skcipher_ctx *ctx = ask->private;
641 unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( 643 struct skcipher_tfm *skc = pask->private;
642 &ctx->req)); 644 struct crypto_skcipher *tfm = skc->skcipher;
645 unsigned bs = crypto_skcipher_blocksize(tfm);
643 struct skcipher_sg_list *sgl; 646 struct skcipher_sg_list *sgl;
644 struct scatterlist *sg; 647 struct scatterlist *sg;
645 int err = -EAGAIN; 648 int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
947 ask->private = ctx; 950 ask->private = ctx;
948 951
949 skcipher_request_set_tfm(&ctx->req, skcipher); 952 skcipher_request_set_tfm(&ctx->req, skcipher);
950 skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, 953 skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
954 CRYPTO_TFM_REQ_MAY_BACKLOG,
951 af_alg_complete, &ctx->completion); 955 af_alg_complete, &ctx->completion);
952 956
953 sk->sk_destruct = skcipher_sock_destruct; 957 sk->sk_destruct = skcipher_sock_destruct;
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 237f3795cfaa..43fe85f20d57 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
499 if (link->dump == NULL) 499 if (link->dump == NULL)
500 return -EINVAL; 500 return -EINVAL;
501 501
502 down_read(&crypto_alg_sem);
502 list_for_each_entry(alg, &crypto_alg_list, cra_list) 503 list_for_each_entry(alg, &crypto_alg_list, cra_list)
503 dump_alloc += CRYPTO_REPORT_MAXSIZE; 504 dump_alloc += CRYPTO_REPORT_MAXSIZE;
504 505
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
508 .done = link->done, 509 .done = link->done,
509 .min_dump_alloc = dump_alloc, 510 .min_dump_alloc = dump_alloc,
510 }; 511 };
511 return netlink_dump_start(crypto_nlsk, skb, nlh, &c); 512 err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
512 } 513 }
514 up_read(&crypto_alg_sem);
515
516 return err;
513 } 517 }
514 518
515 err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, 519 err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..fb53db187854 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
469 nfit_mem->bdw = NULL; 469 nfit_mem->bdw = NULL;
470} 470}
471 471
472static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, 472static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
474{ 474{
475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
476 struct nfit_memdev *nfit_memdev; 476 struct nfit_memdev *nfit_memdev;
477 struct nfit_flush *nfit_flush; 477 struct nfit_flush *nfit_flush;
478 struct nfit_dcr *nfit_dcr;
479 struct nfit_bdw *nfit_bdw; 478 struct nfit_bdw *nfit_bdw;
480 struct nfit_idt *nfit_idt; 479 struct nfit_idt *nfit_idt;
481 u16 idt_idx, range_index; 480 u16 idt_idx, range_index;
482 481
483 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
484 if (nfit_dcr->dcr->region_index != dcr)
485 continue;
486 nfit_mem->dcr = nfit_dcr->dcr;
487 break;
488 }
489
490 if (!nfit_mem->dcr) {
491 dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
492 spa->range_index, __to_nfit_memdev(nfit_mem)
493 ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
494 return -ENODEV;
495 }
496
497 /*
498 * We've found enough to create an nvdimm, optionally
499 * find an associated BDW
500 */
501 list_add(&nfit_mem->list, &acpi_desc->dimms);
502
503 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 482 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
504 if (nfit_bdw->bdw->region_index != dcr) 483 if (nfit_bdw->bdw->region_index != dcr)
505 continue; 484 continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
508 } 487 }
509 488
510 if (!nfit_mem->bdw) 489 if (!nfit_mem->bdw)
511 return 0; 490 return;
512 491
513 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 492 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
514 493
515 if (!nfit_mem->spa_bdw) 494 if (!nfit_mem->spa_bdw)
516 return 0; 495 return;
517 496
518 range_index = nfit_mem->spa_bdw->range_index; 497 range_index = nfit_mem->spa_bdw->range_index;
519 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 498 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
538 } 517 }
539 break; 518 break;
540 } 519 }
541
542 return 0;
543} 520}
544 521
545static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 522static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
548 struct nfit_mem *nfit_mem, *found; 525 struct nfit_mem *nfit_mem, *found;
549 struct nfit_memdev *nfit_memdev; 526 struct nfit_memdev *nfit_memdev;
550 int type = nfit_spa_type(spa); 527 int type = nfit_spa_type(spa);
551 u16 dcr;
552 528
553 switch (type) { 529 switch (type) {
554 case NFIT_SPA_DCR: 530 case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
559 } 535 }
560 536
561 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 537 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
562 int rc; 538 struct nfit_dcr *nfit_dcr;
539 u32 device_handle;
540 u16 dcr;
563 541
564 if (nfit_memdev->memdev->range_index != spa->range_index) 542 if (nfit_memdev->memdev->range_index != spa->range_index)
565 continue; 543 continue;
566 found = NULL; 544 found = NULL;
567 dcr = nfit_memdev->memdev->region_index; 545 dcr = nfit_memdev->memdev->region_index;
546 device_handle = nfit_memdev->memdev->device_handle;
568 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 547 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
569 if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { 548 if (__to_nfit_memdev(nfit_mem)->device_handle
549 == device_handle) {
570 found = nfit_mem; 550 found = nfit_mem;
571 break; 551 break;
572 } 552 }
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
579 if (!nfit_mem) 559 if (!nfit_mem)
580 return -ENOMEM; 560 return -ENOMEM;
581 INIT_LIST_HEAD(&nfit_mem->list); 561 INIT_LIST_HEAD(&nfit_mem->list);
562 list_add(&nfit_mem->list, &acpi_desc->dimms);
563 }
564
565 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
566 if (nfit_dcr->dcr->region_index != dcr)
567 continue;
568 /*
569 * Record the control region for the dimm. For
570 * the ACPI 6.1 case, where there are separate
571 * control regions for the pmem vs blk
572 * interfaces, be sure to record the extended
573 * blk details.
574 */
575 if (!nfit_mem->dcr)
576 nfit_mem->dcr = nfit_dcr->dcr;
577 else if (nfit_mem->dcr->windows == 0
578 && nfit_dcr->dcr->windows)
579 nfit_mem->dcr = nfit_dcr->dcr;
580 break;
581 }
582
583 if (dcr && !nfit_mem->dcr) {
584 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
585 spa->range_index, dcr);
586 return -ENODEV;
582 } 587 }
583 588
584 if (type == NFIT_SPA_DCR) { 589 if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
595 nfit_mem->idt_dcr = nfit_idt->idt; 600 nfit_mem->idt_dcr = nfit_idt->idt;
596 break; 601 break;
597 } 602 }
603 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
598 } else { 604 } else {
599 /* 605 /*
600 * A single dimm may belong to multiple SPA-PM 606 * A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
603 */ 609 */
604 nfit_mem->memdev_pmem = nfit_memdev->memdev; 610 nfit_mem->memdev_pmem = nfit_memdev->memdev;
605 } 611 }
606
607 if (found)
608 continue;
609
610 rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
611 if (rc)
612 return rc;
613 } 612 }
614 613
615 return 0; 614 return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1504 case 1: 1503 case 1:
1505 /* ARS unsupported, but we should never get here */ 1504 /* ARS unsupported, but we should never get here */
1506 return 0; 1505 return 0;
1507 case 2: 1506 case 6:
1508 return -EINVAL;
1509 case 3:
1510 /* ARS is in progress */ 1507 /* ARS is in progress */
1511 msleep(1000); 1508 msleep(1000);
1512 break; 1509 break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1517} 1514}
1518 1515
1519static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, 1516static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1520 struct nd_cmd_ars_status *cmd) 1517 struct nd_cmd_ars_status *cmd, u32 size)
1521{ 1518{
1522 int rc; 1519 int rc;
1523 1520
1524 while (1) { 1521 while (1) {
1525 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd, 1522 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
1526 sizeof(*cmd)); 1523 size);
1527 if (rc || cmd->status & 0xffff) 1524 if (rc || cmd->status & 0xffff)
1528 return -ENXIO; 1525 return -ENXIO;
1529 1526
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1538 case 2: 1535 case 2:
1539 /* No ARS performed for the current boot */ 1536 /* No ARS performed for the current boot */
1540 return 0; 1537 return 0;
1538 case 3:
1539 /* TODO: error list overflow support */
1541 default: 1540 default:
1542 return -ENXIO; 1541 return -ENXIO;
1543 } 1542 }
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1581 struct nd_cmd_ars_start *ars_start = NULL; 1580 struct nd_cmd_ars_start *ars_start = NULL;
1582 struct nd_cmd_ars_cap *ars_cap = NULL; 1581 struct nd_cmd_ars_cap *ars_cap = NULL;
1583 u64 start, len, cur, remaining; 1582 u64 start, len, cur, remaining;
1583 u32 ars_status_size;
1584 int rc; 1584 int rc;
1585 1585
1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL); 1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1610,14 +1610,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1610 * Check if a full-range ARS has been run. If so, use those results 1610 * Check if a full-range ARS has been run. If so, use those results
1611 * without having to start a new ARS. 1611 * without having to start a new ARS.
1612 */ 1612 */
1613 ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status), 1613 ars_status_size = ars_cap->max_ars_out;
1614 GFP_KERNEL); 1614 ars_status = kzalloc(ars_status_size, GFP_KERNEL);
1615 if (!ars_status) { 1615 if (!ars_status) {
1616 rc = -ENOMEM; 1616 rc = -ENOMEM;
1617 goto out; 1617 goto out;
1618 } 1618 }
1619 1619
1620 rc = ars_get_status(nd_desc, ars_status); 1620 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1621 if (rc) 1621 if (rc)
1622 goto out; 1622 goto out;
1623 1623
@@ -1647,7 +1647,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1647 if (rc) 1647 if (rc)
1648 goto out; 1648 goto out;
1649 1649
1650 rc = ars_get_status(nd_desc, ars_status); 1650 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1651 if (rc) 1651 if (rc)
1652 goto out; 1652 goto out;
1653 1653
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
406 return 0; 406 return 0;
407 } 407 }
408 408
409 if (pci_has_managed_irq(dev)) 409 if (dev->irq_managed && dev->irq > 0)
410 return 0; 410 return 0;
411 411
412 entry = acpi_pci_irq_lookup(dev, pin); 412 entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
451 kfree(entry); 451 kfree(entry);
452 return rc; 452 return rc;
453 } 453 }
454 pci_set_managed_irq(dev, rc); 454 dev->irq = rc;
455 dev->irq_managed = 1;
455 456
456 if (link) 457 if (link)
457 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); 458 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
474 u8 pin; 475 u8 pin;
475 476
476 pin = dev->pin; 477 pin = dev->pin;
477 if (!pin || !pci_has_managed_irq(dev)) 478 if (!pin || !dev->irq_managed || dev->irq <= 0)
478 return; 479 return;
479 480
481 /* Keep IOAPIC pin configuration when suspending */
482 if (dev->dev.power.is_prepared)
483 return;
484#ifdef CONFIG_PM
485 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
486 return;
487#endif
488
480 entry = acpi_pci_irq_lookup(dev, pin); 489 entry = acpi_pci_irq_lookup(dev, pin);
481 if (!entry) 490 if (!entry)
482 return; 491 return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
496 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); 505 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
497 if (gsi >= 0) { 506 if (gsi >= 0) {
498 acpi_unregister_gsi(gsi); 507 acpi_unregister_gsi(gsi);
499 pci_reset_managed_irq(dev); 508 dev->irq_managed = 0;
500 } 509 }
501} 510}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> 6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
7 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
8 * 7 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 * 9 *
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
438 * enabled system. 437 * enabled system.
439 */ 438 */
440 439
440#define ACPI_MAX_IRQS 256
441#define ACPI_MAX_ISA_IRQ 16 441#define ACPI_MAX_ISA_IRQ 16
442 442
443#define PIRQ_PENALTY_PCI_AVAILABLE (0) 443#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) 447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) 448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
449 449
450static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { 450static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ 451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ 452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ 453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ 464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ 465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ 466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
467 /* >IRQ15 */
467}; 468};
468 469
469struct irq_penalty_info {
470 int irq;
471 int penalty;
472 struct list_head node;
473};
474
475static LIST_HEAD(acpi_irq_penalty_list);
476
477static int acpi_irq_get_penalty(int irq)
478{
479 struct irq_penalty_info *irq_info;
480
481 if (irq < ACPI_MAX_ISA_IRQ)
482 return acpi_irq_isa_penalty[irq];
483
484 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
485 if (irq_info->irq == irq)
486 return irq_info->penalty;
487 }
488
489 return 0;
490}
491
492static int acpi_irq_set_penalty(int irq, int new_penalty)
493{
494 struct irq_penalty_info *irq_info;
495
496 /* see if this is a ISA IRQ */
497 if (irq < ACPI_MAX_ISA_IRQ) {
498 acpi_irq_isa_penalty[irq] = new_penalty;
499 return 0;
500 }
501
502 /* next, try to locate from the dynamic list */
503 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
504 if (irq_info->irq == irq) {
505 irq_info->penalty = new_penalty;
506 return 0;
507 }
508 }
509
510 /* nope, let's allocate a slot for this IRQ */
511 irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
512 if (!irq_info)
513 return -ENOMEM;
514
515 irq_info->irq = irq;
516 irq_info->penalty = new_penalty;
517 list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
518
519 return 0;
520}
521
522static void acpi_irq_add_penalty(int irq, int penalty)
523{
524 int curpen = acpi_irq_get_penalty(irq);
525
526 acpi_irq_set_penalty(irq, curpen + penalty);
527}
528
529int __init acpi_irq_penalty_init(void) 470int __init acpi_irq_penalty_init(void)
530{ 471{
531 struct acpi_pci_link *link; 472 struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
546 link->irq.possible_count; 487 link->irq.possible_count;
547 488
548 for (i = 0; i < link->irq.possible_count; i++) { 489 for (i = 0; i < link->irq.possible_count; i++) {
549 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) { 490 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
550 int irqpos = link->irq.possible[i]; 491 acpi_irq_penalty[link->irq.
551 492 possible[i]] +=
552 acpi_irq_add_penalty(irqpos, penalty); 493 penalty;
553 }
554 } 494 }
555 495
556 } else if (link->irq.active) { 496 } else if (link->irq.active) {
557 acpi_irq_add_penalty(link->irq.active, 497 acpi_irq_penalty[link->irq.active] +=
558 PIRQ_PENALTY_PCI_POSSIBLE); 498 PIRQ_PENALTY_PCI_POSSIBLE;
559 } 499 }
560 } 500 }
561 501
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
607 * the use of IRQs 9, 10, 11, and >15. 547 * the use of IRQs 9, 10, 11, and >15.
608 */ 548 */
609 for (i = (link->irq.possible_count - 1); i >= 0; i--) { 549 for (i = (link->irq.possible_count - 1); i >= 0; i--) {
610 if (acpi_irq_get_penalty(irq) > 550 if (acpi_irq_penalty[irq] >
611 acpi_irq_get_penalty(link->irq.possible[i])) 551 acpi_irq_penalty[link->irq.possible[i]])
612 irq = link->irq.possible[i]; 552 irq = link->irq.possible[i];
613 } 553 }
614 } 554 }
615 if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { 555 if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
616 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " 556 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
617 "Try pci=noacpi or acpi=off\n", 557 "Try pci=noacpi or acpi=off\n",
618 acpi_device_name(link->device), 558 acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
628 acpi_device_bid(link->device)); 568 acpi_device_bid(link->device));
629 return -ENODEV; 569 return -ENODEV;
630 } else { 570 } else {
631 acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING); 571 acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
632
633 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", 572 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
634 acpi_device_name(link->device), 573 acpi_device_name(link->device),
635 acpi_device_bid(link->device), link->irq.active); 574 acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
839} 778}
840 779
841/* 780/*
842 * modify penalty from cmdline 781 * modify acpi_irq_penalty[] from cmdline
843 */ 782 */
844static int __init acpi_irq_penalty_update(char *str, int used) 783static int __init acpi_irq_penalty_update(char *str, int used)
845{ 784{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
857 if (irq < 0) 796 if (irq < 0)
858 continue; 797 continue;
859 798
799 if (irq >= ARRAY_SIZE(acpi_irq_penalty))
800 continue;
801
860 if (used) 802 if (used)
861 acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); 803 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
862 else 804 else
863 acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); 805 acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
864 806
865 if (retval != 2) /* no next number */ 807 if (retval != 2) /* no next number */
866 break; 808 break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
877 */ 819 */
878void acpi_penalize_isa_irq(int irq, int active) 820void acpi_penalize_isa_irq(int irq, int active)
879{ 821{
880 if (irq >= 0) 822 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
881 acpi_irq_add_penalty(irq, active ? 823 if (active)
882 PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 824 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
825 else
826 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
827 }
883} 828}
884 829
885bool acpi_isa_irq_available(int irq) 830bool acpi_isa_irq_available(int irq)
886{ 831{
887 return irq >= 0 && 832 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
888 (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 833 acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
889} 834}
890 835
891/* 836/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
895 */ 840 */
896void acpi_penalize_sci_irq(int irq, int trigger, int polarity) 841void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
897{ 842{
898 int penalty; 843 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
899 844 if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
900 if (irq < 0) 845 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
901 return; 846 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
902 847 else
903 if (trigger != ACPI_MADT_TRIGGER_LEVEL || 848 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
904 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) 849 }
905 penalty = PIRQ_PENALTY_ISA_ALWAYS;
906 else
907 penalty = PIRQ_PENALTY_PCI_USING;
908
909 acpi_irq_add_penalty(irq, penalty);
910} 850}
911 851
912/* 852/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2075 return -EFAULT; 2075 return -EFAULT;
2076 2076
2077 ptr += sizeof(void *); 2077 ptr += sizeof(cookie);
2078 list_for_each_entry(w, &proc->delivered_death, entry) { 2078 list_for_each_entry(w, &proc->delivered_death, entry) {
2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2080 2080
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 594fcabd22cd..546a3692774f 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ 265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ 266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
267 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
268 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
269 { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
270 { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
271 { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
272 { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
273 { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
274 { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
275 { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
276 { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
277 { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
278 { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
279 { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
280 { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
281 { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
282 { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
283 { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
284 { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
285 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
286 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
267 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ 287 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
268 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ 288 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
269 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ 289 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa438889c..a44c75d4c284 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -250,6 +250,7 @@ enum {
250 AHCI_HFLAG_MULTI_MSI = 0, 250 AHCI_HFLAG_MULTI_MSI = 0,
251 AHCI_HFLAG_MULTI_MSIX = 0, 251 AHCI_HFLAG_MULTI_MSIX = 0,
252#endif 252#endif
253 AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
253 254
254 /* ap->flags bits */ 255 /* ap->flags bits */
255 256
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2fd04b..e87bcec0fd7c 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
317 if (IS_ERR(hpriv)) 317 if (IS_ERR(hpriv))
318 return PTR_ERR(hpriv); 318 return PTR_ERR(hpriv);
319 hpriv->plat_data = priv; 319 hpriv->plat_data = priv;
320 hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
320 321
321 brcm_sata_alpm_init(hpriv); 322 brcm_sata_alpm_init(hpriv);
322 323
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e78d6d..402967902cbe 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
496 } 496 }
497 } 497 }
498 498
499 /* fabricate port_map from cap.nr_ports */ 499 /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
500 if (!port_map) { 500 if (!port_map && vers < 0x10300) {
501 port_map = (1 << ahci_nr_ports(cap)) - 1; 501 port_map = (1 << ahci_nr_ports(cap)) - 1;
502 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); 502 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
503 503
@@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
593int ahci_stop_engine(struct ata_port *ap) 593int ahci_stop_engine(struct ata_port *ap)
594{ 594{
595 void __iomem *port_mmio = ahci_port_base(ap); 595 void __iomem *port_mmio = ahci_port_base(ap);
596 struct ahci_host_priv *hpriv = ap->host->private_data;
596 u32 tmp; 597 u32 tmp;
597 598
599 /*
600 * On some controllers, stopping a port's DMA engine while the port
601 * is in ALPM state (partial or slumber) results in failures on
602 * subsequent DMA engine starts. For those controllers, put the
603 * port back in active state before stopping its DMA engine.
604 */
605 if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
606 (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
607 ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
608 dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
609 return -EIO;
610 }
611
598 tmp = readl(port_mmio + PORT_CMD); 612 tmp = readl(port_mmio + PORT_CMD);
599 613
600 /* check if the HBA is idle */ 614 /* check if the HBA is idle */
@@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
689 void __iomem *port_mmio = ahci_port_base(ap); 703 void __iomem *port_mmio = ahci_port_base(ap);
690 704
691 if (policy != ATA_LPM_MAX_POWER) { 705 if (policy != ATA_LPM_MAX_POWER) {
706 /* wakeup flag only applies to the max power policy */
707 hints &= ~ATA_LPM_WAKE_ONLY;
708
692 /* 709 /*
693 * Disable interrupts on Phy Ready. This keeps us from 710 * Disable interrupts on Phy Ready. This keeps us from
694 * getting woken up due to spurious phy ready 711 * getting woken up due to spurious phy ready
@@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
704 u32 cmd = readl(port_mmio + PORT_CMD); 721 u32 cmd = readl(port_mmio + PORT_CMD);
705 722
706 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { 723 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
707 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); 724 if (!(hints & ATA_LPM_WAKE_ONLY))
725 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
708 cmd |= PORT_CMD_ICC_ACTIVE; 726 cmd |= PORT_CMD_ICC_ACTIVE;
709 727
710 writel(cmd, port_mmio + PORT_CMD); 728 writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
712 730
713 /* wait 10ms to be sure we've come out of LPM state */ 731 /* wait 10ms to be sure we've come out of LPM state */
714 ata_msleep(ap, 10); 732 ata_msleep(ap, 10);
733
734 if (hints & ATA_LPM_WAKE_ONLY)
735 return 0;
715 } else { 736 } else {
716 cmd |= PORT_CMD_ALPE; 737 cmd |= PORT_CMD_ALPE;
717 if (policy == ATA_LPM_MIN_POWER) 738 if (policy == ATA_LPM_MIN_POWER)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb74719d2c1..55e257c268dd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4128 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4128 /* Odd clown on sil3726/4726 PMPs */ 4129 /* Odd clown on sil3726/4726 PMPs */
4129 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4130 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4130 4131
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215a9a22..051b6158d1b7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
998{ 998{
999 struct ata_port *ap = qc->ap; 999 struct ata_port *ap = qc->ap;
1000 unsigned long flags;
1001 1000
1002 if (ap->ops->error_handler) { 1001 if (ap->ops->error_handler) {
1003 if (in_wq) { 1002 if (in_wq) {
1004 spin_lock_irqsave(ap->lock, flags);
1005
1006 /* EH might have kicked in while host lock is 1003 /* EH might have kicked in while host lock is
1007 * released. 1004 * released.
1008 */ 1005 */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1014 } else 1011 } else
1015 ata_port_freeze(ap); 1012 ata_port_freeze(ap);
1016 } 1013 }
1017
1018 spin_unlock_irqrestore(ap->lock, flags);
1019 } else { 1014 } else {
1020 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1015 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1021 ata_qc_complete(qc); 1016 ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1024 } 1019 }
1025 } else { 1020 } else {
1026 if (in_wq) { 1021 if (in_wq) {
1027 spin_lock_irqsave(ap->lock, flags);
1028 ata_sff_irq_on(ap); 1022 ata_sff_irq_on(ap);
1029 ata_qc_complete(qc); 1023 ata_qc_complete(qc);
1030 spin_unlock_irqrestore(ap->lock, flags);
1031 } else 1024 } else
1032 ata_qc_complete(qc); 1025 ata_qc_complete(qc);
1033 } 1026 }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1048{ 1041{
1049 struct ata_link *link = qc->dev->link; 1042 struct ata_link *link = qc->dev->link;
1050 struct ata_eh_info *ehi = &link->eh_info; 1043 struct ata_eh_info *ehi = &link->eh_info;
1051 unsigned long flags = 0;
1052 int poll_next; 1044 int poll_next;
1053 1045
1046 lockdep_assert_held(ap->lock);
1047
1054 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1048 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1055 1049
1056 /* Make sure ata_sff_qc_issue() does not throw things 1050 /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
1112 } 1106 }
1113 } 1107 }
1114 1108
1115 /* Send the CDB (atapi) or the first data block (ata pio out).
1116 * During the state transition, interrupt handler shouldn't
1117 * be invoked before the data transfer is complete and
1118 * hsm_task_state is changed. Hence, the following locking.
1119 */
1120 if (in_wq)
1121 spin_lock_irqsave(ap->lock, flags);
1122
1123 if (qc->tf.protocol == ATA_PROT_PIO) { 1109 if (qc->tf.protocol == ATA_PROT_PIO) {
1124 /* PIO data out protocol. 1110 /* PIO data out protocol.
1125 * send first data block. 1111 * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
1135 /* send CDB */ 1121 /* send CDB */
1136 atapi_send_cdb(ap, qc); 1122 atapi_send_cdb(ap, qc);
1137 1123
1138 if (in_wq)
1139 spin_unlock_irqrestore(ap->lock, flags);
1140
1141 /* if polling, ata_sff_pio_task() handles the rest. 1124 /* if polling, ata_sff_pio_task() handles the rest.
1142 * otherwise, interrupt handler takes over from here. 1125 * otherwise, interrupt handler takes over from here.
1143 */ 1126 */
@@ -1296,7 +1279,8 @@ fsm_start:
1296 break; 1279 break;
1297 default: 1280 default:
1298 poll_next = 0; 1281 poll_next = 0;
1299 BUG(); 1282 WARN(true, "ata%d: SFF host state machine in invalid state %d",
1283 ap->print_id, ap->hsm_task_state);
1300 } 1284 }
1301 1285
1302 return poll_next; 1286 return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1361 u8 status; 1345 u8 status;
1362 int poll_next; 1346 int poll_next;
1363 1347
1348 spin_lock_irq(ap->lock);
1349
1364 BUG_ON(ap->sff_pio_task_link == NULL); 1350 BUG_ON(ap->sff_pio_task_link == NULL);
1365 /* qc can be NULL if timeout occurred */ 1351 /* qc can be NULL if timeout occurred */
1366 qc = ata_qc_from_tag(ap, link->active_tag); 1352 qc = ata_qc_from_tag(ap, link->active_tag);
1367 if (!qc) { 1353 if (!qc) {
1368 ap->sff_pio_task_link = NULL; 1354 ap->sff_pio_task_link = NULL;
1369 return; 1355 goto out_unlock;
1370 } 1356 }
1371 1357
1372fsm_start: 1358fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
1381 */ 1367 */
1382 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1368 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1383 if (status & ATA_BUSY) { 1369 if (status & ATA_BUSY) {
1370 spin_unlock_irq(ap->lock);
1384 ata_msleep(ap, 2); 1371 ata_msleep(ap, 2);
1372 spin_lock_irq(ap->lock);
1373
1385 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1374 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1386 if (status & ATA_BUSY) { 1375 if (status & ATA_BUSY) {
1387 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); 1376 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1388 return; 1377 goto out_unlock;
1389 } 1378 }
1390 } 1379 }
1391 1380
@@ -1402,6 +1391,8 @@ fsm_start:
1402 */ 1391 */
1403 if (poll_next) 1392 if (poll_next)
1404 goto fsm_start; 1393 goto fsm_start;
1394out_unlock:
1395 spin_unlock_irq(ap->lock);
1405} 1396}
1406 1397
1407/** 1398/**
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89f5cf68d80a..04a1582e80bb 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
206 if (mc->release) 206 if (mc->release)
207 mc->release(master, mc->data); 207 mc->release(master, mc->data);
208 } 208 }
209
210 kfree(match->compare);
209} 211}
210 212
211static void devm_component_match_release(struct device *dev, void *res) 213static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
221 if (match->alloc == num) 223 if (match->alloc == num)
222 return 0; 224 return 0;
223 225
224 new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); 226 new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
225 if (!new) 227 if (!new)
226 return -ENOMEM; 228 return -ENOMEM;
227 229
228 if (match->compare) { 230 if (match->compare) {
229 memcpy(new, match->compare, sizeof(*new) * 231 memcpy(new, match->compare, sizeof(*new) *
230 min(match->num, num)); 232 min(match->num, num));
231 devm_kfree(dev, match->compare); 233 kfree(match->compare);
232 } 234 }
233 match->compare = new; 235 match->compare = new;
234 match->alloc = num; 236 match->alloc = num;
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
283} 285}
284EXPORT_SYMBOL(component_match_add_release); 286EXPORT_SYMBOL(component_match_add_release);
285 287
288static void free_master(struct master *master)
289{
290 struct component_match *match = master->match;
291 int i;
292
293 list_del(&master->node);
294
295 if (match) {
296 for (i = 0; i < match->num; i++) {
297 struct component *c = match->compare[i].component;
298 if (c)
299 c->master = NULL;
300 }
301 }
302
303 kfree(master);
304}
305
286int component_master_add_with_match(struct device *dev, 306int component_master_add_with_match(struct device *dev,
287 const struct component_master_ops *ops, 307 const struct component_master_ops *ops,
288 struct component_match *match) 308 struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
309 329
310 ret = try_to_bring_up_master(master, NULL); 330 ret = try_to_bring_up_master(master, NULL);
311 331
312 if (ret < 0) { 332 if (ret < 0)
313 /* Delete off the list if we weren't successful */ 333 free_master(master);
314 list_del(&master->node); 334
315 kfree(master);
316 }
317 mutex_unlock(&component_mutex); 335 mutex_unlock(&component_mutex);
318 336
319 return ret < 0 ? ret : 0; 337 return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
324 const struct component_master_ops *ops) 342 const struct component_master_ops *ops)
325{ 343{
326 struct master *master; 344 struct master *master;
327 int i;
328 345
329 mutex_lock(&component_mutex); 346 mutex_lock(&component_mutex);
330 master = __master_find(dev, ops); 347 master = __master_find(dev, ops);
331 if (master) { 348 if (master) {
332 struct component_match *match = master->match;
333
334 take_down_master(master); 349 take_down_master(master);
335 350 free_master(master);
336 list_del(&master->node);
337
338 if (match) {
339 for (i = 0; i < match->num; i++) {
340 struct component *c = match->compare[i].component;
341 if (c)
342 c->master = NULL;
343 }
344 }
345 kfree(master);
346 } 351 }
347 mutex_unlock(&component_mutex); 352 mutex_unlock(&component_mutex);
348} 353}
@@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
486 491
487 ret = try_to_bring_up_masters(component); 492 ret = try_to_bring_up_masters(component);
488 if (ret < 0) { 493 if (ret < 0) {
494 if (component->master)
495 remove_component(component->master, component);
489 list_del(&component->node); 496 list_del(&component->node);
490 497
491 kfree(component); 498 kfree(component);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8812bfb9e3b8..eea51569f0eb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
133 while (val_size) { 133 while (val_size) {
134 switch (ctx->val_bytes) { 134 switch (ctx->val_bytes) {
135 case 1: 135 case 1:
136 __raw_writeb(*(u8 *)val, ctx->regs + offset); 136 writeb(*(u8 *)val, ctx->regs + offset);
137 break; 137 break;
138 case 2: 138 case 2:
139 __raw_writew(*(u16 *)val, ctx->regs + offset); 139 writew(*(u16 *)val, ctx->regs + offset);
140 break; 140 break;
141 case 4: 141 case 4:
142 __raw_writel(*(u32 *)val, ctx->regs + offset); 142 writel(*(u32 *)val, ctx->regs + offset);
143 break; 143 break;
144#ifdef CONFIG_64BIT 144#ifdef CONFIG_64BIT
145 case 8: 145 case 8:
146 __raw_writeq(*(u64 *)val, ctx->regs + offset); 146 writeq(*(u64 *)val, ctx->regs + offset);
147 break; 147 break;
148#endif 148#endif
149 default: 149 default:
@@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
193 while (val_size) { 193 while (val_size) {
194 switch (ctx->val_bytes) { 194 switch (ctx->val_bytes) {
195 case 1: 195 case 1:
196 *(u8 *)val = __raw_readb(ctx->regs + offset); 196 *(u8 *)val = readb(ctx->regs + offset);
197 break; 197 break;
198 case 2: 198 case 2:
199 *(u16 *)val = __raw_readw(ctx->regs + offset); 199 *(u16 *)val = readw(ctx->regs + offset);
200 break; 200 break;
201 case 4: 201 case 4:
202 *(u32 *)val = __raw_readl(ctx->regs + offset); 202 *(u32 *)val = readl(ctx->regs + offset);
203 break; 203 break;
204#ifdef CONFIG_64BIT 204#ifdef CONFIG_64BIT
205 case 8: 205 case 8:
206 *(u64 *)val = __raw_readq(ctx->regs + offset); 206 *(u64 *)val = readq(ctx->regs + offset);
207 break; 207 break;
208#endif 208#endif
209 default: 209 default:
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e251201dd48..84708a5f8c52 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@ static void set_fdc(int drive)
866} 866}
867 867
868/* locks the driver */ 868/* locks the driver */
869static int lock_fdc(int drive, bool interruptible) 869static int lock_fdc(int drive)
870{ 870{
871 if (WARN(atomic_read(&usage_count) == 0, 871 if (WARN(atomic_read(&usage_count) == 0,
872 "Trying to lock fdc while usage count=0\n")) 872 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
2173{ 2173{
2174 int ret; 2174 int ret;
2175 2175
2176 if (lock_fdc(drive, true)) 2176 if (lock_fdc(drive))
2177 return -EINTR; 2177 return -EINTR;
2178 2178
2179 set_floppy(drive); 2179 set_floppy(drive);
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
2960{ 2960{
2961 int ret; 2961 int ret;
2962 2962
2963 if (lock_fdc(drive, interruptible)) 2963 if (lock_fdc(drive))
2964 return -EINTR; 2964 return -EINTR;
2965 2965
2966 if (arg == FD_RESET_ALWAYS) 2966 if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3243 if (!capable(CAP_SYS_ADMIN)) 3243 if (!capable(CAP_SYS_ADMIN))
3244 return -EPERM; 3244 return -EPERM;
3245 mutex_lock(&open_lock); 3245 mutex_lock(&open_lock);
3246 if (lock_fdc(drive, true)) { 3246 if (lock_fdc(drive)) {
3247 mutex_unlock(&open_lock); 3247 mutex_unlock(&open_lock);
3248 return -EINTR; 3248 return -EINTR;
3249 } 3249 }
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3263 } else { 3263 } else {
3264 int oldStretch; 3264 int oldStretch;
3265 3265
3266 if (lock_fdc(drive, true)) 3266 if (lock_fdc(drive))
3267 return -EINTR; 3267 return -EINTR;
3268 if (cmd != FDDEFPRM) { 3268 if (cmd != FDDEFPRM) {
3269 /* notice a disk change immediately, else 3269 /* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3349 if (type) 3349 if (type)
3350 *g = &floppy_type[type]; 3350 *g = &floppy_type[type];
3351 else { 3351 else {
3352 if (lock_fdc(drive, false)) 3352 if (lock_fdc(drive))
3353 return -EINTR; 3353 return -EINTR;
3354 if (poll_drive(false, 0) == -EINTR) 3354 if (poll_drive(false, 0) == -EINTR)
3355 return -EINTR; 3355 return -EINTR;
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3433 if (UDRS->fd_ref != 1) 3433 if (UDRS->fd_ref != 1)
3434 /* somebody else has this drive open */ 3434 /* somebody else has this drive open */
3435 return -EBUSY; 3435 return -EBUSY;
3436 if (lock_fdc(drive, true)) 3436 if (lock_fdc(drive))
3437 return -EINTR; 3437 return -EINTR;
3438 3438
3439 /* do the actual eject. Fails on 3439 /* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3445 process_fd_request(); 3445 process_fd_request();
3446 return ret; 3446 return ret;
3447 case FDCLRPRM: 3447 case FDCLRPRM:
3448 if (lock_fdc(drive, true)) 3448 if (lock_fdc(drive))
3449 return -EINTR; 3449 return -EINTR;
3450 current_type[drive] = NULL; 3450 current_type[drive] = NULL;
3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 UDP->flags &= ~FTD_MSG; 3467 UDP->flags &= ~FTD_MSG;
3468 return 0; 3468 return 0;
3469 case FDFMTBEG: 3469 case FDFMTBEG:
3470 if (lock_fdc(drive, true)) 3470 if (lock_fdc(drive))
3471 return -EINTR; 3471 return -EINTR;
3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3473 return -EINTR; 3473 return -EINTR;
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3484 return do_format(drive, &inparam.f); 3484 return do_format(drive, &inparam.f);
3485 case FDFMTEND: 3485 case FDFMTEND:
3486 case FDFLUSH: 3486 case FDFLUSH:
3487 if (lock_fdc(drive, true)) 3487 if (lock_fdc(drive))
3488 return -EINTR; 3488 return -EINTR;
3489 return invalidate_drive(bdev); 3489 return invalidate_drive(bdev);
3490 case FDSETEMSGTRESH: 3490 case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3507 outparam = UDP; 3507 outparam = UDP;
3508 break; 3508 break;
3509 case FDPOLLDRVSTAT: 3509 case FDPOLLDRVSTAT:
3510 if (lock_fdc(drive, true)) 3510 if (lock_fdc(drive))
3511 return -EINTR; 3511 return -EINTR;
3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3513 return -EINTR; 3513 return -EINTR;
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3530 case FDRAWCMD: 3530 case FDRAWCMD:
3531 if (type) 3531 if (type)
3532 return -EINVAL; 3532 return -EINVAL;
3533 if (lock_fdc(drive, true)) 3533 if (lock_fdc(drive))
3534 return -EINTR; 3534 return -EINTR;
3535 set_floppy(drive); 3535 set_floppy(drive);
3536 i = raw_cmd_ioctl(cmd, (void __user *)param); 3536 i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3539 process_fd_request(); 3539 process_fd_request();
3540 return i; 3540 return i;
3541 case FDTWADDLE: 3541 case FDTWADDLE:
3542 if (lock_fdc(drive, true)) 3542 if (lock_fdc(drive))
3543 return -EINTR; 3543 return -EINTR;
3544 twaddle(); 3544 twaddle();
3545 process_fd_request(); 3545 process_fd_request();
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3663 3663
3664 opened_bdev[drive] = bdev; 3664 opened_bdev[drive] = bdev;
3665 3665
3666 if (!(mode & (FMODE_READ|FMODE_WRITE))) {
3667 res = -EINVAL;
3668 goto out;
3669 }
3670
3666 res = -ENXIO; 3671 res = -ENXIO;
3667 3672
3668 if (!floppy_track_buffer) { 3673 if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3706 if (UFDCS->rawcmd == 1) 3711 if (UFDCS->rawcmd == 1)
3707 UFDCS->rawcmd = 2; 3712 UFDCS->rawcmd = 2;
3708 3713
3709 if (!(mode & FMODE_NDELAY)) { 3714 UDRS->last_checked = 0;
3710 if (mode & (FMODE_READ|FMODE_WRITE)) { 3715 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3711 UDRS->last_checked = 0; 3716 check_disk_change(bdev);
3712 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3717 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
3713 check_disk_change(bdev); 3718 goto out;
3714 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) 3719 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
3715 goto out; 3720 goto out;
3716 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) 3721
3717 goto out; 3722 res = -EROFS;
3718 } 3723
3719 res = -EROFS; 3724 if ((mode & FMODE_WRITE) &&
3720 if ((mode & FMODE_WRITE) && 3725 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
3721 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) 3726 goto out;
3722 goto out; 3727
3723 }
3724 mutex_unlock(&open_lock); 3728 mutex_unlock(&open_lock);
3725 mutex_unlock(&floppy_mutex); 3729 mutex_unlock(&floppy_mutex);
3726 return 0; 3730 return 0;
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3748 return DISK_EVENT_MEDIA_CHANGE; 3752 return DISK_EVENT_MEDIA_CHANGE;
3749 3753
3750 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3754 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3751 lock_fdc(drive, false); 3755 if (lock_fdc(drive))
3756 return -EINTR;
3752 poll_drive(false, 0); 3757 poll_drive(false, 0);
3753 process_fd_request(); 3758 process_fd_request();
3754 } 3759 }
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk)
3847 "VFS: revalidate called on non-open device.\n")) 3852 "VFS: revalidate called on non-open device.\n"))
3848 return -EFAULT; 3853 return -EFAULT;
3849 3854
3850 lock_fdc(drive, false); 3855 res = lock_fdc(drive);
3856 if (res)
3857 return res;
3851 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
3852 test_bit(FD_VERIFY_BIT, &UDRS->flags)); 3859 test_bit(FD_VERIFY_BIT, &UDRS->flags));
3853 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { 3860 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97d573c..64a7b5971b57 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
478 id->ver_id = 0x1; 478 id->ver_id = 0x1;
479 id->vmnt = 0; 479 id->vmnt = 0;
480 id->cgrps = 1; 480 id->cgrps = 1;
481 id->cap = 0x3; 481 id->cap = 0x2;
482 id->dom = 0x1; 482 id->dom = 0x1;
483 483
484 id->ppaf.blk_offset = 0; 484 id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@ static int null_add_dev(void)
707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
709 709
710
711 mutex_lock(&lock); 710 mutex_lock(&lock);
712 list_add_tail(&nullb->list, &nullb_list);
713 nullb->index = nullb_indexes++; 711 nullb->index = nullb_indexes++;
714 mutex_unlock(&lock); 712 mutex_unlock(&lock);
715 713
@@ -743,6 +741,10 @@ static int null_add_dev(void)
743 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
744 742
745 add_disk(disk); 743 add_disk(disk);
744
745 mutex_lock(&lock);
746 list_add_tail(&nullb->list, &nullb_list);
747 mutex_unlock(&lock);
746done: 748done:
747 return 0; 749 return 0;
748 750
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91c39f7..83eb9e6bf8b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@ again:
1873 return err; 1873 return err;
1874} 1874}
1875 1875
1876static int negotiate_mq(struct blkfront_info *info)
1877{
1878 unsigned int backend_max_queues = 0;
1879 int err;
1880 unsigned int i;
1881
1882 BUG_ON(info->nr_rings);
1883
1884 /* Check if backend supports multiple queues. */
1885 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1886 "multi-queue-max-queues", "%u", &backend_max_queues);
1887 if (err < 0)
1888 backend_max_queues = 1;
1889
1890 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1891 /* We need at least one ring. */
1892 if (!info->nr_rings)
1893 info->nr_rings = 1;
1894
1895 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1896 if (!info->rinfo) {
1897 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1898 return -ENOMEM;
1899 }
1900
1901 for (i = 0; i < info->nr_rings; i++) {
1902 struct blkfront_ring_info *rinfo;
1903
1904 rinfo = &info->rinfo[i];
1905 INIT_LIST_HEAD(&rinfo->indirect_pages);
1906 INIT_LIST_HEAD(&rinfo->grants);
1907 rinfo->dev_info = info;
1908 INIT_WORK(&rinfo->work, blkif_restart_queue);
1909 spin_lock_init(&rinfo->ring_lock);
1910 }
1911 return 0;
1912}
1876/** 1913/**
1877 * Entry point to this code when a new device is created. Allocate the basic 1914 * Entry point to this code when a new device is created. Allocate the basic
1878 * structures and the ring buffer for communication with the backend, and 1915 * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1883 const struct xenbus_device_id *id) 1920 const struct xenbus_device_id *id)
1884{ 1921{
1885 int err, vdevice; 1922 int err, vdevice;
1886 unsigned int r_index;
1887 struct blkfront_info *info; 1923 struct blkfront_info *info;
1888 unsigned int backend_max_queues = 0;
1889 1924
1890 /* FIXME: Use dynamic device id if this is not set. */ 1925 /* FIXME: Use dynamic device id if this is not set. */
1891 err = xenbus_scanf(XBT_NIL, dev->nodename, 1926 err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1936 } 1971 }
1937 1972
1938 info->xbdev = dev; 1973 info->xbdev = dev;
1939 /* Check if backend supports multiple queues. */ 1974 err = negotiate_mq(info);
1940 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1975 if (err) {
1941 "multi-queue-max-queues", "%u", &backend_max_queues);
1942 if (err < 0)
1943 backend_max_queues = 1;
1944
1945 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1946 /* We need at least one ring. */
1947 if (!info->nr_rings)
1948 info->nr_rings = 1;
1949
1950 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1951 if (!info->rinfo) {
1952 xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
1953 kfree(info); 1976 kfree(info);
1954 return -ENOMEM; 1977 return err;
1955 }
1956
1957 for (r_index = 0; r_index < info->nr_rings; r_index++) {
1958 struct blkfront_ring_info *rinfo;
1959
1960 rinfo = &info->rinfo[r_index];
1961 INIT_LIST_HEAD(&rinfo->indirect_pages);
1962 INIT_LIST_HEAD(&rinfo->grants);
1963 rinfo->dev_info = info;
1964 INIT_WORK(&rinfo->work, blkif_restart_queue);
1965 spin_lock_init(&rinfo->ring_lock);
1966 } 1978 }
1967 1979
1968 mutex_init(&info->mutex); 1980 mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info)
2123static int blkfront_resume(struct xenbus_device *dev) 2135static int blkfront_resume(struct xenbus_device *dev)
2124{ 2136{
2125 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2137 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2126 int err; 2138 int err = 0;
2127 2139
2128 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2140 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2129 2141
2130 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2142 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131 2143
2144 err = negotiate_mq(info);
2145 if (err)
2146 return err;
2147
2132 err = talk_to_blkback(dev, info); 2148 err = talk_to_blkback(dev, info);
2133 2149
2134 /* 2150 /*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf1d97c..be54e5331a45 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
42/* 42/*
43 * The High Precision Event Timer driver. 43 * The High Precision Event Timer driver.
44 * This driver is closely modelled after the rtc.c driver. 44 * This driver is closely modelled after the rtc.c driver.
45 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf 45 * See HPET spec revision 1.
46 */ 46 */
47#define HPET_USER_FREQ (64) 47#define HPET_USER_FREQ (64)
48#define HPET_DRIFT (500) 48#define HPET_DRIFT (500)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
1819EXPORT_SYMBOL(get_random_int); 1819EXPORT_SYMBOL(get_random_int);
1820 1820
1821/* 1821/*
1822 * Same as get_random_int(), but returns unsigned long.
1823 */
1824unsigned long get_random_long(void)
1825{
1826 __u32 *hash;
1827 unsigned long ret;
1828
1829 if (arch_get_random_long(&ret))
1830 return ret;
1831
1832 hash = get_cpu_var(get_random_int_hash);
1833
1834 hash[0] += current->pid + jiffies + random_get_entropy();
1835 md5_transform(hash, random_int_secret);
1836 ret = *(unsigned long *)hash;
1837 put_cpu_var(get_random_int_hash);
1838
1839 return ret;
1840}
1841EXPORT_SYMBOL(get_random_long);
1842
1843/*
1822 * randomize_range() returns a start address such that 1844 * randomize_range() returns a start address such that
1823 * 1845 *
1824 * [...... <range> .....] 1846 * [...... <range> .....]
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e3666058..bae4be6501df 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o 43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o 44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o 45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
46obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o 46obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o 47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
48obj-$(CONFIG_ARCH_U300) += clk-u300.o 48obj-$(CONFIG_ARCH_U300) += clk-u300.o
49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65587e8..7b09a265d79f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node,
289 289
290 num_parents = of_clk_get_parent_count(node); 290 num_parents = of_clk_get_parent_count(node);
291 if (num_parents < 0) 291 if (num_parents < 0)
292 return; 292 num_parents = 0;
293 293
294 data = kzalloc(sizeof(*data), GFP_KERNEL); 294 data = kzalloc(sizeof(*data), GFP_KERNEL);
295 if (!data) 295 if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..89e9ca78bb94 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
299 /* Add the virtual cpufreq device */ 299 /* Add the virtual cpufreq device */
300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq", 300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
301 -1, NULL, 0); 301 -1, NULL, 0);
302 if (!cpufreq_dev) 302 if (IS_ERR(cpufreq_dev))
303 pr_warn("unable to register cpufreq device"); 303 pr_warn("unable to register cpufreq device");
304 304
305 return 0; 305 return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa35a5a..3e0b52daa35f 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = {
247 247
248void __init dove_divider_clk_init(struct device_node *np) 248void __init dove_divider_clk_init(struct device_node *np)
249{ 249{
250 void *base; 250 void __iomem *base;
251 251
252 base = of_iomap(np, 0); 252 base = of_iomap(np, 0);
253 if (WARN_ON(!base)) 253 if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e539e9f6..070037a29ea5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = {
3587 .val_bits = 32, 3587 .val_bits = 32,
3588 .max_register = 0x1fc0, 3588 .max_register = 0x1fc0,
3589 .fast_io = true, 3589 .fast_io = true,
3590 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3591}; 3590};
3592 3591
3593static const struct qcom_cc_desc gcc_apq8084_desc = { 3592static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae881d6a..dd5402bac620 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = {
3005 .val_bits = 32, 3005 .val_bits = 32,
3006 .max_register = 0x3e40, 3006 .max_register = 0x3e40,
3007 .fast_io = true, 3007 .fast_io = true,
3008 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3009}; 3008};
3010 3009
3011static const struct qcom_cc_desc gcc_ipq806x_desc = { 3010static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14dfec4..ad413036f7c7 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = {
2702 .val_bits = 32, 2702 .val_bits = 32,
2703 .max_register = 0x363c, 2703 .max_register = 0x363c,
2704 .fast_io = true, 2704 .fast_io = true,
2705 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2706}; 2705};
2707 2706
2708static const struct qcom_cc_desc gcc_msm8660_desc = { 2707static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d7d0ef..8cc9b2868b41 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = {
3336 .val_bits = 32, 3336 .val_bits = 32,
3337 .max_register = 0x80000, 3337 .max_register = 0x80000,
3338 .fast_io = true, 3338 .fast_io = true,
3339 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3340}; 3339};
3341 3340
3342static const struct qcom_cc_desc gcc_msm8916_desc = { 3341static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e32d44..983dd7dc89a7 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
3468 .val_bits = 32, 3468 .val_bits = 32,
3469 .max_register = 0x3660, 3469 .max_register = 0x3660,
3470 .fast_io = true, 3470 .fast_io = true,
3471 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3472}; 3471};
3473 3472
3474static const struct regmap_config gcc_apq8064_regmap_config = { 3473static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = {
3477 .val_bits = 32, 3476 .val_bits = 32,
3478 .max_register = 0x3880, 3477 .max_register = 0x3880,
3479 .fast_io = true, 3478 .fast_io = true,
3480 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3481}; 3479};
3482 3480
3483static const struct qcom_cc_desc gcc_msm8960_desc = { 3481static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d618e34..335952db309b 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = {
2680 .val_bits = 32, 2680 .val_bits = 32,
2681 .max_register = 0x1fc0, 2681 .max_register = 0x1fc0,
2682 .fast_io = true, 2682 .fast_io = true,
2683 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2684}; 2683};
2685 2684
2686static const struct qcom_cc_desc gcc_msm8974_desc = { 2685static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fadd5f7..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = {
419 .val_bits = 32, 419 .val_bits = 32,
420 .max_register = 0xfc, 420 .max_register = 0xfc,
421 .fast_io = true, 421 .fast_io = true,
422 .val_format_endian = REGMAP_ENDIAN_LITTLE,
423}; 422};
424 423
425static const struct qcom_cc_desc lcc_ipq806x_desc = { 424static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0ea1b8..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = {
524 .val_bits = 32, 524 .val_bits = 32,
525 .max_register = 0xfc, 525 .max_register = 0xfc,
526 .fast_io = true, 526 .fast_io = true,
527 .val_format_endian = REGMAP_ENDIAN_LITTLE,
528}; 527};
529 528
530static const struct qcom_cc_desc lcc_msm8960_desc = { 529static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fda8a0f..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = {
3368 .val_bits = 32, 3368 .val_bits = 32,
3369 .max_register = 0x5104, 3369 .max_register = 0x5104,
3370 .fast_io = true, 3370 .fast_io = true,
3371 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3372}; 3371};
3373 3372
3374static const struct qcom_cc_desc mmcc_apq8084_desc = { 3373static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048d3b9d..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
3029 .val_bits = 32, 3029 .val_bits = 32,
3030 .max_register = 0x334, 3030 .max_register = 0x334,
3031 .fast_io = true, 3031 .fast_io = true,
3032 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3033}; 3032};
3034 3033
3035static const struct regmap_config mmcc_apq8064_regmap_config = { 3034static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = {
3038 .val_bits = 32, 3037 .val_bits = 32,
3039 .max_register = 0x350, 3038 .max_register = 0x350,
3040 .fast_io = true, 3039 .fast_io = true,
3041 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3042}; 3040};
3043 3041
3044static const struct qcom_cc_desc mmcc_msm8960_desc = { 3042static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed93669..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = {
2594 .val_bits = 32, 2594 .val_bits = 32,
2595 .max_register = 0x5104, 2595 .max_register = 0x5104,
2596 .fast_io = true, 2596 .fast_io = true,
2597 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2598}; 2597};
2599 2598
2600static const struct qcom_cc_desc mmcc_msm8974_desc = { 2599static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce98033fbb..bc7fbac83ab7 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
136PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; 136PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; 137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
138 138
139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { 139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
224 RK2928_CLKGATE_CON(2), 2, GFLAGS), 224 RK2928_CLKGATE_CON(2), 2, GFLAGS),
225 225
226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, 226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
227 RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, 227 RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
228 RK2928_CLKGATE_CON(1), 0, GFLAGS), 228 RK2928_CLKGATE_CON(1), 0, GFLAGS),
229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, 229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
230 RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, 230 RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
231 RK2928_CLKGATE_CON(1), 1, GFLAGS), 231 RK2928_CLKGATE_CON(1), 1, GFLAGS),
232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, 232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
233 RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, 233 RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
234 RK2928_CLKGATE_CON(2), 4, GFLAGS), 234 RK2928_CLKGATE_CON(2), 4, GFLAGS),
235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, 235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
236 RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, 236 RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
237 RK2928_CLKGATE_CON(2), 5, GFLAGS), 237 RK2928_CLKGATE_CON(2), 5, GFLAGS),
238 238
239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, 239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
243 RK2928_CLKGATE_CON(1), 8, GFLAGS), 243 RK2928_CLKGATE_CON(1), 8, GFLAGS),
244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, 244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
245 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 245 RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
246 RK2928_CLKGATE_CON(1), 8, GFLAGS), 246 RK2928_CLKGATE_CON(1), 10, GFLAGS),
247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, 247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
248 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 248 RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
249 RK2928_CLKGATE_CON(1), 8, GFLAGS), 249 RK2928_CLKGATE_CON(1), 12, GFLAGS),
250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, 250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
251 RK2928_CLKSEL_CON(17), 0, 251 RK2928_CLKSEL_CON(17), 0,
252 RK2928_CLKGATE_CON(1), 9, GFLAGS, 252 RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
279 RK2928_CLKGATE_CON(3), 2, GFLAGS), 279 RK2928_CLKGATE_CON(3), 2, GFLAGS),
280 280
281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, 281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
282 RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, 282 RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
283 RK2928_CLKGATE_CON(2), 11, GFLAGS), 283 RK2928_CLKGATE_CON(2), 11, GFLAGS),
284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, 284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), 285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
286 286
287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, 287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
288 RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, 288 RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
289 RK2928_CLKGATE_CON(2), 13, GFLAGS), 289 RK2928_CLKGATE_CON(2), 13, GFLAGS),
290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, 290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), 291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
344 RK2928_CLKGATE_CON(10), 5, GFLAGS), 344 RK2928_CLKGATE_CON(10), 5, GFLAGS),
345 345
346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, 346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), 347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, 348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), 349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
350 350
351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, 351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
352 RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, 352 RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
353 RK2928_CLKGATE_CON(2), 6, GFLAGS), 353 RK2928_CLKGATE_CON(2), 6, GFLAGS),
354 354
355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, 355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede522269..21f3ea909fab 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), 780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
781 781
782 /* pclk_pd_alive gates */ 782 /* pclk_pd_alive gates */
783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), 783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), 784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), 785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), 786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), 787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), 788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), 789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
790 790
791 /* 791 /*
792 * pclk_vio gates 792 * pclk_vio gates
@@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), 796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
797 797
798 /* pclk_pd_pmu gates */ 798 /* pclk_pd_pmu gates */
799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), 799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), 800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), 801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), 803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
805 805
806 /* timer gates */ 806 /* timer gates */
807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), 807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f35d45c..74e7544f861b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
450 struct emc_timing *timing = tegra->timings + (i++); 450 struct emc_timing *timing = tegra->timings + (i++);
451 451
452 err = load_one_timing_from_dt(tegra, timing, child); 452 err = load_one_timing_from_dt(tegra, timing, child);
453 if (err) 453 if (err) {
454 of_node_put(child);
454 return err; 455 return err;
456 }
455 457
456 timing->ram_code = ram_code; 458 timing->ram_code = ram_code;
457 } 459 }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
499 * fuses until the apbmisc driver is loaded. 501 * fuses until the apbmisc driver is loaded.
500 */ 502 */
501 err = load_timings_from_dt(tegra, node, node_ram_code); 503 err = load_timings_from_dt(tegra, node, node_ram_code);
504 of_node_put(node);
502 if (err) 505 if (err)
503 return ERR_PTR(err); 506 return ERR_PTR(err);
504 of_node_put(node);
505 break; 507 break;
506 } 508 }
507 509
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce0738ee76..62ea38187b71 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@ enum clk_id {
11 tegra_clk_afi, 11 tegra_clk_afi,
12 tegra_clk_amx, 12 tegra_clk_amx,
13 tegra_clk_amx1, 13 tegra_clk_amx1,
14 tegra_clk_apb2ape,
14 tegra_clk_apbdma, 15 tegra_clk_apbdma,
15 tegra_clk_apbif, 16 tegra_clk_apbif,
16 tegra_clk_ape, 17 tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfab30b3..6ac3f843e7ca 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ 86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
87 PLLE_SS_CNTL_SSC_BYP) 87 PLLE_SS_CNTL_SSC_BYP)
88#define PLLE_SS_MAX_MASK 0x1ff 88#define PLLE_SS_MAX_MASK 0x1ff
89#define PLLE_SS_MAX_VAL 0x25 89#define PLLE_SS_MAX_VAL_TEGRA114 0x25
90#define PLLE_SS_MAX_VAL_TEGRA210 0x21
90#define PLLE_SS_INC_MASK (0xff << 16) 91#define PLLE_SS_INC_MASK (0xff << 16)
91#define PLLE_SS_INC_VAL (0x1 << 16) 92#define PLLE_SS_INC_VAL (0x1 << 16)
92#define PLLE_SS_INCINTRV_MASK (0x3f << 24) 93#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
93#define PLLE_SS_INCINTRV_VAL (0x20 << 24) 94#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
95#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
94#define PLLE_SS_COEFFICIENTS_MASK \ 96#define PLLE_SS_COEFFICIENTS_MASK \
95 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) 97 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
96#define PLLE_SS_COEFFICIENTS_VAL \ 98#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
97 (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) 99 (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
100 PLLE_SS_INCINTRV_VAL_TEGRA114)
101#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
102 (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
103 PLLE_SS_INCINTRV_VAL_TEGRA210)
98 104
99#define PLLE_AUX_PLLP_SEL BIT(2) 105#define PLLE_AUX_PLLP_SEL BIT(2)
100#define PLLE_AUX_USE_LOCKDET BIT(3) 106#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
880static int clk_plle_enable(struct clk_hw *hw) 886static int clk_plle_enable(struct clk_hw *hw)
881{ 887{
882 struct tegra_clk_pll *pll = to_clk_pll(hw); 888 struct tegra_clk_pll *pll = to_clk_pll(hw);
883 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 889 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
884 struct tegra_clk_pll_freq_table sel; 890 struct tegra_clk_pll_freq_table sel;
885 u32 val; 891 u32 val;
886 int err; 892 int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1378 u32 val; 1384 u32 val;
1379 int ret; 1385 int ret;
1380 unsigned long flags = 0; 1386 unsigned long flags = 0;
1381 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 1387 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
1382 1388
1383 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 1389 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
1384 return -EINVAL; 1390 return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1401 val |= PLLE_MISC_IDDQ_SW_CTRL; 1407 val |= PLLE_MISC_IDDQ_SW_CTRL;
1402 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 1408 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
1403 val |= PLLE_MISC_PLLE_PTS; 1409 val |= PLLE_MISC_PLLE_PTS;
1404 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 1410 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
1405 pll_writel_misc(val, pll); 1411 pll_writel_misc(val, pll);
1406 udelay(5); 1412 udelay(5);
1407 1413
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1428 val = pll_readl(PLLE_SS_CTRL, pll); 1434 val = pll_readl(PLLE_SS_CTRL, pll);
1429 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 1435 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
1430 val &= ~PLLE_SS_COEFFICIENTS_MASK; 1436 val &= ~PLLE_SS_COEFFICIENTS_MASK;
1431 val |= PLLE_SS_COEFFICIENTS_VAL; 1437 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
1432 pll_writel(val, PLLE_SS_CTRL, pll); 1438 pll_writel(val, PLLE_SS_CTRL, pll);
1433 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 1439 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
1434 pll_writel(val, PLLE_SS_CTRL, pll); 1440 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2012 struct tegra_clk_pll *pll = to_clk_pll(hw); 2018 struct tegra_clk_pll *pll = to_clk_pll(hw);
2013 struct tegra_clk_pll_freq_table sel; 2019 struct tegra_clk_pll_freq_table sel;
2014 u32 val; 2020 u32 val;
2015 int ret; 2021 int ret = 0;
2016 unsigned long flags = 0; 2022 unsigned long flags = 0;
2017 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 2023 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2018 2024
2019 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 2025 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
2020 return -EINVAL; 2026 return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2022 if (pll->lock) 2028 if (pll->lock)
2023 spin_lock_irqsave(pll->lock, flags); 2029 spin_lock_irqsave(pll->lock, flags);
2024 2030
2031 val = pll_readl(pll->params->aux_reg, pll);
2032 if (val & PLLE_AUX_SEQ_ENABLE)
2033 goto out;
2034
2025 val = pll_readl_base(pll); 2035 val = pll_readl_base(pll);
2026 val &= ~BIT(30); /* Disable lock override */ 2036 val &= ~BIT(30); /* Disable lock override */
2027 pll_writel_base(val, pll); 2037 pll_writel_base(val, pll);
2028 2038
2029 val = pll_readl(pll->params->aux_reg, pll);
2030 val |= PLLE_AUX_ENABLE_SWCTL;
2031 val &= ~PLLE_AUX_SEQ_ENABLE;
2032 pll_writel(val, pll->params->aux_reg, pll);
2033 udelay(1);
2034
2035 val = pll_readl_misc(pll); 2039 val = pll_readl_misc(pll);
2036 val |= PLLE_MISC_LOCK_ENABLE; 2040 val |= PLLE_MISC_LOCK_ENABLE;
2037 val |= PLLE_MISC_IDDQ_SW_CTRL; 2041 val |= PLLE_MISC_IDDQ_SW_CTRL;
2038 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 2042 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
2039 val |= PLLE_MISC_PLLE_PTS; 2043 val |= PLLE_MISC_PLLE_PTS;
2040 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 2044 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
2041 pll_writel_misc(val, pll); 2045 pll_writel_misc(val, pll);
2042 udelay(5); 2046 udelay(5);
2043 2047
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2067 val = pll_readl(PLLE_SS_CTRL, pll); 2071 val = pll_readl(PLLE_SS_CTRL, pll);
2068 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 2072 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
2069 val &= ~PLLE_SS_COEFFICIENTS_MASK; 2073 val &= ~PLLE_SS_COEFFICIENTS_MASK;
2070 val |= PLLE_SS_COEFFICIENTS_VAL; 2074 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
2071 pll_writel(val, PLLE_SS_CTRL, pll); 2075 pll_writel(val, PLLE_SS_CTRL, pll);
2072 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 2076 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
2073 pll_writel(val, PLLE_SS_CTRL, pll); 2077 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
2104 if (pll->lock) 2108 if (pll->lock)
2105 spin_lock_irqsave(pll->lock, flags); 2109 spin_lock_irqsave(pll->lock, flags);
2106 2110
2111 /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
2112 val = pll_readl(pll->params->aux_reg, pll);
2113 if (val & PLLE_AUX_SEQ_ENABLE)
2114 goto out;
2115
2107 val = pll_readl_base(pll); 2116 val = pll_readl_base(pll);
2108 val &= ~PLLE_BASE_ENABLE; 2117 val &= ~PLLE_BASE_ENABLE;
2109 pll_writel_base(val, pll); 2118 pll_writel_base(val, pll);
2110 2119
2120 val = pll_readl(pll->params->aux_reg, pll);
2121 val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
2122 pll_writel(val, pll->params->aux_reg, pll);
2123
2111 val = pll_readl_misc(pll); 2124 val = pll_readl_misc(pll);
2112 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; 2125 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
2113 pll_writel_misc(val, pll); 2126 pll_writel_misc(val, pll);
2114 udelay(1); 2127 udelay(1);
2115 2128
2129out:
2116 if (pll->lock) 2130 if (pll->lock)
2117 spin_unlock_irqrestore(pll->lock, flags); 2131 spin_unlock_irqrestore(pll->lock, flags);
2118} 2132}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a888a6..ea2b9cbf9e70 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), 773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), 774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), 775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
776 MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), 776 MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), 777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), 778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), 779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), 782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), 783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), 784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
785 MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), 785 I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), 786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), 787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), 788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), 829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), 830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), 831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
832 GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
832}; 833};
833 834
834static struct tegra_periph_init_data div_clks[] = { 835static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20e3af6..474de0f0c26d 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
67 "pll_p", "pll_p_out4", "unused", 67 "pll_p", "pll_p_out4", "unused",
68 "unused", "pll_x", "pll_x_out0" }; 68 "unused", "pll_x", "pll_x_out0" };
69 69
70const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { 70static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
71 .gen = gen4, 71 .gen = gen4,
72 .sclk_parents = sclk_parents, 72 .sclk_parents = sclk_parents,
73 .cclk_g_parents = cclk_g_parents, 73 .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
93 "unused", "unused", "unused", "unused", 93 "unused", "unused", "unused", "unused",
94 "dfllCPU_out" }; 94 "dfllCPU_out" };
95 95
96const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { 96static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
97 .gen = gen5, 97 .gen = gen5,
98 .sclk_parents = sclk_parents_gen5, 98 .sclk_parents = sclk_parents_gen5,
99 .cclk_g_parents = cclk_g_parents_gen5, 99 .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
171 *dt_clk = clk; 171 *dt_clk = clk;
172} 172}
173 173
174void __init tegra_super_clk_init(void __iomem *clk_base, 174static void __init tegra_super_clk_init(void __iomem *clk_base,
175 void __iomem *pmc_base, 175 void __iomem *pmc_base,
176 struct tegra_clk *tegra_clks, 176 struct tegra_clk *tegra_clks,
177 struct tegra_clk_pll_params *params, 177 struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c44ea83..637041fd53ad 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
59#define PLLC3_MISC3 0x50c 59#define PLLC3_MISC3 0x50c
60 60
61#define PLLM_BASE 0x90 61#define PLLM_BASE 0x90
62#define PLLM_MISC0 0x9c
63#define PLLM_MISC1 0x98 62#define PLLM_MISC1 0x98
63#define PLLM_MISC2 0x9c
64#define PLLP_BASE 0xa0 64#define PLLP_BASE 0xa0
65#define PLLP_MISC0 0xac 65#define PLLP_MISC0 0xac
66#define PLLP_MISC1 0x680 66#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
99#define PLLC4_MISC0 0x5a8 99#define PLLC4_MISC0 0x5a8
100#define PLLC4_OUT 0x5e4 100#define PLLC4_OUT 0x5e4
101#define PLLMB_BASE 0x5e8 101#define PLLMB_BASE 0x5e8
102#define PLLMB_MISC0 0x5ec 102#define PLLMB_MISC1 0x5ec
103#define PLLA1_BASE 0x6a4 103#define PLLA1_BASE 0x6a4
104#define PLLA1_MISC0 0x6a8 104#define PLLA1_MISC0 0x6a8
105#define PLLA1_MISC1 0x6ac 105#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
243}; 243};
244 244
245static const char *mux_pllmcp_clkm[] = { 245static const char *mux_pllmcp_clkm[] = {
246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
247 "pll_p",
247}; 248};
248#define mux_pllmcp_clkm_idx NULL 249#define mux_pllmcp_clkm_idx NULL
249 250
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
367/* PLLMB */ 368/* PLLMB */
368#define PLLMB_BASE_LOCK (1 << 27) 369#define PLLMB_BASE_LOCK (1 << 27)
369 370
370#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) 371#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
371#define PLLMB_MISC0_IDDQ (1 << 17) 372#define PLLMB_MISC1_IDDQ (1 << 17)
372#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) 373#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
373 374
374#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 375#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
375#define PLLMB_MISC0_WRITE_MASK 0x0007ffff 376#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
376 377
377/* PLLP */ 378/* PLLP */
378#define PLLP_BASE_OVERRIDE (1 << 28) 379#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
457 PLLCX_MISC3_WRITE_MASK); 458 PLLCX_MISC3_WRITE_MASK);
458} 459}
459 460
460void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) 461static void tegra210_pllcx_set_defaults(const char *name,
462 struct tegra_clk_pll *pllcx)
461{ 463{
462 pllcx->params->defaults_set = true; 464 pllcx->params->defaults_set = true;
463 465
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
482 udelay(1); 484 udelay(1);
483} 485}
484 486
485void _pllc_set_defaults(struct tegra_clk_pll *pllcx) 487static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
486{ 488{
487 tegra210_pllcx_set_defaults("PLL_C", pllcx); 489 tegra210_pllcx_set_defaults("PLL_C", pllcx);
488} 490}
489 491
490void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) 492static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
491{ 493{
492 tegra210_pllcx_set_defaults("PLL_C2", pllcx); 494 tegra210_pllcx_set_defaults("PLL_C2", pllcx);
493} 495}
494 496
495void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) 497static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
496{ 498{
497 tegra210_pllcx_set_defaults("PLL_C3", pllcx); 499 tegra210_pllcx_set_defaults("PLL_C3", pllcx);
498} 500}
499 501
500void _plla1_set_defaults(struct tegra_clk_pll *pllcx) 502static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
501{ 503{
502 tegra210_pllcx_set_defaults("PLL_A1", pllcx); 504 tegra210_pllcx_set_defaults("PLL_A1", pllcx);
503} 505}
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
507 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. 509 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
508 * Fractional SDM is allowed to provide exact audio rates. 510 * Fractional SDM is allowed to provide exact audio rates.
509 */ 511 */
510void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) 512static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
511{ 513{
512 u32 mask; 514 u32 mask;
513 u32 val = readl_relaxed(clk_base + plla->params->base_reg); 515 u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
559 * PLLD 561 * PLLD
560 * PLL with fractional SDM. 562 * PLL with fractional SDM.
561 */ 563 */
562void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) 564static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
563{ 565{
564 u32 val; 566 u32 val;
565 u32 mask = 0xffff; 567 u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
698 udelay(1); 700 udelay(1);
699} 701}
700 702
701void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) 703static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
702{ 704{
703 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, 705 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
704 PLLD2_MISC1_CFG_DEFAULT_VALUE, 706 PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
706 PLLD2_MISC3_CTRL2_DEFAULT_VALUE); 708 PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
707} 709}
708 710
709void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) 711static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
710{ 712{
711 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, 713 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
712 PLLDP_MISC1_CFG_DEFAULT_VALUE, 714 PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
719 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. 721 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
720 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. 722 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
721 */ 723 */
722void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) 724static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
723{ 725{
724 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); 726 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
725} 727}
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
728 * PLLRE 730 * PLLRE
729 * VCO is exposed to the clock tree directly along with post-divider output 731 * VCO is exposed to the clock tree directly along with post-divider output
730 */ 732 */
731void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) 733static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
732{ 734{
733 u32 mask; 735 u32 mask;
734 u32 val = readl_relaxed(clk_base + pllre->params->base_reg); 736 u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
780{ 782{
781 unsigned long input_rate; 783 unsigned long input_rate;
782 784
783 if (!IS_ERR_OR_NULL(hw->clk)) { 785 /* cf rate */
786 if (!IS_ERR_OR_NULL(hw->clk))
784 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 787 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
785 /* cf rate */ 788 else
786 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
787 } else {
788 input_rate = 38400000; 789 input_rate = 38400000;
789 } 790
791 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
790 792
791 switch (input_rate) { 793 switch (input_rate) {
792 case 12000000: 794 case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
841 PLLX_MISC5_WRITE_MASK); 843 PLLX_MISC5_WRITE_MASK);
842} 844}
843 845
844void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) 846static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
845{ 847{
846 u32 val; 848 u32 val;
847 u32 step_a, step_b; 849 u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
901} 903}
902 904
903/* PLLMB */ 905/* PLLMB */
904void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) 906static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
905{ 907{
906 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); 908 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
907 909
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
914 * PLL is ON: check if defaults already set, then set those 916 * PLL is ON: check if defaults already set, then set those
915 * that can be updated in flight. 917 * that can be updated in flight.
916 */ 918 */
917 val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); 919 val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
918 mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; 920 mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
919 _pll_misc_chk_default(clk_base, pllmb->params, 0, val, 921 _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
920 ~mask & PLLMB_MISC0_WRITE_MASK); 922 ~mask & PLLMB_MISC1_WRITE_MASK);
921 923
922 /* Enable lock detect */ 924 /* Enable lock detect */
923 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); 925 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
924 val &= ~mask; 926 val &= ~mask;
925 val |= PLLMB_MISC0_DEFAULT_VALUE & mask; 927 val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
926 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); 928 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
927 udelay(1); 929 udelay(1);
928 930
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
930 } 932 }
931 933
932 /* set IDDQ, enable lock detect */ 934 /* set IDDQ, enable lock detect */
933 writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, 935 writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
934 clk_base + pllmb->params->ext_misc_reg[0]); 936 clk_base + pllmb->params->ext_misc_reg[0]);
935 udelay(1); 937 udelay(1);
936} 938}
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
960 ~mask & PLLP_MISC1_WRITE_MASK); 962 ~mask & PLLP_MISC1_WRITE_MASK);
961} 963}
962 964
963void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) 965static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
964{ 966{
965 u32 mask; 967 u32 mask;
966 u32 val = readl_relaxed(clk_base + pllp->params->base_reg); 968 u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
1022 ~mask & PLLU_MISC1_WRITE_MASK); 1024 ~mask & PLLU_MISC1_WRITE_MASK);
1023} 1025}
1024 1026
1025void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) 1027static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
1026{ 1028{
1027 u32 val = readl_relaxed(clk_base + pllu->params->base_reg); 1029 u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
1028 1030
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
1212 cfg->m *= PLL_SDM_COEFF; 1214 cfg->m *= PLL_SDM_COEFF;
1213} 1215}
1214 1216
1215unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, 1217static unsigned long
1216 unsigned long parent_rate) 1218tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
1219 unsigned long parent_rate)
1217{ 1220{
1218 unsigned long vco_min = params->vco_min; 1221 unsigned long vco_min = params->vco_min;
1219 1222
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
1386 .mdiv_default = 3, 1389 .mdiv_default = 3,
1387 .div_nmp = &pllc_nmp, 1390 .div_nmp = &pllc_nmp,
1388 .freq_table = pll_cx_freq_table, 1391 .freq_table = pll_cx_freq_table,
1389 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1392 .flags = TEGRA_PLL_USE_LOCK,
1390 .set_defaults = _pllc_set_defaults, 1393 .set_defaults = _pllc_set_defaults,
1391 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1394 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1392}; 1395};
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
1425 .ext_misc_reg[2] = PLLC2_MISC2, 1428 .ext_misc_reg[2] = PLLC2_MISC2,
1426 .ext_misc_reg[3] = PLLC2_MISC3, 1429 .ext_misc_reg[3] = PLLC2_MISC3,
1427 .freq_table = pll_cx_freq_table, 1430 .freq_table = pll_cx_freq_table,
1428 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1431 .flags = TEGRA_PLL_USE_LOCK,
1429 .set_defaults = _pllc2_set_defaults, 1432 .set_defaults = _pllc2_set_defaults,
1430 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1433 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1431}; 1434};
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
1455 .ext_misc_reg[2] = PLLC3_MISC2, 1458 .ext_misc_reg[2] = PLLC3_MISC2,
1456 .ext_misc_reg[3] = PLLC3_MISC3, 1459 .ext_misc_reg[3] = PLLC3_MISC3,
1457 .freq_table = pll_cx_freq_table, 1460 .freq_table = pll_cx_freq_table,
1458 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1461 .flags = TEGRA_PLL_USE_LOCK,
1459 .set_defaults = _pllc3_set_defaults, 1462 .set_defaults = _pllc3_set_defaults,
1460 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1463 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1461}; 1464};
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1505 .base_reg = PLLC4_BASE, 1508 .base_reg = PLLC4_BASE,
1506 .misc_reg = PLLC4_MISC0, 1509 .misc_reg = PLLC4_MISC0,
1507 .lock_mask = PLL_BASE_LOCK, 1510 .lock_mask = PLL_BASE_LOCK,
1508 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1509 .lock_delay = 300, 1511 .lock_delay = 300,
1510 .max_p = PLL_QLIN_PDIV_MAX, 1512 .max_p = PLL_QLIN_PDIV_MAX,
1511 .ext_misc_reg[0] = PLLC4_MISC0, 1513 .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1517 .div_nmp = &pllss_nmp, 1519 .div_nmp = &pllss_nmp,
1518 .freq_table = pll_c4_vco_freq_table, 1520 .freq_table = pll_c4_vco_freq_table,
1519 .set_defaults = tegra210_pllc4_set_defaults, 1521 .set_defaults = tegra210_pllc4_set_defaults,
1520 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1522 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1521 TEGRA_PLL_VCO_OUT,
1522 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1523 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1523}; 1524};
1524 1525
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
1559 .vco_min = 800000000, 1560 .vco_min = 800000000,
1560 .vco_max = 1866000000, 1561 .vco_max = 1866000000,
1561 .base_reg = PLLM_BASE, 1562 .base_reg = PLLM_BASE,
1562 .misc_reg = PLLM_MISC1, 1563 .misc_reg = PLLM_MISC2,
1563 .lock_mask = PLL_BASE_LOCK, 1564 .lock_mask = PLL_BASE_LOCK,
1564 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, 1565 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
1565 .lock_delay = 300, 1566 .lock_delay = 300,
1566 .iddq_reg = PLLM_MISC0, 1567 .iddq_reg = PLLM_MISC2,
1567 .iddq_bit_idx = PLLM_IDDQ_BIT, 1568 .iddq_bit_idx = PLLM_IDDQ_BIT,
1568 .max_p = PLL_QLIN_PDIV_MAX, 1569 .max_p = PLL_QLIN_PDIV_MAX,
1569 .ext_misc_reg[0] = PLLM_MISC0, 1570 .ext_misc_reg[0] = PLLM_MISC2,
1570 .ext_misc_reg[0] = PLLM_MISC1, 1571 .ext_misc_reg[1] = PLLM_MISC1,
1571 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1572 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1572 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1573 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1573 .div_nmp = &pllm_nmp, 1574 .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
1586 .vco_min = 800000000, 1587 .vco_min = 800000000,
1587 .vco_max = 1866000000, 1588 .vco_max = 1866000000,
1588 .base_reg = PLLMB_BASE, 1589 .base_reg = PLLMB_BASE,
1589 .misc_reg = PLLMB_MISC0, 1590 .misc_reg = PLLMB_MISC1,
1590 .lock_mask = PLL_BASE_LOCK, 1591 .lock_mask = PLL_BASE_LOCK,
1591 .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
1592 .lock_delay = 300, 1592 .lock_delay = 300,
1593 .iddq_reg = PLLMB_MISC0, 1593 .iddq_reg = PLLMB_MISC1,
1594 .iddq_bit_idx = PLLMB_IDDQ_BIT, 1594 .iddq_bit_idx = PLLMB_IDDQ_BIT,
1595 .max_p = PLL_QLIN_PDIV_MAX, 1595 .max_p = PLL_QLIN_PDIV_MAX,
1596 .ext_misc_reg[0] = PLLMB_MISC0, 1596 .ext_misc_reg[0] = PLLMB_MISC1,
1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1598 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1598 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1599 .div_nmp = &pllm_nmp, 1599 .div_nmp = &pllm_nmp,
1600 .freq_table = pll_m_freq_table, 1600 .freq_table = pll_m_freq_table,
1601 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1601 .flags = TEGRA_PLL_USE_LOCK,
1602 .set_defaults = tegra210_pllmb_set_defaults, 1602 .set_defaults = tegra210_pllmb_set_defaults,
1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1604}; 1604};
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1671 .base_reg = PLLRE_BASE, 1671 .base_reg = PLLRE_BASE,
1672 .misc_reg = PLLRE_MISC0, 1672 .misc_reg = PLLRE_MISC0,
1673 .lock_mask = PLLRE_MISC_LOCK, 1673 .lock_mask = PLLRE_MISC_LOCK,
1674 .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
1675 .lock_delay = 300, 1674 .lock_delay = 300,
1676 .max_p = PLL_QLIN_PDIV_MAX, 1675 .max_p = PLL_QLIN_PDIV_MAX,
1677 .ext_misc_reg[0] = PLLRE_MISC0, 1676 .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1681 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1680 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1682 .div_nmp = &pllre_nmp, 1681 .div_nmp = &pllre_nmp,
1683 .freq_table = pll_re_vco_freq_table, 1682 .freq_table = pll_re_vco_freq_table,
1684 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | 1683 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
1685 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1686 .set_defaults = tegra210_pllre_set_defaults, 1684 .set_defaults = tegra210_pllre_set_defaults,
1687 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1685 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1688}; 1686};
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
1712 .base_reg = PLLP_BASE, 1710 .base_reg = PLLP_BASE,
1713 .misc_reg = PLLP_MISC0, 1711 .misc_reg = PLLP_MISC0,
1714 .lock_mask = PLL_BASE_LOCK, 1712 .lock_mask = PLL_BASE_LOCK,
1715 .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
1716 .lock_delay = 300, 1713 .lock_delay = 300,
1717 .iddq_reg = PLLP_MISC0, 1714 .iddq_reg = PLLP_MISC0,
1718 .iddq_bit_idx = PLLXP_IDDQ_BIT, 1715 .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
1721 .div_nmp = &pllp_nmp, 1718 .div_nmp = &pllp_nmp,
1722 .freq_table = pll_p_freq_table, 1719 .freq_table = pll_p_freq_table,
1723 .fixed_rate = 408000000, 1720 .fixed_rate = 408000000,
1724 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | 1721 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1725 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1726 .set_defaults = tegra210_pllp_set_defaults, 1722 .set_defaults = tegra210_pllp_set_defaults,
1727 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1723 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1728}; 1724};
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
1750 .ext_misc_reg[2] = PLLA1_MISC2, 1746 .ext_misc_reg[2] = PLLA1_MISC2,
1751 .ext_misc_reg[3] = PLLA1_MISC3, 1747 .ext_misc_reg[3] = PLLA1_MISC3,
1752 .freq_table = pll_cx_freq_table, 1748 .freq_table = pll_cx_freq_table,
1753 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1749 .flags = TEGRA_PLL_USE_LOCK,
1754 .set_defaults = _plla1_set_defaults, 1750 .set_defaults = _plla1_set_defaults,
1755 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1751 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1756}; 1752};
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
1787 .base_reg = PLLA_BASE, 1783 .base_reg = PLLA_BASE,
1788 .misc_reg = PLLA_MISC0, 1784 .misc_reg = PLLA_MISC0,
1789 .lock_mask = PLL_BASE_LOCK, 1785 .lock_mask = PLL_BASE_LOCK,
1790 .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
1791 .lock_delay = 300, 1786 .lock_delay = 300,
1792 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1787 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1793 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1788 .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
1802 .ext_misc_reg[1] = PLLA_MISC1, 1797 .ext_misc_reg[1] = PLLA_MISC1,
1803 .ext_misc_reg[2] = PLLA_MISC2, 1798 .ext_misc_reg[2] = PLLA_MISC2,
1804 .freq_table = pll_a_freq_table, 1799 .freq_table = pll_a_freq_table,
1805 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | 1800 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
1806 TEGRA_PLL_HAS_LOCK_ENABLE,
1807 .set_defaults = tegra210_plla_set_defaults, 1801 .set_defaults = tegra210_plla_set_defaults,
1808 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1802 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1809 .set_gain = tegra210_clk_pll_set_gain, 1803 .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
1836 .base_reg = PLLD_BASE, 1830 .base_reg = PLLD_BASE,
1837 .misc_reg = PLLD_MISC0, 1831 .misc_reg = PLLD_MISC0,
1838 .lock_mask = PLL_BASE_LOCK, 1832 .lock_mask = PLL_BASE_LOCK,
1839 .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
1840 .lock_delay = 1000, 1833 .lock_delay = 1000,
1841 .iddq_reg = PLLD_MISC0, 1834 .iddq_reg = PLLD_MISC0,
1842 .iddq_bit_idx = PLLD_IDDQ_BIT, 1835 .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
1850 .ext_misc_reg[0] = PLLD_MISC0, 1843 .ext_misc_reg[0] = PLLD_MISC0,
1851 .ext_misc_reg[1] = PLLD_MISC1, 1844 .ext_misc_reg[1] = PLLD_MISC1,
1852 .freq_table = pll_d_freq_table, 1845 .freq_table = pll_d_freq_table,
1853 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1846 .flags = TEGRA_PLL_USE_LOCK,
1854 .mdiv_default = 1, 1847 .mdiv_default = 1,
1855 .set_defaults = tegra210_plld_set_defaults, 1848 .set_defaults = tegra210_plld_set_defaults,
1856 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1849 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
1876 .base_reg = PLLD2_BASE, 1869 .base_reg = PLLD2_BASE,
1877 .misc_reg = PLLD2_MISC0, 1870 .misc_reg = PLLD2_MISC0,
1878 .lock_mask = PLL_BASE_LOCK, 1871 .lock_mask = PLL_BASE_LOCK,
1879 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1880 .lock_delay = 300, 1872 .lock_delay = 300,
1881 .iddq_reg = PLLD2_BASE, 1873 .iddq_reg = PLLD2_BASE,
1882 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1874 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
1897 .mdiv_default = 1, 1889 .mdiv_default = 1,
1898 .freq_table = tegra210_pll_d2_freq_table, 1890 .freq_table = tegra210_pll_d2_freq_table,
1899 .set_defaults = tegra210_plld2_set_defaults, 1891 .set_defaults = tegra210_plld2_set_defaults,
1900 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1892 .flags = TEGRA_PLL_USE_LOCK,
1901 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1893 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1902 .set_gain = tegra210_clk_pll_set_gain, 1894 .set_gain = tegra210_clk_pll_set_gain,
1903 .adjust_vco = tegra210_clk_adjust_vco_min, 1895 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
1920 .base_reg = PLLDP_BASE, 1912 .base_reg = PLLDP_BASE,
1921 .misc_reg = PLLDP_MISC, 1913 .misc_reg = PLLDP_MISC,
1922 .lock_mask = PLL_BASE_LOCK, 1914 .lock_mask = PLL_BASE_LOCK,
1923 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1924 .lock_delay = 300, 1915 .lock_delay = 300,
1925 .iddq_reg = PLLDP_BASE, 1916 .iddq_reg = PLLDP_BASE,
1926 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1917 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
1941 .mdiv_default = 1, 1932 .mdiv_default = 1,
1942 .freq_table = pll_dp_freq_table, 1933 .freq_table = pll_dp_freq_table,
1943 .set_defaults = tegra210_plldp_set_defaults, 1934 .set_defaults = tegra210_plldp_set_defaults,
1944 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1935 .flags = TEGRA_PLL_USE_LOCK,
1945 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1936 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1946 .set_gain = tegra210_clk_pll_set_gain, 1937 .set_gain = tegra210_clk_pll_set_gain,
1947 .adjust_vco = tegra210_clk_adjust_vco_min, 1938 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1973 .base_reg = PLLU_BASE, 1964 .base_reg = PLLU_BASE,
1974 .misc_reg = PLLU_MISC0, 1965 .misc_reg = PLLU_MISC0,
1975 .lock_mask = PLL_BASE_LOCK, 1966 .lock_mask = PLL_BASE_LOCK,
1976 .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
1977 .lock_delay = 1000, 1967 .lock_delay = 1000,
1978 .iddq_reg = PLLU_MISC0, 1968 .iddq_reg = PLLU_MISC0,
1979 .iddq_bit_idx = PLLU_IDDQ_BIT, 1969 .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1983 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1973 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1984 .div_nmp = &pllu_nmp, 1974 .div_nmp = &pllu_nmp,
1985 .freq_table = pll_u_freq_table, 1975 .freq_table = pll_u_freq_table,
1986 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1976 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1987 TEGRA_PLL_VCO_OUT,
1988 .set_defaults = tegra210_pllu_set_defaults, 1977 .set_defaults = tegra210_pllu_set_defaults,
1989 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1978 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1990}; 1979};
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
2218 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, 2207 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
2219 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, 2208 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
2220 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, 2209 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
2210 [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
2221}; 2211};
2222 2212
2223static struct tegra_devclk devclks[] __initdata = { 2213static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
2519 2509
2520 /* PLLU_VCO */ 2510 /* PLLU_VCO */
2521 val = readl(clk_base + pll_u_vco_params.base_reg); 2511 val = readl(clk_base + pll_u_vco_params.base_reg);
2522 val &= ~BIT(24); /* disable PLLU_OVERRIDE */ 2512 val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
2523 writel(val, clk_base + pll_u_vco_params.base_reg); 2513 writel(val, clk_base + pll_u_vco_params.base_reg);
2524 2514
2525 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, 2515 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2738 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, 2728 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
2739 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, 2729 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
2740 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, 2730 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
2741 { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
2742 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2743 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, 2731 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
2744 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, 2732 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
2745 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, 2733 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
460 460
461 parent = clk_hw_get_parent(hw); 461 parent = clk_hw_get_parent(hw);
462 462
463 if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { 463 if (clk_hw_get_rate(hw) ==
464 clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
464 WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); 465 WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
465 r = _omap3_noncore_dpll_bypass(clk); 466 r = _omap3_noncore_dpll_bypass(clk);
466 } else { 467 } else {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb2c9b5..3bca438ecd19 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
78 ret = regmap_read(icst->map, icst->vcoreg_off, &val); 78 ret = regmap_read(icst->map, icst->vcoreg_off, &val);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81
82 /* Mask the 18 bits used by the VCO */
83 val &= ~0x7ffff;
81 val |= vco.v | (vco.r << 9) | (vco.s << 16); 84 val |= vco.v | (vco.r << 9) | (vco.s << 16);
82 85
83 /* This magic unlocks the VCO so it can be controlled */ 86 /* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 20de861aa0ea..8bf9914d4d15 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | 782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); 783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
784 784
785 clk_disable_unprepare(dd->iclk); 785 clk_disable(dd->iclk);
786 786
787 if (req->base.complete) 787 if (req->base.complete)
788 req->base.complete(&req->base, err); 788 req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
795{ 795{
796 int err; 796 int err;
797 797
798 err = clk_prepare_enable(dd->iclk); 798 err = clk_enable(dd->iclk);
799 if (err) 799 if (err)
800 return err; 800 return err;
801 801
@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
822 dev_info(dd->dev, 822 dev_info(dd->dev,
823 "version: 0x%x\n", dd->hw_version); 823 "version: 0x%x\n", dd->hw_version);
824 824
825 clk_disable_unprepare(dd->iclk); 825 clk_disable(dd->iclk);
826} 826}
827 827
828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, 828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
1410 goto res_err; 1410 goto res_err;
1411 } 1411 }
1412 1412
1413 err = clk_prepare(sha_dd->iclk);
1414 if (err)
1415 goto res_err;
1416
1413 atmel_sha_hw_version_init(sha_dd); 1417 atmel_sha_hw_version_init(sha_dd);
1414 1418
1415 atmel_sha_get_cap(sha_dd); 1419 atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
1421 if (IS_ERR(pdata)) { 1425 if (IS_ERR(pdata)) {
1422 dev_err(&pdev->dev, "platform data not available\n"); 1426 dev_err(&pdev->dev, "platform data not available\n");
1423 err = PTR_ERR(pdata); 1427 err = PTR_ERR(pdata);
1424 goto res_err; 1428 goto iclk_unprepare;
1425 } 1429 }
1426 } 1430 }
1427 if (!pdata->dma_slave) { 1431 if (!pdata->dma_slave) {
1428 err = -ENXIO; 1432 err = -ENXIO;
1429 goto res_err; 1433 goto iclk_unprepare;
1430 } 1434 }
1431 err = atmel_sha_dma_init(sha_dd, pdata); 1435 err = atmel_sha_dma_init(sha_dd, pdata);
1432 if (err) 1436 if (err)
@@ -1457,6 +1461,8 @@ err_algs:
1457 if (sha_dd->caps.has_dma) 1461 if (sha_dd->caps.has_dma)
1458 atmel_sha_dma_cleanup(sha_dd); 1462 atmel_sha_dma_cleanup(sha_dd);
1459err_sha_dma: 1463err_sha_dma:
1464iclk_unprepare:
1465 clk_unprepare(sha_dd->iclk);
1460res_err: 1466res_err:
1461 tasklet_kill(&sha_dd->done_task); 1467 tasklet_kill(&sha_dd->done_task);
1462sha_dd_err: 1468sha_dd_err:
@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
1483 if (sha_dd->caps.has_dma) 1489 if (sha_dd->caps.has_dma)
1484 atmel_sha_dma_cleanup(sha_dd); 1490 atmel_sha_dma_cleanup(sha_dd);
1485 1491
1486 iounmap(sha_dd->io_base); 1492 clk_unprepare(sha_dd->iclk);
1487
1488 clk_put(sha_dd->iclk);
1489
1490 if (sha_dd->irq >= 0)
1491 free_irq(sha_dd->irq, sha_dd);
1492 1493
1493 return 0; 1494 return 0;
1494} 1495}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e3366e33..c0656e7f37b5 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
306 return -ENOMEM; 306 return -ENOMEM;
307 307
308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); 308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
309 if (!dma->cache_pool) 309 if (!dma->padding_pool)
310 return -ENOMEM; 310 return -ENOMEM;
311 311
312 cesa->dma = dma; 312 cesa->dma = dma;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
500 clk_set_min_rate(tegra->emc_clock, rate); 500 clk_set_min_rate(tegra->emc_clock, rate);
501 clk_set_rate(tegra->emc_clock, 0); 501 clk_set_rate(tegra->emc_clock, 0);
502 502
503 *freq = rate;
504
503 return 0; 505 return 0;
504} 506}
505 507
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 159 channel_set_bit(dw, MASK.ERROR, dwc->mask);
161 160
162 dwc->initialized = true; 161 dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
588 587
589 spin_unlock_irqrestore(&dwc->lock, flags); 588 spin_unlock_irqrestore(&dwc->lock, flags);
590 } 589 }
590
591 /* Re-enable interrupts */
592 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
591} 593}
592 594
593/* ------------------------------------------------------------------------- */ 595/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
618 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
619 } 621 }
620 622
621 /* 623 /* Re-enable interrupts */
622 * Re-enable interrupts.
623 */
624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
627} 626}
628 627
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1261int dw_dma_cyclic_start(struct dma_chan *chan) 1260int dw_dma_cyclic_start(struct dma_chan *chan)
1262{ 1261{
1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1262 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1263 struct dw_dma *dw = to_dw_dma(chan->device);
1264 unsigned long flags; 1264 unsigned long flags;
1265 1265
1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1269 } 1269 }
1270 1270
1271 spin_lock_irqsave(&dwc->lock, flags); 1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 /* Enable interrupts to perform cyclic transfer */
1274 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1275
1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]);
1277
1273 spin_unlock_irqrestore(&dwc->lock, flags); 1278 spin_unlock_irqrestore(&dwc->lock, flags);
1274 1279
1275 return 0; 1280 return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
108 108
109 /* Haswell */ 109 /* Haswell */
110 { PCI_VDEVICE(INTEL, 0x9c60) }, 110 { PCI_VDEVICE(INTEL, 0x9c60) },
111
112 /* Broadwell */
113 { PCI_VDEVICE(INTEL, 0x9ce0) },
114
111 { } 115 { }
112}; 116};
113MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 117MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
114#define CHMAP_EXIST BIT(24) 114#define CHMAP_EXIST BIT(24)
115 115
116/* CCSTAT register */
117#define EDMA_CCSTAT_ACTV BIT(4)
118
116/* 119/*
117 * Max of 20 segments per channel to conserve PaRAM slots 120 * Max of 20 segments per channel to conserve PaRAM slots
118 * Also note that MAX_NR_SG should be atleast the no.of periods 121 * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1683 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1681} 1684}
1682 1685
1686/*
1687 * This limit exists to avoid a possible infinite loop when waiting for proof
1688 * that a particular transfer is completed. This limit can be hit if there
1689 * are large bursts to/from slow devices or the CPU is never able to catch
1690 * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
1691 * RX-FIFO, as many as 55 loops have been seen.
1692 */
1693#define EDMA_MAX_TR_WAIT_LOOPS 1000
1694
1683static u32 edma_residue(struct edma_desc *edesc) 1695static u32 edma_residue(struct edma_desc *edesc)
1684{ 1696{
1685 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1697 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1698 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1699 struct edma_chan *echan = edesc->echan;
1686 struct edma_pset *pset = edesc->pset; 1700 struct edma_pset *pset = edesc->pset;
1687 dma_addr_t done, pos; 1701 dma_addr_t done, pos;
1688 int i; 1702 int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
1691 * We always read the dst/src position from the first RamPar 1705 * We always read the dst/src position from the first RamPar
1692 * pset. That's the one which is active now. 1706 * pset. That's the one which is active now.
1693 */ 1707 */
1694 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1708 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1709
1710 /*
1711 * "pos" may represent a transfer request that is still being
1712 * processed by the EDMACC or EDMATC. We will busy wait until
1713 * any one of the situations occurs:
1714 * 1. the DMA hardware is idle
1715 * 2. a new transfer request is setup
1716 * 3. we hit the loop limit
1717 */
1718 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1719 /* check if a new transfer request is setup */
1720 if (edma_get_position(echan->ecc,
1721 echan->slot[0], dst) != pos) {
1722 break;
1723 }
1724
1725 if (!--loop_count) {
1726 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1727 "%s: timeout waiting for PaRAM update\n",
1728 __func__);
1729 break;
1730 }
1731
1732 cpu_relax();
1733 }
1695 1734
1696 /* 1735 /*
1697 * Cyclic is simple. Just subtract pset[0].addr from pos. 1736 * Cyclic is simple. Just subtract pset[0].addr from pos.
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
861 return; 861 return;
862 } 862 }
863 863
864 spin_lock_bh(&ioat_chan->cleanup_lock);
865
866 /* handle the no-actives case */
867 if (!ioat_ring_active(ioat_chan)) {
868 spin_lock_bh(&ioat_chan->prep_lock);
869 check_active(ioat_chan);
870 spin_unlock_bh(&ioat_chan->prep_lock);
871 spin_unlock_bh(&ioat_chan->cleanup_lock);
872 return;
873 }
874
864 /* if we haven't made progress and we have already 875 /* if we haven't made progress and we have already
865 * acknowledged a pending completion once, then be more 876 * acknowledged a pending completion once, then be more
866 * forceful with a restart 877 * forceful with a restart
867 */ 878 */
868 spin_lock_bh(&ioat_chan->cleanup_lock);
869 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
870 __cleanup(ioat_chan, phys_complete); 880 __cleanup(ioat_chan, phys_complete);
871 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
882 u32 chanerr;
883
884 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
886 dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
887 status, chanerr);
888 dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
889 ioat_ring_active(ioat_chan));
890
872 spin_lock_bh(&ioat_chan->prep_lock); 891 spin_lock_bh(&ioat_chan->prep_lock);
873 ioat_restart_channel(ioat_chan); 892 ioat_restart_channel(ioat_chan);
874 spin_unlock_bh(&ioat_chan->prep_lock); 893 spin_unlock_bh(&ioat_chan->prep_lock);
875 spin_unlock_bh(&ioat_chan->cleanup_lock); 894 spin_unlock_bh(&ioat_chan->cleanup_lock);
876 return; 895 return;
877 } else { 896 } else
878 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
879 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
880 }
881
882 898
883 if (ioat_ring_active(ioat_chan)) 899 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
885 else {
886 spin_lock_bh(&ioat_chan->prep_lock);
887 check_active(ioat_chan);
888 spin_unlock_bh(&ioat_chan->prep_lock);
889 }
890 spin_unlock_bh(&ioat_chan->cleanup_lock); 900 spin_unlock_bh(&ioat_chan->cleanup_lock);
891} 901}
892 902
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8c4cf8..10e6774ab2a2 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
221 } 221 }
222 222
223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
224 efivar_validate(name, data, size) == false) { 224 efivar_validate(vendor, name, data, size) == false) {
225 printk(KERN_ERR "efivars: Malformed variable content\n"); 225 printk(KERN_ERR "efivars: Malformed variable content\n");
226 return -EINVAL; 226 return -EINVAL;
227 } 227 }
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
447 } 447 }
448 448
449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
450 efivar_validate(name, data, size) == false) { 450 efivar_validate(new_var->VendorGuid, name, data,
451 size) == false) {
451 printk(KERN_ERR "efivars: Malformed variable content\n"); 452 printk(KERN_ERR "efivars: Malformed variable content\n");
452 return -EINVAL; 453 return -EINVAL;
453 } 454 }
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
540static int 541static int
541efivar_create_sysfs_entry(struct efivar_entry *new_var) 542efivar_create_sysfs_entry(struct efivar_entry *new_var)
542{ 543{
543 int i, short_name_size; 544 int short_name_size;
544 char *short_name; 545 char *short_name;
545 unsigned long variable_name_size; 546 unsigned long utf8_name_size;
546 efi_char16_t *variable_name; 547 efi_char16_t *variable_name = new_var->var.VariableName;
547 int ret; 548 int ret;
548 549
549 variable_name = new_var->var.VariableName;
550 variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
551
552 /* 550 /*
553 * Length of the variable bytes in ASCII, plus the '-' separator, 551 * Length of the variable bytes in UTF8, plus the '-' separator,
554 * plus the GUID, plus trailing NUL 552 * plus the GUID, plus trailing NUL
555 */ 553 */
556 short_name_size = variable_name_size / sizeof(efi_char16_t) 554 utf8_name_size = ucs2_utf8size(variable_name);
557 + 1 + EFI_VARIABLE_GUID_LEN + 1; 555 short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
558
559 short_name = kzalloc(short_name_size, GFP_KERNEL);
560 556
557 short_name = kmalloc(short_name_size, GFP_KERNEL);
561 if (!short_name) 558 if (!short_name)
562 return -ENOMEM; 559 return -ENOMEM;
563 560
564 /* Convert Unicode to normal chars (assume top bits are 0), 561 ucs2_as_utf8(short_name, variable_name, short_name_size);
565 ala UTF-8 */ 562
566 for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
567 short_name[i] = variable_name[i] & 0xFF;
568 }
569 /* This is ugly, but necessary to separate one vendor's 563 /* This is ugly, but necessary to separate one vendor's
570 private variables from another's. */ 564 private variables from another's. */
571 565 short_name[utf8_name_size] = '-';
572 *(short_name + strlen(short_name)) = '-';
573 efi_guid_to_str(&new_var->var.VendorGuid, 566 efi_guid_to_str(&new_var->var.VendorGuid,
574 short_name + strlen(short_name)); 567 short_name + utf8_name_size + 1);
575 568
576 new_var->kobj.kset = efivars_kset; 569 new_var->kobj.kset = efivars_kset;
577 570
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb10517f..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
165} 165}
166 166
167struct variable_validate { 167struct variable_validate {
168 efi_guid_t vendor;
168 char *name; 169 char *name;
169 bool (*validate)(efi_char16_t *var_name, int match, u8 *data, 170 bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
170 unsigned long len); 171 unsigned long len);
171}; 172};
172 173
174/*
175 * This is the list of variables we need to validate, as well as the
176 * whitelist for what we think is safe not to default to immutable.
177 *
178 * If it has a validate() method that's not NULL, it'll go into the
179 * validation routine. If not, it is assumed valid, but still used for
180 * whitelisting.
181 *
182 * Note that it's sorted by {vendor,name}, but globbed names must come after
183 * any other name with the same prefix.
184 */
173static const struct variable_validate variable_validate[] = { 185static const struct variable_validate variable_validate[] = {
174 { "BootNext", validate_uint16 }, 186 { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
175 { "BootOrder", validate_boot_order }, 187 { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
176 { "DriverOrder", validate_boot_order }, 188 { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
177 { "Boot*", validate_load_option }, 189 { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
178 { "Driver*", validate_load_option }, 190 { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
179 { "ConIn", validate_device_path }, 191 { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
180 { "ConInDev", validate_device_path }, 192 { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
181 { "ConOut", validate_device_path }, 193 { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
182 { "ConOutDev", validate_device_path }, 194 { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
183 { "ErrOut", validate_device_path }, 195 { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
184 { "ErrOutDev", validate_device_path }, 196 { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
185 { "Timeout", validate_uint16 }, 197 { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
186 { "Lang", validate_ascii_string }, 198 { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
187 { "PlatformLang", validate_ascii_string }, 199 { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
188 { "", NULL }, 200 { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
201 { LINUX_EFI_CRASH_GUID, "*", NULL },
202 { NULL_GUID, "", NULL },
189}; 203};
190 204
205static bool
206variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match)
208{
209 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match];
211 char u = var_name[*match];
212
213 /* Wildcard in the matching name means we've matched */
214 if (c == '*')
215 return true;
216
217 /* Case sensitive match */
218 if (!c && *match == len)
219 return true;
220
221 if (c != u)
222 return false;
223
224 if (!c)
225 return true;
226 }
227 return true;
228}
229
191bool 230bool
192efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) 231efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
232 unsigned long data_size)
193{ 233{
194 int i; 234 int i;
195 u16 *unicode_name = var_name; 235 unsigned long utf8_size;
236 u8 *utf8_name;
196 237
197 for (i = 0; variable_validate[i].validate != NULL; i++) { 238 utf8_size = ucs2_utf8size(var_name);
198 const char *name = variable_validate[i].name; 239 utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
199 int match; 240 if (!utf8_name)
241 return false;
200 242
201 for (match = 0; ; match++) { 243 ucs2_as_utf8(utf8_name, var_name, utf8_size);
202 char c = name[match]; 244 utf8_name[utf8_size] = '\0';
203 u16 u = unicode_name[match];
204 245
205 /* All special variables are plain ascii */ 246 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
206 if (u > 127) 247 const char *name = variable_validate[i].name;
207 return true; 248 int match = 0;
208 249
209 /* Wildcard in the matching name means we've matched */ 250 if (efi_guidcmp(vendor, variable_validate[i].vendor))
210 if (c == '*') 251 continue;
211 return variable_validate[i].validate(var_name,
212 match, data, len);
213 252
214 /* Case sensitive match */ 253 if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
215 if (c != u) 254 if (variable_validate[i].validate == NULL)
216 break; 255 break;
217 256 kfree(utf8_name);
218 /* Reached the end of the string while matching */ 257 return variable_validate[i].validate(var_name, match,
219 if (!c) 258 data, data_size);
220 return variable_validate[i].validate(var_name,
221 match, data, len);
222 } 259 }
223 } 260 }
224 261 kfree(utf8_name);
225 return true; 262 return true;
226} 263}
227EXPORT_SYMBOL_GPL(efivar_validate); 264EXPORT_SYMBOL_GPL(efivar_validate);
228 265
266bool
267efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
268 size_t len)
269{
270 int i;
271 bool found = false;
272 int match = 0;
273
274 /*
275 * Check if our variable is in the validated variables list
276 */
277 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
278 if (efi_guidcmp(variable_validate[i].vendor, vendor))
279 continue;
280
281 if (variable_matches(var_name, len,
282 variable_validate[i].name, &match)) {
283 found = true;
284 break;
285 }
286 }
287
288 /*
289 * If it's in our list, it is removable.
290 */
291 return found;
292}
293EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
294
229static efi_status_t 295static efi_status_t
230check_var_size(u32 attributes, unsigned long size) 296check_var_size(u32 attributes, unsigned long size)
231{ 297{
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
852 918
853 *set = false; 919 *set = false;
854 920
855 if (efivar_validate(name, data, *size) == false) 921 if (efivar_validate(*vendor, name, data, *size) == false)
856 return -EINVAL; 922 return -EINVAL;
857 923
858 /* 924 /*
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 2aeaebd1c6e7..3f87a03abc22 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
312 handle_simple_irq, IRQ_TYPE_NONE); 312 handle_simple_irq, IRQ_TYPE_NONE);
313 313
314 if (ret) { 314 if (ret) {
315 dev_info(&pdev->dev, "could not add irqchip\n"); 315 dev_err(&pdev->dev, "could not add irqchip\n");
316 return ret; 316 goto teardown;
317 } 317 }
318 318
319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, 319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
326skip_irq: 326skip_irq:
327 return 0; 327 return 0;
328teardown: 328teardown:
329 of_mm_gpiochip_remove(&altera_gc->mmchip);
329 pr_err("%s: registration failed with status %d\n", 330 pr_err("%s: registration failed with status %d\n",
330 node->full_name, ret); 331 node->full_name, ret);
331 332
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ec58f4288649..cd007a67b302 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
195static int davinci_gpio_probe(struct platform_device *pdev) 195static int davinci_gpio_probe(struct platform_device *pdev)
196{ 196{
197 int i, base; 197 int i, base;
198 unsigned ngpio; 198 unsigned ngpio, nbank;
199 struct davinci_gpio_controller *chips; 199 struct davinci_gpio_controller *chips;
200 struct davinci_gpio_platform_data *pdata; 200 struct davinci_gpio_platform_data *pdata;
201 struct davinci_gpio_regs __iomem *regs; 201 struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
224 if (WARN_ON(ARCH_NR_GPIOS < ngpio)) 224 if (WARN_ON(ARCH_NR_GPIOS < ngpio))
225 ngpio = ARCH_NR_GPIOS; 225 ngpio = ARCH_NR_GPIOS;
226 226
227 nbank = DIV_ROUND_UP(ngpio, 32);
227 chips = devm_kzalloc(dev, 228 chips = devm_kzalloc(dev,
228 ngpio * sizeof(struct davinci_gpio_controller), 229 nbank * sizeof(struct davinci_gpio_controller),
229 GFP_KERNEL); 230 GFP_KERNEL);
230 if (!chips) 231 if (!chips)
231 return -ENOMEM; 232 return -ENOMEM;
@@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
511 return irq; 512 return irq;
512 } 513 }
513 514
514 irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, 515 irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
515 &davinci_gpio_irq_ops, 516 &davinci_gpio_irq_ops,
516 chips); 517 chips);
517 if (!irq_domain) { 518 if (!irq_domain) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 82edf95b7740..5e7770f9a415 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
87extern int amdgpu_sched_hw_submission; 87extern int amdgpu_sched_hw_submission;
88extern int amdgpu_enable_semaphores; 88extern int amdgpu_enable_semaphores;
89extern int amdgpu_powerplay; 89extern int amdgpu_powerplay;
90extern unsigned amdgpu_pcie_gen_cap;
91extern unsigned amdgpu_pcie_lane_cap;
90 92
91#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 93#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
92#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 94#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
132#define AMDGPU_RESET_VCE (1 << 13) 134#define AMDGPU_RESET_VCE (1 << 13)
133#define AMDGPU_RESET_VCE1 (1 << 14) 135#define AMDGPU_RESET_VCE1 (1 << 14)
134 136
135/* CG block flags */
136#define AMDGPU_CG_BLOCK_GFX (1 << 0)
137#define AMDGPU_CG_BLOCK_MC (1 << 1)
138#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
139#define AMDGPU_CG_BLOCK_UVD (1 << 3)
140#define AMDGPU_CG_BLOCK_VCE (1 << 4)
141#define AMDGPU_CG_BLOCK_HDP (1 << 5)
142#define AMDGPU_CG_BLOCK_BIF (1 << 6)
143
144/* CG flags */
145#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
146#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
147#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
148#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
149#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
150#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
151#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
152#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
153#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
154#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
155#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
156#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
157#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
158#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
159#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
160#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
161#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
162
163/* PG flags */
164#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
165#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
166#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
167#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
168#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
169#define AMDGPU_PG_SUPPORT_CP (1 << 5)
170#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
171#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
172#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
173#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
174#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
175
176/* GFX current status */ 137/* GFX current status */
177#define AMDGPU_GFX_NORMAL_MODE 0x00000000L 138#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
178#define AMDGPU_GFX_SAFE_MODE 0x00000001L 139#define AMDGPU_GFX_SAFE_MODE 0x00000001L
@@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
606 uint32_t align; 567 uint32_t align;
607}; 568};
608 569
609struct amdgpu_sa_bo;
610
611/* sub-allocation buffer */ 570/* sub-allocation buffer */
612struct amdgpu_sa_bo { 571struct amdgpu_sa_bo {
613 struct list_head olist; 572 struct list_head olist;
@@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2360int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2319int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2361 uint32_t flags); 2320 uint32_t flags);
2362bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); 2321bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2322bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2323 unsigned long end);
2363bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); 2324bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2364uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, 2325uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2365 struct ttm_mem_reg *mem); 2326 struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a081dda9fa2f..7a4b101e10c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
795 case CGS_SYSTEM_INFO_PCIE_MLW: 795 case CGS_SYSTEM_INFO_PCIE_MLW:
796 sys_info->value = adev->pm.pcie_mlw_mask; 796 sys_info->value = adev->pm.pcie_mlw_mask;
797 break; 797 break;
798 case CGS_SYSTEM_INFO_CG_FLAGS:
799 sys_info->value = adev->cg_flags;
800 break;
801 case CGS_SYSTEM_INFO_PG_FLAGS:
802 sys_info->value = adev->pg_flags;
803 break;
798 default: 804 default:
799 return -ENODEV; 805 return -ENODEV;
800 } 806 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 65531463f88e..51bfc114584e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1795 } 1795 }
1796 1796
1797 /* post card */ 1797 /* post card */
1798 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1798 if (!amdgpu_card_posted(adev))
1799 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1799 1800
1800 r = amdgpu_resume(adev); 1801 r = amdgpu_resume(adev);
1802 if (r)
1803 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
1801 1804
1802 amdgpu_fence_driver_resume(adev); 1805 amdgpu_fence_driver_resume(adev);
1803 1806
1804 r = amdgpu_ib_ring_tests(adev); 1807 if (resume) {
1805 if (r) 1808 r = amdgpu_ib_ring_tests(adev);
1806 DRM_ERROR("ib ring test failed (%d).\n", r); 1809 if (r)
1810 DRM_ERROR("ib ring test failed (%d).\n", r);
1811 }
1807 1812
1808 r = amdgpu_late_init(adev); 1813 r = amdgpu_late_init(adev);
1809 if (r) 1814 if (r)
@@ -1933,80 +1938,97 @@ retry:
1933 return r; 1938 return r;
1934} 1939}
1935 1940
1941#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1942#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1943
1936void amdgpu_get_pcie_info(struct amdgpu_device *adev) 1944void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1937{ 1945{
1938 u32 mask; 1946 u32 mask;
1939 int ret; 1947 int ret;
1940 1948
1941 if (pci_is_root_bus(adev->pdev->bus)) 1949 if (amdgpu_pcie_gen_cap)
1942 return; 1950 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
1943 1951
1944 if (amdgpu_pcie_gen2 == 0) 1952 if (amdgpu_pcie_lane_cap)
1945 return; 1953 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
1946 1954
1947 if (adev->flags & AMD_IS_APU) 1955 /* covers APUs as well */
1956 if (pci_is_root_bus(adev->pdev->bus)) {
1957 if (adev->pm.pcie_gen_mask == 0)
1958 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1959 if (adev->pm.pcie_mlw_mask == 0)
1960 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
1948 return; 1961 return;
1962 }
1949 1963
1950 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1964 if (adev->pm.pcie_gen_mask == 0) {
1951 if (!ret) { 1965 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1952 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 1966 if (!ret) {
1953 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 1967 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
1954 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 1968 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1955 1969 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
1956 if (mask & DRM_PCIE_SPEED_25) 1970
1957 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 1971 if (mask & DRM_PCIE_SPEED_25)
1958 if (mask & DRM_PCIE_SPEED_50) 1972 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
1959 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; 1973 if (mask & DRM_PCIE_SPEED_50)
1960 if (mask & DRM_PCIE_SPEED_80) 1974 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
1961 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; 1975 if (mask & DRM_PCIE_SPEED_80)
1962 } 1976 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
1963 ret = drm_pcie_get_max_link_width(adev->ddev, &mask); 1977 } else {
1964 if (!ret) { 1978 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1965 switch (mask) { 1979 }
1966 case 32: 1980 }
1967 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 1981 if (adev->pm.pcie_mlw_mask == 0) {
1968 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1982 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
1969 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1983 if (!ret) {
1970 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1984 switch (mask) {
1971 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1985 case 32:
1972 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1986 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
1973 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1974 break; 1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1975 case 16: 1989 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1976 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1990 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1977 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1991 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1978 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1979 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1993 break;
1980 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1994 case 16:
1981 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1995 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1982 break; 1996 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1983 case 12: 1997 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1984 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1998 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1985 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1999 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1986 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2000 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2001 break;
1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2002 case 12:
1989 break; 2003 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1990 case 8: 2004 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1991 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 2005 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2006 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1993 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2007 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1994 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2008 break;
1995 break; 2009 case 8:
1996 case 4: 2010 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1997 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 2011 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1998 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2012 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1999 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2013 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2000 break; 2014 break;
2001 case 2: 2015 case 4:
2002 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 2016 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
2003 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 2017 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2004 break; 2018 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2005 case 1: 2019 break;
2006 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 2020 case 2:
2007 break; 2021 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
2008 default: 2022 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
2009 break; 2023 break;
2024 case 1:
2025 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
2026 break;
2027 default:
2028 break;
2029 }
2030 } else {
2031 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
2010 } 2032 }
2011 } 2033 }
2012} 2034}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0a805..8297bc319369 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
72 72
73 struct drm_crtc *crtc = &amdgpuCrtc->base; 73 struct drm_crtc *crtc = &amdgpuCrtc->base;
74 unsigned long flags; 74 unsigned long flags;
75 unsigned i; 75 unsigned i, repcnt = 4;
76 int vpos, hpos, stat, min_udelay; 76 int vpos, hpos, stat, min_udelay = 0;
77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 77 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
78 78
79 amdgpu_flip_wait_fence(adev, &work->excl); 79 amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
96 * In practice this won't execute very often unless on very fast 96 * In practice this won't execute very often unless on very fast
97 * machines because the time window for this to happen is very small. 97 * machines because the time window for this to happen is very small.
98 */ 98 */
99 for (;;) { 99 while (amdgpuCrtc->enabled && repcnt--) {
100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 100 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
101 * start in hpos, and to the "fudged earlier" vblank start in 101 * start in hpos, and to the "fudged earlier" vblank start in
102 * vpos. 102 * vpos.
@@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
114 /* Sleep at least until estimated real start of hw vblank */ 114 /* Sleep at least until estimated real start of hw vblank */
115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 115 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
117 if (min_udelay > vblank->framedur_ns / 2000) {
118 /* Don't wait ridiculously long - something is wrong */
119 repcnt = 0;
120 break;
121 }
117 usleep_range(min_udelay, 2 * min_udelay); 122 usleep_range(min_udelay, 2 * min_udelay);
118 spin_lock_irqsave(&crtc->dev->event_lock, flags); 123 spin_lock_irqsave(&crtc->dev->event_lock, flags);
119 }; 124 };
120 125
126 if (!repcnt)
127 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
128 "framedur %d, linedur %d, stat %d, vpos %d, "
129 "hpos %d\n", work->crtc_id, min_udelay,
130 vblank->framedur_ns / 1000,
131 vblank->linedur_ns / 1000, stat, vpos, hpos);
132
121 /* do the flip (mmio) */ 133 /* do the flip (mmio) */
122 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); 134 adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
123 /* set the flip status */ 135 /* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9c1af8976bef..9ef1db87cf26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
83int amdgpu_sched_hw_submission = 2; 83int amdgpu_sched_hw_submission = 2;
84int amdgpu_enable_semaphores = 0; 84int amdgpu_enable_semaphores = 0;
85int amdgpu_powerplay = -1; 85int amdgpu_powerplay = -1;
86unsigned amdgpu_pcie_gen_cap = 0;
87unsigned amdgpu_pcie_lane_cap = 0;
86 88
87MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 89MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
88module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 90module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
170module_param_named(powerplay, amdgpu_powerplay, int, 0444); 172module_param_named(powerplay, amdgpu_powerplay, int, 0444);
171#endif 173#endif
172 174
175MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
176module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
177
178MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
179module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
180
173static struct pci_device_id pciidlist[] = { 181static struct pci_device_id pciidlist[] = {
174#ifdef CONFIG_DRM_AMDGPU_CIK 182#ifdef CONFIG_DRM_AMDGPU_CIK
175 /* Kaveri */ 183 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f782cd14..d20c2a8929cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
596 break; 596 break;
597 } 597 }
598 ttm_eu_backoff_reservation(&ticket, &list); 598 ttm_eu_backoff_reservation(&ticket, &list);
599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 599 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
600 !amdgpu_vm_debug)
600 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 601 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
601 602
602 drm_gem_object_unreference_unlocked(gobj); 603 drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2b2038..d4e2780c0796 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
142 142
143 list_for_each_entry(bo, &node->bos, mn_list) { 143 list_for_each_entry(bo, &node->bos, mn_list) {
144 144
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) 145 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
146 end))
146 continue; 147 continue;
147 148
148 r = amdgpu_bo_reserve(bo, true); 149 r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84eaea4a..66855b62a603 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
113 struct drm_device *ddev = dev_get_drvdata(dev); 113 struct drm_device *ddev = dev_get_drvdata(dev);
114 struct amdgpu_device *adev = ddev->dev_private; 114 struct amdgpu_device *adev = ddev->dev_private;
115 115
116 if ((adev->flags & AMD_IS_PX) &&
117 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
118 return snprintf(buf, PAGE_SIZE, "off\n");
119
116 if (adev->pp_enabled) { 120 if (adev->pp_enabled) {
117 enum amd_dpm_forced_level level; 121 enum amd_dpm_forced_level level;
118 122
@@ -140,6 +144,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
140 enum amdgpu_dpm_forced_level level; 144 enum amdgpu_dpm_forced_level level;
141 int ret = 0; 145 int ret = 0;
142 146
147 /* Can't force performance level when the card is off */
148 if ((adev->flags & AMD_IS_PX) &&
149 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
150 return -EINVAL;
151
143 if (strncmp("low", buf, strlen("low")) == 0) { 152 if (strncmp("low", buf, strlen("low")) == 0) {
144 level = AMDGPU_DPM_FORCED_LEVEL_LOW; 153 level = AMDGPU_DPM_FORCED_LEVEL_LOW;
145 } else if (strncmp("high", buf, strlen("high")) == 0) { 154 } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
157 mutex_lock(&adev->pm.mutex); 166 mutex_lock(&adev->pm.mutex);
158 if (adev->pm.dpm.thermal_active) { 167 if (adev->pm.dpm.thermal_active) {
159 count = -EINVAL; 168 count = -EINVAL;
169 mutex_unlock(&adev->pm.mutex);
160 goto fail; 170 goto fail;
161 } 171 }
162 ret = amdgpu_dpm_force_performance_level(adev, level); 172 ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
167 mutex_unlock(&adev->pm.mutex); 177 mutex_unlock(&adev->pm.mutex);
168 } 178 }
169fail: 179fail:
170 mutex_unlock(&adev->pm.mutex);
171
172 return count; 180 return count;
173} 181}
174 182
@@ -182,8 +190,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
182 char *buf) 190 char *buf)
183{ 191{
184 struct amdgpu_device *adev = dev_get_drvdata(dev); 192 struct amdgpu_device *adev = dev_get_drvdata(dev);
193 struct drm_device *ddev = adev->ddev;
185 int temp; 194 int temp;
186 195
196 /* Can't get temperature when the card is off */
197 if ((adev->flags & AMD_IS_PX) &&
198 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
199 return -EINVAL;
200
187 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) 201 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
188 temp = 0; 202 temp = 0;
189 else 203 else
@@ -634,8 +648,6 @@ force:
634 648
635 /* update display watermarks based on new power state */ 649 /* update display watermarks based on new power state */
636 amdgpu_display_bandwidth_update(adev); 650 amdgpu_display_bandwidth_update(adev);
637 /* update displays */
638 amdgpu_dpm_display_configuration_changed(adev);
639 651
640 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 652 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
641 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 653 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -655,6 +667,9 @@ force:
655 667
656 amdgpu_dpm_post_set_power_state(adev); 668 amdgpu_dpm_post_set_power_state(adev);
657 669
670 /* update displays */
671 amdgpu_dpm_display_configuration_changed(adev);
672
658 if (adev->pm.funcs->force_performance_level) { 673 if (adev->pm.funcs->force_performance_level) {
659 if (adev->pm.dpm.thermal_active) { 674 if (adev->pm.dpm.thermal_active) {
660 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; 675 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
847 struct drm_info_node *node = (struct drm_info_node *) m->private; 862 struct drm_info_node *node = (struct drm_info_node *) m->private;
848 struct drm_device *dev = node->minor->dev; 863 struct drm_device *dev = node->minor->dev;
849 struct amdgpu_device *adev = dev->dev_private; 864 struct amdgpu_device *adev = dev->dev_private;
865 struct drm_device *ddev = adev->ddev;
850 866
851 if (!adev->pm.dpm_enabled) { 867 if (!adev->pm.dpm_enabled) {
852 seq_printf(m, "dpm not enabled\n"); 868 seq_printf(m, "dpm not enabled\n");
853 return 0; 869 return 0;
854 } 870 }
855 if (adev->pp_enabled) { 871 if ((adev->flags & AMD_IS_PX) &&
872 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
873 seq_printf(m, "PX asic powered off\n");
874 } else if (adev->pp_enabled) {
856 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 875 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
857 } else { 876 } else {
858 mutex_lock(&adev->pm.mutex); 877 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb0434b..ca72a2e487b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
354 354
355 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) 355 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
356 if (fences[i]) 356 if (fences[i])
357 fences[count++] = fences[i]; 357 fences[count++] = fence_get(fences[i]);
358 358
359 if (count) { 359 if (count) {
360 spin_unlock(&sa_manager->wq.lock); 360 spin_unlock(&sa_manager->wq.lock);
361 t = fence_wait_any_timeout(fences, count, false, 361 t = fence_wait_any_timeout(fences, count, false,
362 MAX_SCHEDULE_TIMEOUT); 362 MAX_SCHEDULE_TIMEOUT);
363 for (i = 0; i < count; ++i)
364 fence_put(fences[i]);
365
363 r = (t > 0) ? 0 : t; 366 r = (t > 0) ? 0 : t;
364 spin_lock(&sa_manager->wq.lock); 367 spin_lock(&sa_manager->wq.lock);
365 } else { 368 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 55cf05e1c81c..1cbb16e15307 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
712 0, PAGE_SIZE, 712 0, PAGE_SIZE,
713 PCI_DMA_BIDIRECTIONAL); 713 PCI_DMA_BIDIRECTIONAL);
714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { 714 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
715 while (--i) { 715 while (i--) {
716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], 716 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 717 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
718 gtt->ttm.dma_address[i] = 0; 718 gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
783 return !!gtt->userptr; 783 return !!gtt->userptr;
784} 784}
785 785
786bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
787 unsigned long end)
788{
789 struct amdgpu_ttm_tt *gtt = (void *)ttm;
790 unsigned long size;
791
792 if (gtt == NULL)
793 return false;
794
795 if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
796 return false;
797
798 size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
799 if (gtt->userptr > end || gtt->userptr + size <= start)
800 return false;
801
802 return true;
803}
804
786bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) 805bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
787{ 806{
788 struct amdgpu_ttm_tt *gtt = (void *)ttm; 807 struct amdgpu_ttm_tt *gtt = (void *)ttm;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8b4731d4e10e..474ca02b0949 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -31,6 +31,7 @@
31#include "ci_dpm.h" 31#include "ci_dpm.h"
32#include "gfx_v7_0.h" 32#include "gfx_v7_0.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34#include <linux/seq_file.h> 35#include <linux/seq_file.h>
35 36
36#include "smu/smu_7_0_1_d.h" 37#include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
5835 u8 frev, crev; 5836 u8 frev, crev;
5836 struct ci_power_info *pi; 5837 struct ci_power_info *pi;
5837 int ret; 5838 int ret;
5838 u32 mask;
5839 5839
5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5841 if (pi == NULL) 5841 if (pi == NULL)
5842 return -ENOMEM; 5842 return -ENOMEM;
5843 adev->pm.dpm.priv = pi; 5843 adev->pm.dpm.priv = pi;
5844 5844
5845 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 5845 pi->sys_pcie_mask =
5846 if (ret) 5846 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5847 pi->sys_pcie_mask = 0; 5847 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5848 else 5848
5849 pi->sys_pcie_mask = mask;
5850 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 5849 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5851 5850
5852 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; 5851 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index fd9c9588ef46..155965ed14a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
1762 if (amdgpu_aspm == 0) 1762 if (amdgpu_aspm == 0)
1763 return; 1763 return;
1764 1764
1765 if (pci_is_root_bus(adev->pdev->bus))
1766 return;
1767
1765 /* XXX double check APUs */ 1768 /* XXX double check APUs */
1766 if (adev->flags & AMD_IS_APU) 1769 if (adev->flags & AMD_IS_APU)
1767 return; 1770 return;
@@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
2332 switch (adev->asic_type) { 2335 switch (adev->asic_type) {
2333 case CHIP_BONAIRE: 2336 case CHIP_BONAIRE:
2334 adev->cg_flags = 2337 adev->cg_flags =
2335 AMDGPU_CG_SUPPORT_GFX_MGCG | 2338 AMD_CG_SUPPORT_GFX_MGCG |
2336 AMDGPU_CG_SUPPORT_GFX_MGLS | 2339 AMD_CG_SUPPORT_GFX_MGLS |
2337 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2340 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2338 AMDGPU_CG_SUPPORT_GFX_CGLS | 2341 AMD_CG_SUPPORT_GFX_CGLS |
2339 AMDGPU_CG_SUPPORT_GFX_CGTS | 2342 AMD_CG_SUPPORT_GFX_CGTS |
2340 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2343 AMD_CG_SUPPORT_GFX_CGTS_LS |
2341 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2344 AMD_CG_SUPPORT_GFX_CP_LS |
2342 AMDGPU_CG_SUPPORT_MC_LS | 2345 AMD_CG_SUPPORT_MC_LS |
2343 AMDGPU_CG_SUPPORT_MC_MGCG | 2346 AMD_CG_SUPPORT_MC_MGCG |
2344 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2347 AMD_CG_SUPPORT_SDMA_MGCG |
2345 AMDGPU_CG_SUPPORT_SDMA_LS | 2348 AMD_CG_SUPPORT_SDMA_LS |
2346 AMDGPU_CG_SUPPORT_BIF_LS | 2349 AMD_CG_SUPPORT_BIF_LS |
2347 AMDGPU_CG_SUPPORT_VCE_MGCG | 2350 AMD_CG_SUPPORT_VCE_MGCG |
2348 AMDGPU_CG_SUPPORT_UVD_MGCG | 2351 AMD_CG_SUPPORT_UVD_MGCG |
2349 AMDGPU_CG_SUPPORT_HDP_LS | 2352 AMD_CG_SUPPORT_HDP_LS |
2350 AMDGPU_CG_SUPPORT_HDP_MGCG; 2353 AMD_CG_SUPPORT_HDP_MGCG;
2351 adev->pg_flags = 0; 2354 adev->pg_flags = 0;
2352 adev->external_rev_id = adev->rev_id + 0x14; 2355 adev->external_rev_id = adev->rev_id + 0x14;
2353 break; 2356 break;
2354 case CHIP_HAWAII: 2357 case CHIP_HAWAII:
2355 adev->cg_flags = 2358 adev->cg_flags =
2356 AMDGPU_CG_SUPPORT_GFX_MGCG | 2359 AMD_CG_SUPPORT_GFX_MGCG |
2357 AMDGPU_CG_SUPPORT_GFX_MGLS | 2360 AMD_CG_SUPPORT_GFX_MGLS |
2358 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2361 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2359 AMDGPU_CG_SUPPORT_GFX_CGLS | 2362 AMD_CG_SUPPORT_GFX_CGLS |
2360 AMDGPU_CG_SUPPORT_GFX_CGTS | 2363 AMD_CG_SUPPORT_GFX_CGTS |
2361 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2364 AMD_CG_SUPPORT_GFX_CP_LS |
2362 AMDGPU_CG_SUPPORT_MC_LS | 2365 AMD_CG_SUPPORT_MC_LS |
2363 AMDGPU_CG_SUPPORT_MC_MGCG | 2366 AMD_CG_SUPPORT_MC_MGCG |
2364 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2367 AMD_CG_SUPPORT_SDMA_MGCG |
2365 AMDGPU_CG_SUPPORT_SDMA_LS | 2368 AMD_CG_SUPPORT_SDMA_LS |
2366 AMDGPU_CG_SUPPORT_BIF_LS | 2369 AMD_CG_SUPPORT_BIF_LS |
2367 AMDGPU_CG_SUPPORT_VCE_MGCG | 2370 AMD_CG_SUPPORT_VCE_MGCG |
2368 AMDGPU_CG_SUPPORT_UVD_MGCG | 2371 AMD_CG_SUPPORT_UVD_MGCG |
2369 AMDGPU_CG_SUPPORT_HDP_LS | 2372 AMD_CG_SUPPORT_HDP_LS |
2370 AMDGPU_CG_SUPPORT_HDP_MGCG; 2373 AMD_CG_SUPPORT_HDP_MGCG;
2371 adev->pg_flags = 0; 2374 adev->pg_flags = 0;
2372 adev->external_rev_id = 0x28; 2375 adev->external_rev_id = 0x28;
2373 break; 2376 break;
2374 case CHIP_KAVERI: 2377 case CHIP_KAVERI:
2375 adev->cg_flags = 2378 adev->cg_flags =
2376 AMDGPU_CG_SUPPORT_GFX_MGCG | 2379 AMD_CG_SUPPORT_GFX_MGCG |
2377 AMDGPU_CG_SUPPORT_GFX_MGLS | 2380 AMD_CG_SUPPORT_GFX_MGLS |
2378 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2381 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2379 AMDGPU_CG_SUPPORT_GFX_CGLS | 2382 AMD_CG_SUPPORT_GFX_CGLS |
2380 AMDGPU_CG_SUPPORT_GFX_CGTS | 2383 AMD_CG_SUPPORT_GFX_CGTS |
2381 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2384 AMD_CG_SUPPORT_GFX_CGTS_LS |
2382 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2385 AMD_CG_SUPPORT_GFX_CP_LS |
2383 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2386 AMD_CG_SUPPORT_SDMA_MGCG |
2384 AMDGPU_CG_SUPPORT_SDMA_LS | 2387 AMD_CG_SUPPORT_SDMA_LS |
2385 AMDGPU_CG_SUPPORT_BIF_LS | 2388 AMD_CG_SUPPORT_BIF_LS |
2386 AMDGPU_CG_SUPPORT_VCE_MGCG | 2389 AMD_CG_SUPPORT_VCE_MGCG |
2387 AMDGPU_CG_SUPPORT_UVD_MGCG | 2390 AMD_CG_SUPPORT_UVD_MGCG |
2388 AMDGPU_CG_SUPPORT_HDP_LS | 2391 AMD_CG_SUPPORT_HDP_LS |
2389 AMDGPU_CG_SUPPORT_HDP_MGCG; 2392 AMD_CG_SUPPORT_HDP_MGCG;
2390 adev->pg_flags = 2393 adev->pg_flags =
2391 /*AMDGPU_PG_SUPPORT_GFX_PG | 2394 /*AMD_PG_SUPPORT_GFX_PG |
2392 AMDGPU_PG_SUPPORT_GFX_SMG | 2395 AMD_PG_SUPPORT_GFX_SMG |
2393 AMDGPU_PG_SUPPORT_GFX_DMG |*/ 2396 AMD_PG_SUPPORT_GFX_DMG |*/
2394 AMDGPU_PG_SUPPORT_UVD | 2397 AMD_PG_SUPPORT_UVD |
2395 /*AMDGPU_PG_SUPPORT_VCE | 2398 /*AMD_PG_SUPPORT_VCE |
2396 AMDGPU_PG_SUPPORT_CP | 2399 AMD_PG_SUPPORT_CP |
2397 AMDGPU_PG_SUPPORT_GDS | 2400 AMD_PG_SUPPORT_GDS |
2398 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2401 AMD_PG_SUPPORT_RLC_SMU_HS |
2399 AMDGPU_PG_SUPPORT_ACP | 2402 AMD_PG_SUPPORT_ACP |
2400 AMDGPU_PG_SUPPORT_SAMU |*/ 2403 AMD_PG_SUPPORT_SAMU |*/
2401 0; 2404 0;
2402 if (adev->pdev->device == 0x1312 || 2405 if (adev->pdev->device == 0x1312 ||
2403 adev->pdev->device == 0x1316 || 2406 adev->pdev->device == 0x1316 ||
@@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
2409 case CHIP_KABINI: 2412 case CHIP_KABINI:
2410 case CHIP_MULLINS: 2413 case CHIP_MULLINS:
2411 adev->cg_flags = 2414 adev->cg_flags =
2412 AMDGPU_CG_SUPPORT_GFX_MGCG | 2415 AMD_CG_SUPPORT_GFX_MGCG |
2413 AMDGPU_CG_SUPPORT_GFX_MGLS | 2416 AMD_CG_SUPPORT_GFX_MGLS |
2414 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2417 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2415 AMDGPU_CG_SUPPORT_GFX_CGLS | 2418 AMD_CG_SUPPORT_GFX_CGLS |
2416 AMDGPU_CG_SUPPORT_GFX_CGTS | 2419 AMD_CG_SUPPORT_GFX_CGTS |
2417 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2420 AMD_CG_SUPPORT_GFX_CGTS_LS |
2418 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2421 AMD_CG_SUPPORT_GFX_CP_LS |
2419 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2422 AMD_CG_SUPPORT_SDMA_MGCG |
2420 AMDGPU_CG_SUPPORT_SDMA_LS | 2423 AMD_CG_SUPPORT_SDMA_LS |
2421 AMDGPU_CG_SUPPORT_BIF_LS | 2424 AMD_CG_SUPPORT_BIF_LS |
2422 AMDGPU_CG_SUPPORT_VCE_MGCG | 2425 AMD_CG_SUPPORT_VCE_MGCG |
2423 AMDGPU_CG_SUPPORT_UVD_MGCG | 2426 AMD_CG_SUPPORT_UVD_MGCG |
2424 AMDGPU_CG_SUPPORT_HDP_LS | 2427 AMD_CG_SUPPORT_HDP_LS |
2425 AMDGPU_CG_SUPPORT_HDP_MGCG; 2428 AMD_CG_SUPPORT_HDP_MGCG;
2426 adev->pg_flags = 2429 adev->pg_flags =
2427 /*AMDGPU_PG_SUPPORT_GFX_PG | 2430 /*AMD_PG_SUPPORT_GFX_PG |
2428 AMDGPU_PG_SUPPORT_GFX_SMG | */ 2431 AMD_PG_SUPPORT_GFX_SMG | */
2429 AMDGPU_PG_SUPPORT_UVD | 2432 AMD_PG_SUPPORT_UVD |
2430 /*AMDGPU_PG_SUPPORT_VCE | 2433 /*AMD_PG_SUPPORT_VCE |
2431 AMDGPU_PG_SUPPORT_CP | 2434 AMD_PG_SUPPORT_CP |
2432 AMDGPU_PG_SUPPORT_GDS | 2435 AMD_PG_SUPPORT_GDS |
2433 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2436 AMD_PG_SUPPORT_RLC_SMU_HS |
2434 AMDGPU_PG_SUPPORT_SAMU |*/ 2437 AMD_PG_SUPPORT_SAMU |*/
2435 0; 2438 0;
2436 if (adev->asic_type == CHIP_KABINI) { 2439 if (adev->asic_type == CHIP_KABINI) {
2437 if (adev->rev_id == 0) 2440 if (adev->rev_id == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ceddf08..c55ecf0ea845 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
885{ 885{
886 u32 orig, data; 886 u32 orig, data;
887 887
888 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { 888 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
889 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); 889 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
890 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); 890 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
891 } else { 891 } else {
@@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
906{ 906{
907 u32 orig, data; 907 u32 orig, data;
908 908
909 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { 909 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
910 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); 910 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
911 data |= 0x100; 911 data |= 0x100;
912 if (orig != data) 912 if (orig != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 4dd17f2dd905..9056355309d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
445 pi->gfx_pg_threshold = 500; 445 pi->gfx_pg_threshold = 500;
446 pi->caps_fps = true; 446 pi->caps_fps = true;
447 /* uvd */ 447 /* uvd */
448 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 448 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
449 pi->caps_uvd_dpm = true; 449 pi->caps_uvd_dpm = true;
450 /* vce */ 450 /* vce */
451 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 451 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
452 pi->caps_vce_dpm = true; 452 pi->caps_vce_dpm = true;
453 /* acp */ 453 /* acp */
454 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 454 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
455 pi->caps_acp_dpm = true; 455 pi->caps_acp_dpm = true;
456 456
457 pi->caps_stable_power_state = false; 457 pi->caps_stable_power_state = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 6c76139de1c9..7732059ae30f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
4109 4109
4110 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); 4110 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
4111 4111
4112 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { 4112 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4113 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 4113 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
4114 4114
4115 tmp = gfx_v7_0_halt_rlc(adev); 4115 tmp = gfx_v7_0_halt_rlc(adev);
@@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
4147{ 4147{
4148 u32 data, orig, tmp = 0; 4148 u32 data, orig, tmp = 0;
4149 4149
4150 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { 4150 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4151 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { 4151 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4152 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { 4152 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4153 orig = data = RREG32(mmCP_MEM_SLP_CNTL); 4153 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
4154 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 4154 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4155 if (orig != data) 4155 if (orig != data)
@@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
4176 4176
4177 gfx_v7_0_update_rlc(adev, tmp); 4177 gfx_v7_0_update_rlc(adev, tmp);
4178 4178
4179 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { 4179 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
4180 orig = data = RREG32(mmCGTS_SM_CTRL_REG); 4180 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
4181 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; 4181 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
4182 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); 4182 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
4183 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; 4183 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
4184 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; 4184 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
4185 if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && 4185 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
4186 (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) 4186 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
4187 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; 4187 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
4188 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; 4188 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
4189 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; 4189 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
4249 u32 data, orig; 4249 u32 data, orig;
4250 4250
4251 orig = data = RREG32(mmRLC_PG_CNTL); 4251 orig = data = RREG32(mmRLC_PG_CNTL);
4252 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 4252 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
4253 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 4253 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
4254 else 4254 else
4255 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 4255 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
4263 u32 data, orig; 4263 u32 data, orig;
4264 4264
4265 orig = data = RREG32(mmRLC_PG_CNTL); 4265 orig = data = RREG32(mmRLC_PG_CNTL);
4266 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 4266 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
4267 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 4267 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
4268 else 4268 else
4269 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 4269 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
4276 u32 data, orig; 4276 u32 data, orig;
4277 4277
4278 orig = data = RREG32(mmRLC_PG_CNTL); 4278 orig = data = RREG32(mmRLC_PG_CNTL);
4279 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) 4279 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
4280 data &= ~0x8000; 4280 data &= ~0x8000;
4281 else 4281 else
4282 data |= 0x8000; 4282 data |= 0x8000;
@@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
4289 u32 data, orig; 4289 u32 data, orig;
4290 4290
4291 orig = data = RREG32(mmRLC_PG_CNTL); 4291 orig = data = RREG32(mmRLC_PG_CNTL);
4292 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) 4292 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
4293 data &= ~0x2000; 4293 data &= ~0x2000;
4294 else 4294 else
4295 data |= 0x2000; 4295 data |= 0x2000;
@@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
4370{ 4370{
4371 u32 data, orig; 4371 u32 data, orig;
4372 4372
4373 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { 4373 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
4374 orig = data = RREG32(mmRLC_PG_CNTL); 4374 orig = data = RREG32(mmRLC_PG_CNTL);
4375 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 4375 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
4376 if (orig != data) 4376 if (orig != data)
@@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
4442 u32 data, orig; 4442 u32 data, orig;
4443 4443
4444 orig = data = RREG32(mmRLC_PG_CNTL); 4444 orig = data = RREG32(mmRLC_PG_CNTL);
4445 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) 4445 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
4446 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 4446 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
4447 else 4447 else
4448 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 4448 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
4456 u32 data, orig; 4456 u32 data, orig;
4457 4457
4458 orig = data = RREG32(mmRLC_PG_CNTL); 4458 orig = data = RREG32(mmRLC_PG_CNTL);
4459 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) 4459 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
4460 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 4460 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
4461 else 4461 else
4462 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 4462 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4623 4623
4624static void gfx_v7_0_init_pg(struct amdgpu_device *adev) 4624static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4625{ 4625{
4626 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4626 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4627 AMDGPU_PG_SUPPORT_GFX_SMG | 4627 AMD_PG_SUPPORT_GFX_SMG |
4628 AMDGPU_PG_SUPPORT_GFX_DMG | 4628 AMD_PG_SUPPORT_GFX_DMG |
4629 AMDGPU_PG_SUPPORT_CP | 4629 AMD_PG_SUPPORT_CP |
4630 AMDGPU_PG_SUPPORT_GDS | 4630 AMD_PG_SUPPORT_GDS |
4631 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4631 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4632 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); 4632 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4633 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); 4633 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4634 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4634 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4635 gfx_v7_0_init_gfx_cgpg(adev); 4635 gfx_v7_0_init_gfx_cgpg(adev);
4636 gfx_v7_0_enable_cp_pg(adev, true); 4636 gfx_v7_0_enable_cp_pg(adev, true);
4637 gfx_v7_0_enable_gds_pg(adev, true); 4637 gfx_v7_0_enable_gds_pg(adev, true);
@@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4643 4643
4644static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) 4644static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4645{ 4645{
4646 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4646 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4647 AMDGPU_PG_SUPPORT_GFX_SMG | 4647 AMD_PG_SUPPORT_GFX_SMG |
4648 AMDGPU_PG_SUPPORT_GFX_DMG | 4648 AMD_PG_SUPPORT_GFX_DMG |
4649 AMDGPU_PG_SUPPORT_CP | 4649 AMD_PG_SUPPORT_CP |
4650 AMDGPU_PG_SUPPORT_GDS | 4650 AMD_PG_SUPPORT_GDS |
4651 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4651 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4652 gfx_v7_0_update_gfx_pg(adev, false); 4652 gfx_v7_0_update_gfx_pg(adev, false);
4653 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4653 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4654 gfx_v7_0_enable_cp_pg(adev, false); 4654 gfx_v7_0_enable_cp_pg(adev, false);
4655 gfx_v7_0_enable_gds_pg(adev, false); 4655 gfx_v7_0_enable_gds_pg(adev, false);
4656 } 4656 }
@@ -5527,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
5527 if (state == AMD_PG_STATE_GATE) 5527 if (state == AMD_PG_STATE_GATE)
5528 gate = true; 5528 gate = true;
5529 5529
5530 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 5530 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5531 AMDGPU_PG_SUPPORT_GFX_SMG | 5531 AMD_PG_SUPPORT_GFX_SMG |
5532 AMDGPU_PG_SUPPORT_GFX_DMG | 5532 AMD_PG_SUPPORT_GFX_DMG |
5533 AMDGPU_PG_SUPPORT_CP | 5533 AMD_PG_SUPPORT_CP |
5534 AMDGPU_PG_SUPPORT_GDS | 5534 AMD_PG_SUPPORT_GDS |
5535 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 5535 AMD_PG_SUPPORT_RLC_SMU_HS)) {
5536 gfx_v7_0_update_gfx_pg(adev, gate); 5536 gfx_v7_0_update_gfx_pg(adev, gate);
5537 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 5537 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5538 gfx_v7_0_enable_cp_pg(adev, gate); 5538 gfx_v7_0_enable_cp_pg(adev, gate);
5539 gfx_v7_0_enable_gds_pg(adev, gate); 5539 gfx_v7_0_enable_gds_pg(adev, gate);
5540 } 5540 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8f8ec37ecd88..1c40bd90afbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4995,7 +4995,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4995 case AMDGPU_IRQ_STATE_ENABLE: 4995 case AMDGPU_IRQ_STATE_ENABLE:
4996 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4996 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4997 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4997 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4998 PRIV_REG_INT_ENABLE, 0); 4998 PRIV_REG_INT_ENABLE, 1);
4999 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4999 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
5000 break; 5000 break;
5001 default: 5001 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 8aa2991ab379..b8060795b27b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -792,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
792 792
793 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 793 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
794 orig = data = RREG32(mc_cg_registers[i]); 794 orig = data = RREG32(mc_cg_registers[i]);
795 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) 795 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
796 data |= mc_cg_ls_en[i]; 796 data |= mc_cg_ls_en[i];
797 else 797 else
798 data &= ~mc_cg_ls_en[i]; 798 data &= ~mc_cg_ls_en[i];
@@ -809,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
809 809
810 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 810 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
811 orig = data = RREG32(mc_cg_registers[i]); 811 orig = data = RREG32(mc_cg_registers[i]);
812 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) 812 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
813 data |= mc_cg_en[i]; 813 data |= mc_cg_en[i];
814 else 814 else
815 data &= ~mc_cg_en[i]; 815 data &= ~mc_cg_en[i];
@@ -825,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
825 825
826 orig = data = RREG32_PCIE(ixPCIE_CNTL2); 826 orig = data = RREG32_PCIE(ixPCIE_CNTL2);
827 827
828 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { 828 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
829 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); 829 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
830 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); 830 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
831 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); 831 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -848,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
848 848
849 orig = data = RREG32(mmHDP_HOST_PATH_CNTL); 849 orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
850 850
851 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) 851 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
852 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); 852 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
853 else 853 else
854 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); 854 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -864,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
864 864
865 orig = data = RREG32(mmHDP_MEM_POWER_LS); 865 orig = data = RREG32(mmHDP_MEM_POWER_LS);
866 866
867 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) 867 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
868 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); 868 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
869 else 869 else
870 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); 870 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c7f1db..654d76723bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
2859 pi->voltage_drop_t = 0; 2859 pi->voltage_drop_t = 0;
2860 pi->caps_sclk_throttle_low_notification = false; 2860 pi->caps_sclk_throttle_low_notification = false;
2861 pi->caps_fps = false; /* true? */ 2861 pi->caps_fps = false; /* true? */
2862 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 2862 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
2863 pi->caps_uvd_dpm = true; 2863 pi->caps_uvd_dpm = true;
2864 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 2864 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
2865 pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; 2865 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
2866 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 2866 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
2867 pi->caps_stable_p_state = false; 2867 pi->caps_stable_p_state = false;
2868 2868
2869 ret = kv_parse_sys_info_table(adev); 2869 ret = kv_parse_sys_info_table(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5e9f73af83a8..fbd3767671bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
611{ 611{
612 u32 orig, data; 612 u32 orig, data;
613 613
614 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { 614 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
615 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); 615 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
616 data = 0xfff; 616 data = 0xfff;
617 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); 617 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
830 bool gate = false; 830 bool gate = false;
831 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 831 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
832 832
833 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
834 return 0;
835
833 if (state == AMD_CG_STATE_GATE) 836 if (state == AMD_CG_STATE_GATE)
834 gate = true; 837 gate = true;
835 838
@@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
848 * revisit this when there is a cleaner line between 851 * revisit this when there is a cleaner line between
849 * the smc and the hw blocks 852 * the smc and the hw blocks
850 */ 853 */
851 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 854 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
855
856 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
857 return 0;
852 858
853 if (state == AMD_PG_STATE_GATE) { 859 if (state == AMD_PG_STATE_GATE) {
854 uvd_v4_2_stop(adev); 860 uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 38864f562981..57f1c5bf3bf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
774static int uvd_v5_0_set_clockgating_state(void *handle, 774static int uvd_v5_0_set_clockgating_state(void *handle,
775 enum amd_clockgating_state state) 775 enum amd_clockgating_state state)
776{ 776{
777 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
778
779 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
780 return 0;
781
777 return 0; 782 return 0;
778} 783}
779 784
@@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
789 */ 794 */
790 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 795 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
791 796
797 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
798 return 0;
799
792 if (state == AMD_PG_STATE_GATE) { 800 if (state == AMD_PG_STATE_GATE) {
793 uvd_v5_0_stop(adev); 801 uvd_v5_0_stop(adev);
794 return 0; 802 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3d5913926436..0b365b7651ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
532 uvd_v6_0_mc_resume(adev); 532 uvd_v6_0_mc_resume(adev);
533 533
534 /* Set dynamic clock gating in S/W control mode */ 534 /* Set dynamic clock gating in S/W control mode */
535 if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { 535 if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
536 if (adev->flags & AMD_IS_APU) 536 if (adev->flags & AMD_IS_APU)
537 cz_set_uvd_clock_gating_branches(adev, false); 537 cz_set_uvd_clock_gating_branches(adev, false);
538 else 538 else
@@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
1000 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1000 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1001 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1001 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
1002 1002
1003 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) 1003 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
1004 return 0; 1004 return 0;
1005 1005
1006 if (enable) { 1006 if (enable) {
@@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
1030 */ 1030 */
1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1031 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1032 1032
1033 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1034 return 0;
1035
1033 if (state == AMD_PG_STATE_GATE) { 1036 if (state == AMD_PG_STATE_GATE) {
1034 uvd_v6_0_stop(adev); 1037 uvd_v6_0_stop(adev);
1035 return 0; 1038 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 52ac7a8f1e58..a822edacfa95 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
373{ 373{
374 bool sw_cg = false; 374 bool sw_cg = false;
375 375
376 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { 376 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
377 if (sw_cg) 377 if (sw_cg)
378 vce_v2_0_set_sw_cg(adev, true); 378 vce_v2_0_set_sw_cg(adev, true);
379 else 379 else
@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
608 */ 608 */
609 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
610 610
611 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
612 return 0;
613
611 if (state == AMD_PG_STATE_GATE) 614 if (state == AMD_PG_STATE_GATE)
612 /* XXX do we need a vce_v2_0_stop()? */ 615 /* XXX do we need a vce_v2_0_stop()? */
613 return 0; 616 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index e99af81e4aec..d662fa9f9091 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
277 WREG32_P(mmVCE_STATUS, 0, ~1); 277 WREG32_P(mmVCE_STATUS, 0, ~1);
278 278
279 /* Set Clock-Gating off */ 279 /* Set Clock-Gating off */
280 if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) 280 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
281 vce_v3_0_set_vce_sw_clock_gating(adev, false); 281 vce_v3_0_set_vce_sw_clock_gating(adev, false);
282 282
283 if (r) { 283 if (r) {
@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
677 int i; 677 int i;
678 678
679 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) 679 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
680 return 0; 680 return 0;
681 681
682 mutex_lock(&adev->grbm_idx_mutex); 682 mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
728 */ 728 */
729 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
730 730
731 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
732 return 0;
733
731 if (state == AMD_PG_STATE_GATE) 734 if (state == AMD_PG_STATE_GATE)
732 /* XXX do we need a vce_v3_0_stop()? */ 735 /* XXX do we need a vce_v3_0_stop()? */
733 return 0; 736 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 89f5a1ff6f43..0d14d108a6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle)
1457 case CHIP_STONEY: 1457 case CHIP_STONEY:
1458 adev->has_uvd = true; 1458 adev->has_uvd = true;
1459 adev->cg_flags = 0; 1459 adev->cg_flags = 0;
1460 /* Disable UVD pg */ 1460 adev->pg_flags = 0;
1461 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1462 adev->external_rev_id = adev->rev_id + 0x1; 1461 adev->external_rev_id = adev->rev_id + 0x1;
1463 break; 1462 break;
1464 default: 1463 default:
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1195d06f55bc..dbf7e6413cab 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,6 +85,38 @@ enum amd_powergating_state {
85 AMD_PG_STATE_UNGATE, 85 AMD_PG_STATE_UNGATE,
86}; 86};
87 87
88/* CG flags */
89#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
90#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
91#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
92#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
93#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
94#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
95#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
96#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
97#define AMD_CG_SUPPORT_MC_LS (1 << 8)
98#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
99#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
100#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
101#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
102#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
103#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
104#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
105#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
106
107/* PG flags */
108#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
109#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
110#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
111#define AMD_PG_SUPPORT_UVD (1 << 3)
112#define AMD_PG_SUPPORT_VCE (1 << 4)
113#define AMD_PG_SUPPORT_CP (1 << 5)
114#define AMD_PG_SUPPORT_GDS (1 << 6)
115#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
116#define AMD_PG_SUPPORT_SDMA (1 << 8)
117#define AMD_PG_SUPPORT_ACP (1 << 9)
118#define AMD_PG_SUPPORT_SAMU (1 << 10)
119
88enum amd_pm_state_type { 120enum amd_pm_state_type {
89 /* not used for dpm */ 121 /* not used for dpm */
90 POWER_STATE_TYPE_DEFAULT, 122 POWER_STATE_TYPE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 713aec954692..aec38fc3834f 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -109,6 +109,8 @@ enum cgs_system_info_id {
109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, 109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
110 CGS_SYSTEM_INFO_PCIE_GEN_INFO, 110 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
111 CGS_SYSTEM_INFO_PCIE_MLW, 111 CGS_SYSTEM_INFO_PCIE_MLW,
112 CGS_SYSTEM_INFO_CG_FLAGS,
113 CGS_SYSTEM_INFO_PG_FLAGS,
112 CGS_SYSTEM_INFO_ID_MAXIMUM, 114 CGS_SYSTEM_INFO_ID_MAXIMUM,
113}; 115};
114 116
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
31static int pem_init(struct pp_eventmgr *eventmgr) 31static int pem_init(struct pp_eventmgr *eventmgr)
32{ 32{
33 int result = 0; 33 int result = 0;
34 struct pem_event_data event_data; 34 struct pem_event_data event_data = { {0} };
35 35
36 /* Initialize PowerPlay feature info */ 36 /* Initialize PowerPlay feature info */
37 pem_init_feature_info(eventmgr); 37 pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
52 52
53static void pem_fini(struct pp_eventmgr *eventmgr) 53static void pem_fini(struct pp_eventmgr *eventmgr)
54{ 54{
55 struct pem_event_data event_data; 55 struct pem_event_data event_data = { {0} };
56 56
57 pem_uninit_featureInfo(eventmgr); 57 pem_uninit_featureInfo(eventmgr);
58 pem_unregister_interrupts(eventmgr); 58 pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0874ab42ee95..cf01177ca3b5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
174{ 174{
175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
176 uint32_t i; 176 uint32_t i;
177 struct cgs_system_info sys_info = {0};
178 int result;
177 179
178 cz_hwmgr->gfx_ramp_step = 256*25/100; 180 cz_hwmgr->gfx_ramp_step = 256*25/100;
179 181
@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 249 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_DisableVoltageIsland); 250 PHM_PlatformCaps_DisableVoltageIsland);
249 251
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_UVDPowerGating);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_VCEPowerGating);
256 sys_info.size = sizeof(struct cgs_system_info);
257 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
258 result = cgs_query_system_info(hwmgr->device, &sys_info);
259 if (!result) {
260 if (sys_info.value & AMD_PG_SUPPORT_UVD)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_UVDPowerGating);
263 if (sys_info.value & AMD_PG_SUPPORT_VCE)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
265 PHM_PlatformCaps_VCEPowerGating);
266 }
267
250 return 0; 268 return 0;
251} 269}
252 270
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 44a925006479..980d3bf8ea76 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; 4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4453 phw_tonga_ulv_parm *ulv; 4453 phw_tonga_ulv_parm *ulv;
4454 struct cgs_system_info sys_info = {0};
4454 4455
4455 PP_ASSERT_WITH_CODE((NULL != hwmgr), 4456 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4456 "Invalid Parameter!", return -1;); 4457 "Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4615 4616
4616 data->vddc_phase_shed_control = 0; 4617 data->vddc_phase_shed_control = 0;
4617 4618
4618 if (0 == result) { 4619 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4619 struct cgs_system_info sys_info = {0}; 4620 PHM_PlatformCaps_UVDPowerGating);
4621 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4622 PHM_PlatformCaps_VCEPowerGating);
4623 sys_info.size = sizeof(struct cgs_system_info);
4624 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
4625 result = cgs_query_system_info(hwmgr->device, &sys_info);
4626 if (!result) {
4627 if (sys_info.value & AMD_PG_SUPPORT_UVD)
4628 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4629 PHM_PlatformCaps_UVDPowerGating);
4630 if (sys_info.value & AMD_PG_SUPPORT_VCE)
4631 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4632 PHM_PlatformCaps_VCEPowerGating);
4633 }
4620 4634
4635 if (0 == result) {
4621 data->is_tlu_enabled = 0; 4636 data->is_tlu_enabled = 0;
4622 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 4637 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4623 TONGA_MAX_HARDWARE_POWERLEVELS; 4638 TONGA_MAX_HARDWARE_POWERLEVELS;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193885f1..9a7b44616b55 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
65 */ 65 */
66 state->allow_modeset = true; 66 state->allow_modeset = true;
67 67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc, 68 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL); 69 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs) 70 if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
83 sizeof(*state->plane_states), GFP_KERNEL); 81 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states) 82 if (!state->plane_states)
85 goto fail; 83 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96 84
97 state->dev = dev; 85 state->dev = dev;
98 86
@@ -823,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
823 811
824 index = drm_connector_index(connector); 812 index = drm_connector_index(connector);
825 813
826 /*
827 * Construction of atomic state updates can race with a connector
828 * hot-add which might overflow. In this case flip the table and just
829 * restart the entire ioctl - no one is fast enough to livelock a cpu
830 * with physical hotplug events anyway.
831 *
832 * Note that we only grab the indexes once we have the right lock to
833 * prevent hotplug/unplugging of connectors. So removal is no problem,
834 * at most the array is a bit too large.
835 */
836 if (index >= state->num_connector) { 814 if (index >= state->num_connector) {
837 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); 815 struct drm_connector **c;
838 return ERR_PTR(-EAGAIN); 816 struct drm_connector_state **cs;
817 int alloc = max(index + 1, config->num_connector);
818
819 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
820 if (!c)
821 return ERR_PTR(-ENOMEM);
822
823 state->connectors = c;
824 memset(&state->connectors[state->num_connector], 0,
825 sizeof(*state->connectors) * (alloc - state->num_connector));
826
827 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
828 if (!cs)
829 return ERR_PTR(-ENOMEM);
830
831 state->connector_states = cs;
832 memset(&state->connector_states[state->num_connector], 0,
833 sizeof(*state->connector_states) * (alloc - state->num_connector));
834 state->num_connector = alloc;
839 } 835 }
840 836
841 if (state->connector_states[index]) 837 if (state->connector_states[index])
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7c523060a076..4f2d3e161593 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1493,7 +1493,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
1493{ 1493{
1494 int i; 1494 int i;
1495 1495
1496 for (i = 0; i < dev->mode_config.num_connector; i++) { 1496 for (i = 0; i < state->num_connector; i++) {
1497 struct drm_connector *connector = state->connectors[i]; 1497 struct drm_connector *connector = state->connectors[i];
1498 1498
1499 if (!connector) 1499 if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab29747e..f6191215b2cb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
918 connector->base.properties = &connector->properties; 918 connector->base.properties = &connector->properties;
919 connector->dev = dev; 919 connector->dev = dev;
920 connector->funcs = funcs; 920 connector->funcs = funcs;
921
922 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
923 if (connector->connector_id < 0) {
924 ret = connector->connector_id;
925 goto out_put;
926 }
927
921 connector->connector_type = connector_type; 928 connector->connector_type = connector_type;
922 connector->connector_type_id = 929 connector->connector_type_id =
923 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 930 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
924 if (connector->connector_type_id < 0) { 931 if (connector->connector_type_id < 0) {
925 ret = connector->connector_type_id; 932 ret = connector->connector_type_id;
926 goto out_put; 933 goto out_put_id;
927 } 934 }
928 connector->name = 935 connector->name =
929 kasprintf(GFP_KERNEL, "%s-%d", 936 kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
931 connector->connector_type_id); 938 connector->connector_type_id);
932 if (!connector->name) { 939 if (!connector->name) {
933 ret = -ENOMEM; 940 ret = -ENOMEM;
934 goto out_put; 941 goto out_put_type_id;
935 } 942 }
936 943
937 INIT_LIST_HEAD(&connector->probed_modes); 944 INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
959 } 966 }
960 967
961 connector->debugfs_entry = NULL; 968 connector->debugfs_entry = NULL;
962 969out_put_type_id:
970 if (ret)
971 ida_remove(connector_ida, connector->connector_type_id);
972out_put_id:
973 if (ret)
974 ida_remove(&config->connector_ida, connector->connector_id);
963out_put: 975out_put:
964 if (ret) 976 if (ret)
965 drm_mode_object_put(dev, &connector->base); 977 drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
996 ida_remove(&drm_connector_enum_list[connector->connector_type].ida, 1008 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
997 connector->connector_type_id); 1009 connector->connector_type_id);
998 1010
1011 ida_remove(&dev->mode_config.connector_ida,
1012 connector->connector_id);
1013
999 kfree(connector->display_info.bus_formats); 1014 kfree(connector->display_info.bus_formats);
1000 drm_mode_object_put(dev, &connector->base); 1015 drm_mode_object_put(dev, &connector->base);
1001 kfree(connector->name); 1016 kfree(connector->name);
@@ -1013,32 +1028,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
1013EXPORT_SYMBOL(drm_connector_cleanup); 1028EXPORT_SYMBOL(drm_connector_cleanup);
1014 1029
1015/** 1030/**
1016 * drm_connector_index - find the index of a registered connector
1017 * @connector: connector to find index for
1018 *
1019 * Given a registered connector, return the index of that connector within a DRM
1020 * device's list of connectors.
1021 */
1022unsigned int drm_connector_index(struct drm_connector *connector)
1023{
1024 unsigned int index = 0;
1025 struct drm_connector *tmp;
1026 struct drm_mode_config *config = &connector->dev->mode_config;
1027
1028 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
1029
1030 drm_for_each_connector(tmp, connector->dev) {
1031 if (tmp == connector)
1032 return index;
1033
1034 index++;
1035 }
1036
1037 BUG();
1038}
1039EXPORT_SYMBOL(drm_connector_index);
1040
1041/**
1042 * drm_connector_register - register a connector 1031 * drm_connector_register - register a connector
1043 * @connector: the connector to register 1032 * @connector: the connector to register
1044 * 1033 *
@@ -5789,6 +5778,7 @@ void drm_mode_config_init(struct drm_device *dev)
5789 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5778 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5790 idr_init(&dev->mode_config.crtc_idr); 5779 idr_init(&dev->mode_config.crtc_idr);
5791 idr_init(&dev->mode_config.tile_idr); 5780 idr_init(&dev->mode_config.tile_idr);
5781 ida_init(&dev->mode_config.connector_ida);
5792 5782
5793 drm_modeset_lock_all(dev); 5783 drm_modeset_lock_all(dev);
5794 drm_mode_create_standard_properties(dev); 5784 drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5869 crtc->funcs->destroy(crtc); 5859 crtc->funcs->destroy(crtc);
5870 } 5860 }
5871 5861
5862 ida_destroy(&dev->mode_config.connector_ida);
5872 idr_destroy(&dev->mode_config.tile_idr); 5863 idr_destroy(&dev->mode_config.tile_idr);
5873 idr_destroy(&dev->mode_config.crtc_idr); 5864 idr_destroy(&dev->mode_config.crtc_idr);
5874 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5865 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8ae13de272c4..27fbd79d0daf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1159,11 +1159,13 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1159 drm_dp_put_port(port); 1159 drm_dp_put_port(port);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 drm_mode_connector_set_tile_property(port->connector); 1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 1164 drm_mode_connector_set_tile_property(port->connector);
1165 }
1165 (*mstb->mgr->cbs->register_connector)(port->connector); 1166 (*mstb->mgr->cbs->register_connector)(port->connector);
1166 } 1167 }
1168
1167out: 1169out:
1168 /* put reference to this port */ 1170 /* put reference to this port */
1169 drm_dp_put_port(port); 1171 drm_dp_put_port(port);
@@ -1188,8 +1190,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1188 port->ddps = conn_stat->displayport_device_plug_status; 1190 port->ddps = conn_stat->displayport_device_plug_status;
1189 1191
1190 if (old_ddps != port->ddps) { 1192 if (old_ddps != port->ddps) {
1191 dowork = true;
1192 if (port->ddps) { 1193 if (port->ddps) {
1194 dowork = true;
1193 } else { 1195 } else {
1194 port->available_pbn = 0; 1196 port->available_pbn = 0;
1195 } 1197 }
@@ -1294,13 +1296,8 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1294 if (port->input) 1296 if (port->input)
1295 continue; 1297 continue;
1296 1298
1297 if (!port->ddps) { 1299 if (!port->ddps)
1298 if (port->cached_edid) {
1299 kfree(port->cached_edid);
1300 port->cached_edid = NULL;
1301 }
1302 continue; 1300 continue;
1303 }
1304 1301
1305 if (!port->available_pbn) 1302 if (!port->available_pbn)
1306 drm_dp_send_enum_path_resources(mgr, mstb, port); 1303 drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1311,12 +1308,6 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1311 drm_dp_check_and_send_link_address(mgr, mstb_child); 1308 drm_dp_check_and_send_link_address(mgr, mstb_child);
1312 drm_dp_put_mst_branch_device(mstb_child); 1309 drm_dp_put_mst_branch_device(mstb_child);
1313 } 1310 }
1314 } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
1315 port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
1316 if (!port->cached_edid) {
1317 port->cached_edid =
1318 drm_get_edid(port->connector, &port->aux.ddc);
1319 }
1320 } 1311 }
1321 } 1312 }
1322} 1313}
@@ -1336,8 +1327,6 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
1336 drm_dp_check_and_send_link_address(mgr, mstb); 1327 drm_dp_check_and_send_link_address(mgr, mstb);
1337 drm_dp_put_mst_branch_device(mstb); 1328 drm_dp_put_mst_branch_device(mstb);
1338 } 1329 }
1339
1340 (*mgr->cbs->hotplug)(mgr);
1341} 1330}
1342 1331
1343static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1332static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1597,6 +1586,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1597 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1598 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1599 } 1588 }
1589 (*mgr->cbs->hotplug)(mgr);
1600 } 1590 }
1601 } else { 1591 } else {
1602 mstb->link_address_sent = false; 1592 mstb->link_address_sent = false;
@@ -2293,6 +2283,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2293 drm_dp_update_port(mstb, &msg.u.conn_stat); 2283 drm_dp_update_port(mstb, &msg.u.conn_stat);
2294 2284
2295 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2285 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2286 (*mgr->cbs->hotplug)(mgr);
2287
2296 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2288 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2297 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2289 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2298 if (!mstb) 2290 if (!mstb)
@@ -2379,6 +2371,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
2379 2371
2380 case DP_PEER_DEVICE_SST_SINK: 2372 case DP_PEER_DEVICE_SST_SINK:
2381 status = connector_status_connected; 2373 status = connector_status_connected;
2374 /* for logical ports - cache the EDID */
2375 if (port->port_num >= 8 && !port->cached_edid) {
2376 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2377 }
2382 break; 2378 break;
2383 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2379 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2384 if (port->ldps) 2380 if (port->ldps)
@@ -2433,7 +2429,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2433 2429
2434 if (port->cached_edid) 2430 if (port->cached_edid)
2435 edid = drm_edid_duplicate(port->cached_edid); 2431 edid = drm_edid_duplicate(port->cached_edid);
2436 2432 else {
2433 edid = drm_get_edid(connector, &port->aux.ddc);
2434 drm_mode_connector_set_tile_property(connector);
2435 }
2437 port->has_audio = drm_detect_monitor_audio(edid); 2436 port->has_audio = drm_detect_monitor_audio(edid);
2438 drm_dp_put_port(port); 2437 drm_dp_put_port(port);
2439 return edid; 2438 return edid;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4efa651b..1fe14579e8c9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; 224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
225 } 225 }
226 226
227 /*
228 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
229 * interval? If so then vblank irqs keep running and it will likely
230 * happen that the hardware vblank counter is not trustworthy as it
231 * might reset at some point in that interval and vblank timestamps
232 * are not trustworthy either in that interval. Iow. this can result
233 * in a bogus diff >> 1 which must be avoided as it would cause
234 * random large forward jumps of the software vblank counter.
235 */
236 if (diff > 1 && (vblank->inmodeset & 0x2)) {
237 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
238 " due to pre-modeset.\n", pipe, diff);
239 diff = 1;
240 }
241
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
227 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 285 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
228 " current=%u, diff=%u, hw=%u hw_last=%u\n", 286 " current=%u, diff=%u, hw=%u hw_last=%u\n",
229 pipe, vblank->count, diff, cur_vblank, vblank->last); 287 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1316 spin_lock_irqsave(&dev->event_lock, irqflags); 1374 spin_lock_irqsave(&dev->event_lock, irqflags);
1317 1375
1318 spin_lock(&dev->vbl_lock); 1376 spin_lock(&dev->vbl_lock);
1319 vblank_disable_and_save(dev, pipe); 1377 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1378 pipe, vblank->enabled, vblank->inmodeset);
1379
1380 /* Avoid redundant vblank disables without previous drm_vblank_on(). */
1381 if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
1382 vblank_disable_and_save(dev, pipe);
1383
1320 wake_up(&vblank->queue); 1384 wake_up(&vblank->queue);
1321 1385
1322 /* 1386 /*
@@ -1418,6 +1482,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1418 return; 1482 return;
1419 1483
1420 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1484 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1485 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1486 pipe, vblank->enabled, vblank->inmodeset);
1487
1421 /* Drop our private "prevent drm_vblank_get" refcount */ 1488 /* Drop our private "prevent drm_vblank_get" refcount */
1422 if (vblank->inmodeset) { 1489 if (vblank->inmodeset) {
1423 atomic_dec(&vblank->refcount); 1490 atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1430 * re-enable interrupts if there are users left, or the 1497 * re-enable interrupts if there are users left, or the
1431 * user wishes vblank interrupts to be enabled all the time. 1498 * user wishes vblank interrupts to be enabled all the time.
1432 */ 1499 */
1433 if (atomic_read(&vblank->refcount) != 0 || 1500 if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
1434 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1435 WARN_ON(drm_vblank_enable(dev, pipe)); 1501 WARN_ON(drm_vblank_enable(dev, pipe));
1436 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1502 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1437} 1503}
@@ -1526,6 +1592,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1526 if (vblank->inmodeset) { 1592 if (vblank->inmodeset) {
1527 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1593 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1528 dev->vblank_disable_allowed = true; 1594 dev->vblank_disable_allowed = true;
1595 drm_reset_vblank_timestamp(dev, pipe);
1529 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1596 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1530 1597
1531 if (vblank->inmodeset & 0x2) 1598 if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca941388..f17d39279596 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER 5 select DRM_KMS_FB_HELPER
6 select FB_CFB_FILLRECT 6 select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21130c7..162ab93e99cb 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
93 if (test_bit(BIT_SUSPENDED, &ctx->flags)) 93 if (test_bit(BIT_SUSPENDED, &ctx->flags))
94 return -EPERM; 94 return -EPERM;
95 95
96 if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) { 96 if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
97 val = VIDINTCON0_INTEN; 97 val = VIDINTCON0_INTEN;
98 if (ctx->out_type == IFTYPE_I80) 98 if (ctx->out_type == IFTYPE_I80)
99 val |= VIDINTCON0_FRAMEDONE; 99 val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
402 decon_enable_vblank(ctx->crtc); 402 decon_enable_vblank(ctx->crtc);
403 403
404 decon_commit(ctx->crtc); 404 decon_commit(ctx->crtc);
405
406 set_bit(BIT_SUSPENDED, &ctx->flags);
407} 405}
408 406
409static void decon_disable(struct exynos_drm_crtc *crtc) 407static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@ out:
582static int exynos5433_decon_suspend(struct device *dev) 580static int exynos5433_decon_suspend(struct device *dev)
583{ 581{
584 struct decon_context *ctx = dev_get_drvdata(dev); 582 struct decon_context *ctx = dev_get_drvdata(dev);
585 int i; 583 int i = ARRAY_SIZE(decon_clks_name);
586 584
587 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) 585 while (--i >= 0)
588 clk_disable_unprepare(ctx->clks[i]); 586 clk_disable_unprepare(ctx->clks[i]);
589 587
590 return 0; 588 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e977a81af2e6..26e81d191f56 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1782 1782
1783 bridge = of_drm_find_bridge(dsi->bridge_node); 1783 bridge = of_drm_find_bridge(dsi->bridge_node);
1784 if (bridge) { 1784 if (bridge) {
1785 encoder->bridge = bridge;
1785 drm_bridge_attach(drm_dev, bridge); 1786 drm_bridge_attach(drm_dev, bridge);
1786 } 1787 }
1787 1788
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118baa8e3e..8baabd813ff5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
50 if (vm_size > exynos_gem->size) 50 if (vm_size > exynos_gem->size)
51 return -EINVAL; 51 return -EINVAL;
52 52
53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages, 53 ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
54 exynos_gem->dma_addr, exynos_gem->size, 54 exynos_gem->dma_addr, exynos_gem->size,
55 &exynos_gem->dma_attrs); 55 &exynos_gem->dma_attrs);
56 if (ret < 0) { 56 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824f3c98..8a4f4a0211d0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@ static int fimc_probe(struct platform_device *pdev)
1723 goto err_put_clk; 1723 goto err_put_clk;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 spin_lock_init(&ctx->lock); 1728 spin_lock_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb238a6..8dfe6e113a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1166 goto err_free_event; 1166 goto err_free_event;
1167 } 1167 }
1168 1168
1169 cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd; 1169 cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1170 1170
1171 if (copy_from_user(cmdlist->data + cmdlist->last, 1171 if (copy_from_user(cmdlist->data + cmdlist->last,
1172 (void __user *)cmd, 1172 (void __user *)cmd,
@@ -1184,7 +1184,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1184 if (req->cmd_buf_nr) { 1184 if (req->cmd_buf_nr) {
1185 struct drm_exynos_g2d_cmd *cmd_buf; 1185 struct drm_exynos_g2d_cmd *cmd_buf;
1186 1186
1187 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf; 1187 cmd_buf = (struct drm_exynos_g2d_cmd *)
1188 (unsigned long)req->cmd_buf;
1188 1189
1189 if (copy_from_user(cmdlist->data + cmdlist->last, 1190 if (copy_from_user(cmdlist->data + cmdlist->last,
1190 (void __user *)cmd_buf, 1191 (void __user *)cmd_buf,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5e3db4..26b5e4bd55b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
218 return ERR_PTR(ret); 218 return ERR_PTR(ret);
219 } 219 }
220 220
221 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
222 222
223 return exynos_gem; 223 return exynos_gem;
224} 224}
@@ -335,7 +335,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
335 if (vm_size > exynos_gem->size) 335 if (vm_size > exynos_gem->size)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages, 338 ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
339 exynos_gem->dma_addr, exynos_gem->size, 339 exynos_gem->dma_addr, exynos_gem->size,
340 &exynos_gem->dma_attrs); 340 &exynos_gem->dma_attrs);
341 if (ret < 0) { 341 if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23cfa11..5d20da8f957e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev)
1723 return ret; 1723 return ret;
1724 } 1724 }
1725 1725
1726 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); 1726 DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
1727 1727
1728 mutex_init(&ctx->lock); 1728 mutex_init(&ctx->lock);
1729 platform_set_drvdata(pdev, ctx); 1729 platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d24236e745..95eeb9116f10 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
208 * e.g PAUSE state, queue buf, command control. 208 * e.g PAUSE state, queue buf, command control.
209 */ 209 */
210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { 210 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
211 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv); 211 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
212 212
213 mutex_lock(&ippdrv->cmd_lock); 213 mutex_lock(&ippdrv->cmd_lock);
214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) { 214 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
388 } 388 }
389 property->prop_id = ret; 389 property->prop_id = ret;
390 390
391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n", 391 DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
392 property->prop_id, property->cmd, (int)ippdrv); 392 property->prop_id, property->cmd, ippdrv);
393 393
394 /* stored property information and ippdrv in private data */ 394 /* stored property information and ippdrv in private data */
395 c_node->property = *property; 395 c_node->property = *property;
@@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev,
518{ 518{
519 int i; 519 int i;
520 520
521 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 521 DRM_DEBUG_KMS("node[%p]\n", m_node);
522 522
523 if (!m_node) { 523 if (!m_node) {
524 DRM_ERROR("invalid dequeue node.\n"); 524 DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node
562 m_node->buf_id = qbuf->buf_id; 562 m_node->buf_id = qbuf->buf_id;
563 INIT_LIST_HEAD(&m_node->list); 563 INIT_LIST_HEAD(&m_node->list);
564 564
565 DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id); 565 DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); 566 DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
567 567
568 for_each_ipp_planar(i) { 568 for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@ static struct drm_exynos_ipp_mem_node
582 582
583 buf_info->handles[i] = qbuf->handle[i]; 583 buf_info->handles[i] = qbuf->handle[i];
584 buf_info->base[i] = *addr; 584 buf_info->base[i] = *addr;
585 DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i, 585 DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
586 buf_info->base[i], buf_info->handles[i]); 586 &buf_info->base[i], buf_info->handles[i]);
587 } 587 }
588 } 588 }
589 589
@@ -664,7 +664,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
664 664
665 mutex_lock(&c_node->event_lock); 665 mutex_lock(&c_node->event_lock);
666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { 666 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
667 DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e); 667 DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
668 668
669 /* 669 /*
670 * qbuf == NULL condition means all event deletion. 670 * qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@ static struct drm_exynos_ipp_mem_node
755 755
756 /* find memory node from memory list */ 756 /* find memory node from memory list */
757 list_for_each_entry(m_node, head, list) { 757 list_for_each_entry(m_node, head, list) {
758 DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node); 758 DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
759 759
760 /* compare buffer id */ 760 /* compare buffer id */
761 if (m_node->buf_id == qbuf->buf_id) 761 if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
772 struct exynos_drm_ipp_ops *ops = NULL; 772 struct exynos_drm_ipp_ops *ops = NULL;
773 int ret = 0; 773 int ret = 0;
774 774
775 DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node); 775 DRM_DEBUG_KMS("node[%p]\n", m_node);
776 776
777 if (!m_node) { 777 if (!m_node) {
778 DRM_ERROR("invalid queue node.\n"); 778 DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1237 m_node = list_first_entry(head, 1237 m_node = list_first_entry(head,
1238 struct drm_exynos_ipp_mem_node, list); 1238 struct drm_exynos_ipp_mem_node, list);
1239 1239
1240 DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node); 1240 DRM_DEBUG_KMS("m_node[%p]\n", m_node);
1241 1241
1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node); 1242 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1243 if (ret) { 1243 if (ret) {
@@ -1610,8 +1610,8 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1610 } 1610 }
1611 ippdrv->prop_list.ipp_id = ret; 1611 ippdrv->prop_list.ipp_id = ret;
1612 1612
1613 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n", 1613 DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
1614 count++, (int)ippdrv, ret); 1614 count++, ippdrv, ret);
1615 1615
1616 /* store parent device for node */ 1616 /* store parent device for node */
1617 ippdrv->parent_dev = dev; 1617 ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1668 1668
1669 file_priv->ipp_dev = dev; 1669 file_priv->ipp_dev = dev;
1670 1670
1671 DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev); 1671 DRM_DEBUG_KMS("done priv[%p]\n", dev);
1672 1672
1673 return 0; 1673 return 0;
1674} 1674}
@@ -1685,8 +1685,8 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1685 mutex_lock(&ippdrv->cmd_lock); 1685 mutex_lock(&ippdrv->cmd_lock);
1686 list_for_each_entry_safe(c_node, tc_node, 1686 list_for_each_entry_safe(c_node, tc_node,
1687 &ippdrv->cmd_list, list) { 1687 &ippdrv->cmd_list, list) {
1688 DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", 1688 DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
1689 count++, (int)ippdrv); 1689 count++, ippdrv);
1690 1690
1691 if (c_node->filp == file) { 1691 if (c_node->filp == file) {
1692 /* 1692 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36aec5a..9869d70e9e54 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/of_graph.h> 19#include <linux/of_graph.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h>
21#include <drm/drmP.h> 22#include <drm/drmP.h>
22#include <linux/mfd/syscon.h> 23#include <linux/mfd/syscon.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
@@ -306,9 +307,9 @@ exit:
306 return ret; 307 return ret;
307} 308}
308 309
309void mic_disable(struct drm_bridge *bridge) { } 310static void mic_disable(struct drm_bridge *bridge) { }
310 311
311void mic_post_disable(struct drm_bridge *bridge) 312static void mic_post_disable(struct drm_bridge *bridge)
312{ 313{
313 struct exynos_mic *mic = bridge->driver_private; 314 struct exynos_mic *mic = bridge->driver_private;
314 int i; 315 int i;
@@ -328,7 +329,7 @@ already_disabled:
328 mutex_unlock(&mic_mutex); 329 mutex_unlock(&mic_mutex);
329} 330}
330 331
331void mic_pre_enable(struct drm_bridge *bridge) 332static void mic_pre_enable(struct drm_bridge *bridge)
332{ 333{
333 struct exynos_mic *mic = bridge->driver_private; 334 struct exynos_mic *mic = bridge->driver_private;
334 int ret, i; 335 int ret, i;
@@ -371,11 +372,35 @@ already_enabled:
371 mutex_unlock(&mic_mutex); 372 mutex_unlock(&mic_mutex);
372} 373}
373 374
374void mic_enable(struct drm_bridge *bridge) { } 375static void mic_enable(struct drm_bridge *bridge) { }
375 376
376void mic_destroy(struct drm_bridge *bridge) 377static const struct drm_bridge_funcs mic_bridge_funcs = {
378 .disable = mic_disable,
379 .post_disable = mic_post_disable,
380 .pre_enable = mic_pre_enable,
381 .enable = mic_enable,
382};
383
384static int exynos_mic_bind(struct device *dev, struct device *master,
385 void *data)
377{ 386{
378 struct exynos_mic *mic = bridge->driver_private; 387 struct exynos_mic *mic = dev_get_drvdata(dev);
388 int ret;
389
390 mic->bridge.funcs = &mic_bridge_funcs;
391 mic->bridge.of_node = dev->of_node;
392 mic->bridge.driver_private = mic;
393 ret = drm_bridge_add(&mic->bridge);
394 if (ret)
395 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
396
397 return ret;
398}
399
400static void exynos_mic_unbind(struct device *dev, struct device *master,
401 void *data)
402{
403 struct exynos_mic *mic = dev_get_drvdata(dev);
379 int i; 404 int i;
380 405
381 mutex_lock(&mic_mutex); 406 mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@ void mic_destroy(struct drm_bridge *bridge)
387 412
388already_disabled: 413already_disabled:
389 mutex_unlock(&mic_mutex); 414 mutex_unlock(&mic_mutex);
415
416 drm_bridge_remove(&mic->bridge);
390} 417}
391 418
392static const struct drm_bridge_funcs mic_bridge_funcs = { 419static const struct component_ops exynos_mic_component_ops = {
393 .disable = mic_disable, 420 .bind = exynos_mic_bind,
394 .post_disable = mic_post_disable, 421 .unbind = exynos_mic_unbind,
395 .pre_enable = mic_pre_enable,
396 .enable = mic_enable,
397}; 422};
398 423
399int exynos_mic_probe(struct platform_device *pdev) 424static int exynos_mic_probe(struct platform_device *pdev)
400{ 425{
401 struct device *dev = &pdev->dev; 426 struct device *dev = &pdev->dev;
402 struct exynos_mic *mic; 427 struct exynos_mic *mic;
@@ -435,17 +460,8 @@ int exynos_mic_probe(struct platform_device *pdev)
435 goto err; 460 goto err;
436 } 461 }
437 462
438 mic->bridge.funcs = &mic_bridge_funcs;
439 mic->bridge.of_node = dev->of_node;
440 mic->bridge.driver_private = mic;
441 ret = drm_bridge_add(&mic->bridge);
442 if (ret) {
443 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
444 goto err;
445 }
446
447 for (i = 0; i < NUM_CLKS; i++) { 463 for (i = 0; i < NUM_CLKS; i++) {
448 mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]); 464 mic->clks[i] = devm_clk_get(dev, clk_names[i]);
449 if (IS_ERR(mic->clks[i])) { 465 if (IS_ERR(mic->clks[i])) {
450 DRM_ERROR("mic: Failed to get clock (%s)\n", 466 DRM_ERROR("mic: Failed to get clock (%s)\n",
451 clk_names[i]); 467 clk_names[i]);
@@ -454,7 +470,10 @@ int exynos_mic_probe(struct platform_device *pdev)
454 } 470 }
455 } 471 }
456 472
473 platform_set_drvdata(pdev, mic);
474
457 DRM_DEBUG_KMS("MIC has been probed\n"); 475 DRM_DEBUG_KMS("MIC has been probed\n");
476 return component_add(dev, &exynos_mic_component_ops);
458 477
459err: 478err:
460 return ret; 479 return ret;
@@ -462,14 +481,7 @@ err:
462 481
463static int exynos_mic_remove(struct platform_device *pdev) 482static int exynos_mic_remove(struct platform_device *pdev)
464{ 483{
465 struct exynos_mic *mic = platform_get_drvdata(pdev); 484 component_del(&pdev->dev, &exynos_mic_component_ops);
466 int i;
467
468 drm_bridge_remove(&mic->bridge);
469
470 for (i = NUM_CLKS - 1; i > -1; i--)
471 clk_put(mic->clks[i]);
472
473 return 0; 485 return 0;
474} 486}
475 487
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f7826d30..ce59f4443394 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@ static int rotator_probe(struct platform_device *pdev)
754 goto err_ippdrv_register; 754 goto err_ippdrv_register;
755 } 755 }
756 756
757 DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv); 757 DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
758 758
759 platform_set_drvdata(pdev, rot); 759 platform_set_drvdata(pdev, rot);
760 760
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5fa51d..b605bd7395ec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
223 } 223 }
224} 224}
225 225
226static int vidi_show_connection(struct device *dev, 226static ssize_t vidi_show_connection(struct device *dev,
227 struct device_attribute *attr, char *buf) 227 struct device_attribute *attr, char *buf)
228{ 228{
229 struct vidi_context *ctx = dev_get_drvdata(dev); 229 struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@ static int vidi_show_connection(struct device *dev,
238 return rc; 238 return rc;
239} 239}
240 240
241static int vidi_store_connection(struct device *dev, 241static ssize_t vidi_store_connection(struct device *dev,
242 struct device_attribute *attr, 242 struct device_attribute *attr,
243 const char *buf, size_t len) 243 const char *buf, size_t len)
244{ 244{
@@ -294,7 +294,9 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
294 } 294 }
295 295
296 if (vidi->connection) { 296 if (vidi->connection) {
297 struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid; 297 struct edid *raw_edid;
298
299 raw_edid = (struct edid *)(unsigned long)vidi->edid;
298 if (!drm_edid_is_valid(raw_edid)) { 300 if (!drm_edid_is_valid(raw_edid)) {
299 DRM_DEBUG_KMS("edid data is invalid.\n"); 301 DRM_DEBUG_KMS("edid data is invalid.\n");
300 return -EINVAL; 302 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb7276c..cf39ed3133d6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
825 } 825 }
826 826
827 for_each_pipe(dev_priv, pipe) { 827 for_each_pipe(dev_priv, pipe) {
828 if (!intel_display_power_is_enabled(dev_priv, 828 enum intel_display_power_domain power_domain;
829 POWER_DOMAIN_PIPE(pipe))) { 829
830 power_domain = POWER_DOMAIN_PIPE(pipe);
831 if (!intel_display_power_get_if_enabled(dev_priv,
832 power_domain)) {
830 seq_printf(m, "Pipe %c power disabled\n", 833 seq_printf(m, "Pipe %c power disabled\n",
831 pipe_name(pipe)); 834 pipe_name(pipe));
832 continue; 835 continue;
@@ -840,6 +843,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
840 seq_printf(m, "Pipe %c IER:\t%08x\n", 843 seq_printf(m, "Pipe %c IER:\t%08x\n",
841 pipe_name(pipe), 844 pipe_name(pipe),
842 I915_READ(GEN8_DE_PIPE_IER(pipe))); 845 I915_READ(GEN8_DE_PIPE_IER(pipe)));
846
847 intel_display_power_put(dev_priv, power_domain);
843 } 848 }
844 849
845 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 850 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3985 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3990 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3986 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3991 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3987 pipe)); 3992 pipe));
3993 enum intel_display_power_domain power_domain;
3988 u32 val = 0; /* shut up gcc */ 3994 u32 val = 0; /* shut up gcc */
3989 int ret; 3995 int ret;
3990 3996
@@ -3995,7 +4001,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3995 if (pipe_crc->source && source) 4001 if (pipe_crc->source && source)
3996 return -EINVAL; 4002 return -EINVAL;
3997 4003
3998 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 4004 power_domain = POWER_DOMAIN_PIPE(pipe);
4005 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
3999 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 4006 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4000 return -EIO; 4007 return -EIO;
4001 } 4008 }
@@ -4012,7 +4019,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4012 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); 4019 ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4013 4020
4014 if (ret != 0) 4021 if (ret != 0)
4015 return ret; 4022 goto out;
4016 4023
4017 /* none -> real source transition */ 4024 /* none -> real source transition */
4018 if (source) { 4025 if (source) {
@@ -4024,8 +4031,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4024 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, 4031 entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4025 sizeof(pipe_crc->entries[0]), 4032 sizeof(pipe_crc->entries[0]),
4026 GFP_KERNEL); 4033 GFP_KERNEL);
4027 if (!entries) 4034 if (!entries) {
4028 return -ENOMEM; 4035 ret = -ENOMEM;
4036 goto out;
4037 }
4029 4038
4030 /* 4039 /*
4031 * When IPS gets enabled, the pipe CRC changes. Since IPS gets 4040 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4081 hsw_enable_ips(crtc); 4090 hsw_enable_ips(crtc);
4082 } 4091 }
4083 4092
4084 return 0; 4093 ret = 0;
4094
4095out:
4096 intel_display_power_put(dev_priv, power_domain);
4097
4098 return ret;
4085} 4099}
4086 4100
4087/* 4101/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f0f75d7c0d94..b0847b915545 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@ struct intel_csr {
751 uint32_t mmio_count; 751 uint32_t mmio_count;
752 i915_reg_t mmioaddr[8]; 752 i915_reg_t mmioaddr[8];
753 uint32_t mmiodata[8]; 753 uint32_t mmiodata[8];
754 uint32_t dc_state;
754}; 755};
755 756
756#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 757#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1988,6 +1989,9 @@ enum hdmi_force_audio {
1988#define I915_GTT_OFFSET_NONE ((u32)-1) 1989#define I915_GTT_OFFSET_NONE ((u32)-1)
1989 1990
1990struct drm_i915_gem_object_ops { 1991struct drm_i915_gem_object_ops {
1992 unsigned int flags;
1993#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
1994
1991 /* Interface between the GEM object and its backing storage. 1995 /* Interface between the GEM object and its backing storage.
1992 * get_pages() is called once prior to the use of the associated set 1996 * get_pages() is called once prior to the use of the associated set
1993 * of pages before to binding them into the GTT, and put_pages() is 1997 * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2007,7 @@ struct drm_i915_gem_object_ops {
2003 */ 2007 */
2004 int (*get_pages)(struct drm_i915_gem_object *); 2008 int (*get_pages)(struct drm_i915_gem_object *);
2005 void (*put_pages)(struct drm_i915_gem_object *); 2009 void (*put_pages)(struct drm_i915_gem_object *);
2010
2006 int (*dmabuf_export)(struct drm_i915_gem_object *); 2011 int (*dmabuf_export)(struct drm_i915_gem_object *);
2007 void (*release)(struct drm_i915_gem_object *); 2012 void (*release)(struct drm_i915_gem_object *);
2008}; 2013};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4b388d..bb44bad15403 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4425,6 +4425,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4425} 4425}
4426 4426
4427static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4427static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4428 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4428 .get_pages = i915_gem_object_get_pages_gtt, 4429 .get_pages = i915_gem_object_get_pages_gtt,
4429 .put_pages = i915_gem_object_put_pages_gtt, 4430 .put_pages = i915_gem_object_put_pages_gtt,
4430}; 4431};
@@ -5261,7 +5262,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5261 struct page *page; 5262 struct page *page;
5262 5263
5263 /* Only default objects have per-page dirty tracking */ 5264 /* Only default objects have per-page dirty tracking */
5264 if (WARN_ON(obj->ops != &i915_gem_object_ops)) 5265 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
5265 return NULL; 5266 return NULL;
5266 5267
5267 page = i915_gem_object_get_page(obj, n); 5268 page = i915_gem_object_get_page(obj, n);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 19fb0bddc1cd..59e45b3a6937 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -789,9 +789,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
789} 789}
790 790
791static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { 791static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
792 .dmabuf_export = i915_gem_userptr_dmabuf_export, 792 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
793 .get_pages = i915_gem_userptr_get_pages, 793 .get_pages = i915_gem_userptr_get_pages,
794 .put_pages = i915_gem_userptr_put_pages, 794 .put_pages = i915_gem_userptr_put_pages,
795 .dmabuf_export = i915_gem_userptr_dmabuf_export,
795 .release = i915_gem_userptr_release, 796 .release = i915_gem_userptr_release,
796}; 797};
797 798
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 007ae83a4086..4897728713f6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@ enum skl_disp_power_wells {
3287 3287
3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) 3288#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
3289/* 3289/*
3290 * HDMI/DP bits are gen4+ 3290 * HDMI/DP bits are g4x+
3291 * 3291 *
3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 3292 * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
3293 * Please check the detailed lore in the commit message for for experimental 3293 * Please check the detailed lore in the commit message for for experimental
3294 * evidence. 3294 * evidence.
3295 */ 3295 */
3296#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) 3296/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
3297#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
3298#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
3299#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
3300/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
3301#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
3297#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 3302#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
3298#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 3303#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
3299/* VLV DP/HDMI bits again match Bspec */
3300#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
3301#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
3302#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
3303#define PORTD_HOTPLUG_INT_STATUS (3 << 21) 3304#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
3304#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) 3305#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
3305#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) 3306#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7514,7 +7515,7 @@ enum skl_disp_power_wells {
7514#define DPLL_CFGCR2_PDIV_7 (4<<2) 7515#define DPLL_CFGCR2_PDIV_7 (4<<2)
7515#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) 7516#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
7516 7517
7517#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2) 7518#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
7518#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) 7519#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
7519 7520
7520/* BXT display engine PLL */ 7521/* BXT display engine PLL */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a2aa09ce3202..a8af594fbd00 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
49 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 49 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
50 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 50 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
51 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 51 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
52 } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 52 } else if (INTEL_INFO(dev)->gen <= 4) {
53 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 53 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
54 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 54 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
55 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 55 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
84 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 84 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
85 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 85 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
86 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 86 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
87 } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 87 } else if (INTEL_INFO(dev)->gen <= 4) {
88 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); 88 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
89 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); 89 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
90 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); 90 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1af036..a7b4a524fadd 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
74 bool ret;
74 75
75 power_domain = intel_display_port_power_domain(encoder); 76 power_domain = intel_display_port_power_domain(encoder);
76 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 77 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
77 return false; 78 return false;
78 79
80 ret = false;
81
79 tmp = I915_READ(crt->adpa_reg); 82 tmp = I915_READ(crt->adpa_reg);
80 83
81 if (!(tmp & ADPA_DAC_ENABLE)) 84 if (!(tmp & ADPA_DAC_ENABLE))
82 return false; 85 goto out;
83 86
84 if (HAS_PCH_CPT(dev)) 87 if (HAS_PCH_CPT(dev))
85 *pipe = PORT_TO_PIPE_CPT(tmp); 88 *pipe = PORT_TO_PIPE_CPT(tmp);
86 else 89 else
87 *pipe = PORT_TO_PIPE(tmp); 90 *pipe = PORT_TO_PIPE(tmp);
88 91
89 return true; 92 ret = true;
93out:
94 intel_display_power_put(dev_priv, power_domain);
95
96 return ret;
90} 97}
91 98
92static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a85997a..647d85e77c2f 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
240 I915_WRITE(dev_priv->csr.mmioaddr[i], 240 I915_WRITE(dev_priv->csr.mmioaddr[i],
241 dev_priv->csr.mmiodata[i]); 241 dev_priv->csr.mmiodata[i]);
242 } 242 }
243
244 dev_priv->csr.dc_state = 0;
243} 245}
244 246
245static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, 247static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6408e5583d7..0f3df2c39f7c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1589,7 +1589,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
1589 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | 1589 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1590 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | 1590 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1591 wrpll_params.central_freq; 1591 wrpll_params.central_freq;
1592 } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 1592 } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
1593 intel_encoder->type == INTEL_OUTPUT_DP_MST) {
1593 switch (crtc_state->port_clock / 2) { 1594 switch (crtc_state->port_clock / 2) {
1594 case 81000: 1595 case 81000:
1595 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); 1596 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@@ -1968,13 +1969,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1968 enum transcoder cpu_transcoder; 1969 enum transcoder cpu_transcoder;
1969 enum intel_display_power_domain power_domain; 1970 enum intel_display_power_domain power_domain;
1970 uint32_t tmp; 1971 uint32_t tmp;
1972 bool ret;
1971 1973
1972 power_domain = intel_display_port_power_domain(intel_encoder); 1974 power_domain = intel_display_port_power_domain(intel_encoder);
1973 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 1975 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
1974 return false; 1976 return false;
1975 1977
1976 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1978 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
1977 return false; 1979 ret = false;
1980 goto out;
1981 }
1978 1982
1979 if (port == PORT_A) 1983 if (port == PORT_A)
1980 cpu_transcoder = TRANSCODER_EDP; 1984 cpu_transcoder = TRANSCODER_EDP;
@@ -1986,23 +1990,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1986 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { 1990 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
1987 case TRANS_DDI_MODE_SELECT_HDMI: 1991 case TRANS_DDI_MODE_SELECT_HDMI:
1988 case TRANS_DDI_MODE_SELECT_DVI: 1992 case TRANS_DDI_MODE_SELECT_DVI:
1989 return (type == DRM_MODE_CONNECTOR_HDMIA); 1993 ret = type == DRM_MODE_CONNECTOR_HDMIA;
1994 break;
1990 1995
1991 case TRANS_DDI_MODE_SELECT_DP_SST: 1996 case TRANS_DDI_MODE_SELECT_DP_SST:
1992 if (type == DRM_MODE_CONNECTOR_eDP) 1997 ret = type == DRM_MODE_CONNECTOR_eDP ||
1993 return true; 1998 type == DRM_MODE_CONNECTOR_DisplayPort;
1994 return (type == DRM_MODE_CONNECTOR_DisplayPort); 1999 break;
2000
1995 case TRANS_DDI_MODE_SELECT_DP_MST: 2001 case TRANS_DDI_MODE_SELECT_DP_MST:
1996 /* if the transcoder is in MST state then 2002 /* if the transcoder is in MST state then
1997 * connector isn't connected */ 2003 * connector isn't connected */
1998 return false; 2004 ret = false;
2005 break;
1999 2006
2000 case TRANS_DDI_MODE_SELECT_FDI: 2007 case TRANS_DDI_MODE_SELECT_FDI:
2001 return (type == DRM_MODE_CONNECTOR_VGA); 2008 ret = type == DRM_MODE_CONNECTOR_VGA;
2009 break;
2002 2010
2003 default: 2011 default:
2004 return false; 2012 ret = false;
2013 break;
2005 } 2014 }
2015
2016out:
2017 intel_display_power_put(dev_priv, power_domain);
2018
2019 return ret;
2006} 2020}
2007 2021
2008bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 2022bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2014,15 +2028,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2014 enum intel_display_power_domain power_domain; 2028 enum intel_display_power_domain power_domain;
2015 u32 tmp; 2029 u32 tmp;
2016 int i; 2030 int i;
2031 bool ret;
2017 2032
2018 power_domain = intel_display_port_power_domain(encoder); 2033 power_domain = intel_display_port_power_domain(encoder);
2019 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2034 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2020 return false; 2035 return false;
2021 2036
2037 ret = false;
2038
2022 tmp = I915_READ(DDI_BUF_CTL(port)); 2039 tmp = I915_READ(DDI_BUF_CTL(port));
2023 2040
2024 if (!(tmp & DDI_BUF_CTL_ENABLE)) 2041 if (!(tmp & DDI_BUF_CTL_ENABLE))
2025 return false; 2042 goto out;
2026 2043
2027 if (port == PORT_A) { 2044 if (port == PORT_A) {
2028 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 2045 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2040,25 +2057,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
2040 break; 2057 break;
2041 } 2058 }
2042 2059
2043 return true; 2060 ret = true;
2044 } else {
2045 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2046 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2047 2061
2048 if ((tmp & TRANS_DDI_PORT_MASK) 2062 goto out;
2049 == TRANS_DDI_SELECT_PORT(port)) { 2063 }
2050 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
2051 return false;
2052 2064
2053 *pipe = i; 2065 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
2054 return true; 2066 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
2055 } 2067
2068 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
2069 if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
2070 TRANS_DDI_MODE_SELECT_DP_MST)
2071 goto out;
2072
2073 *pipe = i;
2074 ret = true;
2075
2076 goto out;
2056 } 2077 }
2057 } 2078 }
2058 2079
2059 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 2080 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
2060 2081
2061 return false; 2082out:
2083 intel_display_power_put(dev_priv, power_domain);
2084
2085 return ret;
2062} 2086}
2063 2087
2064void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2088void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2507,12 +2531,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
2507{ 2531{
2508 uint32_t val; 2532 uint32_t val;
2509 2533
2510 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2534 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2511 return false; 2535 return false;
2512 2536
2513 val = I915_READ(WRPLL_CTL(pll->id)); 2537 val = I915_READ(WRPLL_CTL(pll->id));
2514 hw_state->wrpll = val; 2538 hw_state->wrpll = val;
2515 2539
2540 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2541
2516 return val & WRPLL_PLL_ENABLE; 2542 return val & WRPLL_PLL_ENABLE;
2517} 2543}
2518 2544
@@ -2522,12 +2548,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
2522{ 2548{
2523 uint32_t val; 2549 uint32_t val;
2524 2550
2525 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2551 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2526 return false; 2552 return false;
2527 2553
2528 val = I915_READ(SPLL_CTL); 2554 val = I915_READ(SPLL_CTL);
2529 hw_state->spll = val; 2555 hw_state->spll = val;
2530 2556
2557 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2558
2531 return val & SPLL_PLL_ENABLE; 2559 return val & SPLL_PLL_ENABLE;
2532} 2560}
2533 2561
@@ -2644,16 +2672,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2644 uint32_t val; 2672 uint32_t val;
2645 unsigned int dpll; 2673 unsigned int dpll;
2646 const struct skl_dpll_regs *regs = skl_dpll_regs; 2674 const struct skl_dpll_regs *regs = skl_dpll_regs;
2675 bool ret;
2647 2676
2648 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2677 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2649 return false; 2678 return false;
2650 2679
2680 ret = false;
2681
2651 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ 2682 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
2652 dpll = pll->id + 1; 2683 dpll = pll->id + 1;
2653 2684
2654 val = I915_READ(regs[pll->id].ctl); 2685 val = I915_READ(regs[pll->id].ctl);
2655 if (!(val & LCPLL_PLL_ENABLE)) 2686 if (!(val & LCPLL_PLL_ENABLE))
2656 return false; 2687 goto out;
2657 2688
2658 val = I915_READ(DPLL_CTRL1); 2689 val = I915_READ(DPLL_CTRL1);
2659 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; 2690 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2663,8 +2694,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2663 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); 2694 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
2664 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); 2695 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
2665 } 2696 }
2697 ret = true;
2666 2698
2667 return true; 2699out:
2700 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
2701
2702 return ret;
2668} 2703}
2669 2704
2670static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) 2705static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2931,13 +2966,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2931{ 2966{
2932 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ 2967 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
2933 uint32_t val; 2968 uint32_t val;
2969 bool ret;
2934 2970
2935 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 2971 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
2936 return false; 2972 return false;
2937 2973
2974 ret = false;
2975
2938 val = I915_READ(BXT_PORT_PLL_ENABLE(port)); 2976 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
2939 if (!(val & PORT_PLL_ENABLE)) 2977 if (!(val & PORT_PLL_ENABLE))
2940 return false; 2978 goto out;
2941 2979
2942 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); 2980 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
2943 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; 2981 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2984,7 +3022,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2984 I915_READ(BXT_PORT_PCS_DW12_LN23(port))); 3022 I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
2985 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; 3023 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
2986 3024
2987 return true; 3025 ret = true;
3026
3027out:
3028 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
3029
3030 return ret;
2988} 3031}
2989 3032
2990static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) 3033static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3119,11 +3162,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
3119{ 3162{
3120 u32 temp; 3163 u32 temp;
3121 3164
3122 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 3165 if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
3123 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 3166 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
3167
3168 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
3169
3124 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) 3170 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
3125 return true; 3171 return true;
3126 } 3172 }
3173
3127 return false; 3174 return false;
3128} 3175}
3129 3176
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb65725c04..46947fffd599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1351 bool cur_state; 1351 bool cur_state;
1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1352 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1353 pipe); 1353 pipe);
1354 enum intel_display_power_domain power_domain;
1354 1355
1355 /* if we need the pipe quirk it must be always on */ 1356 /* if we need the pipe quirk it must be always on */
1356 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1357 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1357 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1358 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1358 state = true; 1359 state = true;
1359 1360
1360 if (!intel_display_power_is_enabled(dev_priv, 1361 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1361 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1362 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1362 cur_state = false;
1363 } else {
1364 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1363 u32 val = I915_READ(PIPECONF(cpu_transcoder));
1365 cur_state = !!(val & PIPECONF_ENABLE); 1364 cur_state = !!(val & PIPECONF_ENABLE);
1365
1366 intel_display_power_put(dev_priv, power_domain);
1367 } else {
1368 cur_state = false;
1366 } 1369 }
1367 1370
1368 I915_STATE_WARN(cur_state != state, 1371 I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8171{ 8174{
8172 struct drm_device *dev = crtc->base.dev; 8175 struct drm_device *dev = crtc->base.dev;
8173 struct drm_i915_private *dev_priv = dev->dev_private; 8176 struct drm_i915_private *dev_priv = dev->dev_private;
8177 enum intel_display_power_domain power_domain;
8174 uint32_t tmp; 8178 uint32_t tmp;
8179 bool ret;
8175 8180
8176 if (!intel_display_power_is_enabled(dev_priv, 8181 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8177 POWER_DOMAIN_PIPE(crtc->pipe))) 8182 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8178 return false; 8183 return false;
8179 8184
8180 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8185 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8181 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8186 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8182 8187
8188 ret = false;
8189
8183 tmp = I915_READ(PIPECONF(crtc->pipe)); 8190 tmp = I915_READ(PIPECONF(crtc->pipe));
8184 if (!(tmp & PIPECONF_ENABLE)) 8191 if (!(tmp & PIPECONF_ENABLE))
8185 return false; 8192 goto out;
8186 8193
8187 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8194 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8188 switch (tmp & PIPECONF_BPC_MASK) { 8195 switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8262 pipe_config->base.adjusted_mode.crtc_clock = 8269 pipe_config->base.adjusted_mode.crtc_clock =
8263 pipe_config->port_clock / pipe_config->pixel_multiplier; 8270 pipe_config->port_clock / pipe_config->pixel_multiplier;
8264 8271
8265 return true; 8272 ret = true;
8273
8274out:
8275 intel_display_power_put(dev_priv, power_domain);
8276
8277 return ret;
8266} 8278}
8267 8279
8268static void ironlake_init_pch_refclk(struct drm_device *dev) 8280static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9366{ 9378{
9367 struct drm_device *dev = crtc->base.dev; 9379 struct drm_device *dev = crtc->base.dev;
9368 struct drm_i915_private *dev_priv = dev->dev_private; 9380 struct drm_i915_private *dev_priv = dev->dev_private;
9381 enum intel_display_power_domain power_domain;
9369 uint32_t tmp; 9382 uint32_t tmp;
9383 bool ret;
9370 9384
9371 if (!intel_display_power_is_enabled(dev_priv, 9385 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9372 POWER_DOMAIN_PIPE(crtc->pipe))) 9386 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9373 return false; 9387 return false;
9374 9388
9375 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9389 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9376 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9390 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9377 9391
9392 ret = false;
9378 tmp = I915_READ(PIPECONF(crtc->pipe)); 9393 tmp = I915_READ(PIPECONF(crtc->pipe));
9379 if (!(tmp & PIPECONF_ENABLE)) 9394 if (!(tmp & PIPECONF_ENABLE))
9380 return false; 9395 goto out;
9381 9396
9382 switch (tmp & PIPECONF_BPC_MASK) { 9397 switch (tmp & PIPECONF_BPC_MASK) {
9383 case PIPECONF_6BPC: 9398 case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9440 9455
9441 ironlake_get_pfit_config(crtc, pipe_config); 9456 ironlake_get_pfit_config(crtc, pipe_config);
9442 9457
9443 return true; 9458 ret = true;
9459
9460out:
9461 intel_display_power_put(dev_priv, power_domain);
9462
9463 return ret;
9444} 9464}
9445 9465
9446static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9466static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9950{ 9970{
9951 struct drm_device *dev = crtc->base.dev; 9971 struct drm_device *dev = crtc->base.dev;
9952 struct drm_i915_private *dev_priv = dev->dev_private; 9972 struct drm_i915_private *dev_priv = dev->dev_private;
9953 enum intel_display_power_domain pfit_domain; 9973 enum intel_display_power_domain power_domain;
9974 unsigned long power_domain_mask;
9954 uint32_t tmp; 9975 uint32_t tmp;
9976 bool ret;
9955 9977
9956 if (!intel_display_power_is_enabled(dev_priv, 9978 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9957 POWER_DOMAIN_PIPE(crtc->pipe))) 9979 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9958 return false; 9980 return false;
9981 power_domain_mask = BIT(power_domain);
9982
9983 ret = false;
9959 9984
9960 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9961 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9986 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9982 pipe_config->cpu_transcoder = TRANSCODER_EDP; 10007 pipe_config->cpu_transcoder = TRANSCODER_EDP;
9983 } 10008 }
9984 10009
9985 if (!intel_display_power_is_enabled(dev_priv, 10010 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9986 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 10011 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9987 return false; 10012 goto out;
10013 power_domain_mask |= BIT(power_domain);
9988 10014
9989 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10015 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9990 if (!(tmp & PIPECONF_ENABLE)) 10016 if (!(tmp & PIPECONF_ENABLE))
9991 return false; 10017 goto out;
9992 10018
9993 haswell_get_ddi_port_state(crtc, pipe_config); 10019 haswell_get_ddi_port_state(crtc, pipe_config);
9994 10020
@@ -9998,14 +10024,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9998 skl_init_scalers(dev, crtc, pipe_config); 10024 skl_init_scalers(dev, crtc, pipe_config);
9999 } 10025 }
10000 10026
10001 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10002
10003 if (INTEL_INFO(dev)->gen >= 9) { 10027 if (INTEL_INFO(dev)->gen >= 9) {
10004 pipe_config->scaler_state.scaler_id = -1; 10028 pipe_config->scaler_state.scaler_id = -1;
10005 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 10029 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10006 } 10030 }
10007 10031
10008 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 10032 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10033 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10034 power_domain_mask |= BIT(power_domain);
10009 if (INTEL_INFO(dev)->gen >= 9) 10035 if (INTEL_INFO(dev)->gen >= 9)
10010 skylake_get_pfit_config(crtc, pipe_config); 10036 skylake_get_pfit_config(crtc, pipe_config);
10011 else 10037 else
@@ -10023,7 +10049,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10023 pipe_config->pixel_multiplier = 1; 10049 pipe_config->pixel_multiplier = 1;
10024 } 10050 }
10025 10051
10026 return true; 10052 ret = true;
10053
10054out:
10055 for_each_power_domain(power_domain, power_domain_mask)
10056 intel_display_power_put(dev_priv, power_domain);
10057
10058 return ret;
10027} 10059}
10028 10060
10029static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) 10061static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13630{ 13662{
13631 uint32_t val; 13663 uint32_t val;
13632 13664
13633 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 13665 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13634 return false; 13666 return false;
13635 13667
13636 val = I915_READ(PCH_DPLL(pll->id)); 13668 val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13638 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 13670 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13639 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 13671 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13640 13672
13673 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13674
13641 return val & DPLL_VCO_ENABLE; 13675 return val & DPLL_VCO_ENABLE;
13642} 13676}
13643 13677
@@ -15568,10 +15602,12 @@ void i915_redisable_vga(struct drm_device *dev)
15568 * level, just check if the power well is enabled instead of trying to 15602 * level, just check if the power well is enabled instead of trying to
15569 * follow the "don't touch the power well if we don't need it" policy 15603 * follow the "don't touch the power well if we don't need it" policy
15570 * the rest of the driver uses. */ 15604 * the rest of the driver uses. */
15571 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 15605 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15572 return; 15606 return;
15573 15607
15574 i915_redisable_vga_power_on(dev); 15608 i915_redisable_vga_power_on(dev);
15609
15610 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15575} 15611}
15576 15612
15577static bool primary_get_hw_state(struct intel_plane *plane) 15613static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d313cb9..1d8de43bed56 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2362 struct drm_i915_private *dev_priv = dev->dev_private; 2362 struct drm_i915_private *dev_priv = dev->dev_private;
2363 enum intel_display_power_domain power_domain; 2363 enum intel_display_power_domain power_domain;
2364 u32 tmp; 2364 u32 tmp;
2365 bool ret;
2365 2366
2366 power_domain = intel_display_port_power_domain(encoder); 2367 power_domain = intel_display_port_power_domain(encoder);
2367 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 2368 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2368 return false; 2369 return false;
2369 2370
2371 ret = false;
2372
2370 tmp = I915_READ(intel_dp->output_reg); 2373 tmp = I915_READ(intel_dp->output_reg);
2371 2374
2372 if (!(tmp & DP_PORT_EN)) 2375 if (!(tmp & DP_PORT_EN))
2373 return false; 2376 goto out;
2374 2377
2375 if (IS_GEN7(dev) && port == PORT_A) { 2378 if (IS_GEN7(dev) && port == PORT_A) {
2376 *pipe = PORT_TO_PIPE_CPT(tmp); 2379 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2381 u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); 2384 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2382 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { 2385 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2383 *pipe = p; 2386 *pipe = p;
2384 return true; 2387 ret = true;
2388
2389 goto out;
2385 } 2390 }
2386 } 2391 }
2387 2392
@@ -2393,7 +2398,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2393 *pipe = PORT_TO_PIPE(tmp); 2398 *pipe = PORT_TO_PIPE(tmp);
2394 } 2399 }
2395 2400
2396 return true; 2401 ret = true;
2402
2403out:
2404 intel_display_power_put(dev_priv, power_domain);
2405
2406 return ret;
2397} 2407}
2398 2408
2399static void intel_dp_get_config(struct intel_encoder *encoder, 2409static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4493,20 +4503,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4493 return I915_READ(PORT_HOTPLUG_STAT) & bit; 4503 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4494} 4504}
4495 4505
4496static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, 4506static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4497 struct intel_digital_port *port) 4507 struct intel_digital_port *port)
4498{ 4508{
4499 u32 bit; 4509 u32 bit;
4500 4510
4501 switch (port->port) { 4511 switch (port->port) {
4502 case PORT_B: 4512 case PORT_B:
4503 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 4513 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4504 break; 4514 break;
4505 case PORT_C: 4515 case PORT_C:
4506 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 4516 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4507 break; 4517 break;
4508 case PORT_D: 4518 case PORT_D:
4509 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 4519 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4510 break; 4520 break;
4511 default: 4521 default:
4512 MISSING_CASE(port->port); 4522 MISSING_CASE(port->port);
@@ -4558,8 +4568,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4558 return cpt_digital_port_connected(dev_priv, port); 4568 return cpt_digital_port_connected(dev_priv, port);
4559 else if (IS_BROXTON(dev_priv)) 4569 else if (IS_BROXTON(dev_priv))
4560 return bxt_digital_port_connected(dev_priv, port); 4570 return bxt_digital_port_connected(dev_priv, port);
4561 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4571 else if (IS_GM45(dev_priv))
4562 return vlv_digital_port_connected(dev_priv, port); 4572 return gm45_digital_port_connected(dev_priv, port);
4563 else 4573 else
4564 return g4x_digital_port_connected(dev_priv, port); 4574 return g4x_digital_port_connected(dev_priv, port);
4565} 4575}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 88887938e0bf..0b8eefc2acc5 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
215 } 215 }
216} 216}
217 217
218static void 218/*
219intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) 219 * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
220 * or 1.2 devices that support it, Training Pattern 2 otherwise.
221 */
222static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
220{ 223{
221 bool channel_eq = false; 224 u32 training_pattern = DP_TRAINING_PATTERN_2;
222 int tries, cr_tries; 225 bool source_tps3, sink_tps3;
223 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
224 226
225 /* 227 /*
226 * Training Pattern 3 for HBR2 or 1.2 devices that support it.
227 *
228 * Intel platforms that support HBR2 also support TPS3. TPS3 support is 228 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
229 * also mandatory for downstream devices that support HBR2. 229 * also mandatory for downstream devices that support HBR2. However, not
230 * all sinks follow the spec.
230 * 231 *
231 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is 232 * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
232 * supported but still not enabled. 233 * supported in source but still not enabled.
233 */ 234 */
234 if (intel_dp_source_supports_hbr2(intel_dp) && 235 source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
235 drm_dp_tps3_supported(intel_dp->dpcd)) 236 sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
237
238 if (source_tps3 && sink_tps3) {
236 training_pattern = DP_TRAINING_PATTERN_3; 239 training_pattern = DP_TRAINING_PATTERN_3;
237 else if (intel_dp->link_rate == 540000) 240 } else if (intel_dp->link_rate == 540000) {
238 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); 241 if (!source_tps3)
242 DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
243 if (!sink_tps3)
244 DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
245 }
246
247 return training_pattern;
248}
249
250static void
251intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
252{
253 bool channel_eq = false;
254 int tries, cr_tries;
255 u32 training_pattern;
256
257 training_pattern = intel_dp_training_pattern(intel_dp);
239 258
240 /* channel equalization */ 259 /* channel equalization */
241 if (!intel_dp_set_link_train(intel_dp, 260 if (!intel_dp_set_link_train(intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea5415851c6e..df7f3cb66056 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1428 enum intel_display_power_domain domain); 1428 enum intel_display_power_domain domain);
1429void intel_display_power_get(struct drm_i915_private *dev_priv, 1429void intel_display_power_get(struct drm_i915_private *dev_priv,
1430 enum intel_display_power_domain domain); 1430 enum intel_display_power_domain domain);
1431bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain);
1431void intel_display_power_put(struct drm_i915_private *dev_priv, 1433void intel_display_power_put(struct drm_i915_private *dev_priv,
1432 enum intel_display_power_domain domain); 1434 enum intel_display_power_domain domain);
1433 1435
@@ -1514,6 +1516,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
1514 enable_rpm_wakeref_asserts(dev_priv) 1516 enable_rpm_wakeref_asserts(dev_priv)
1515 1517
1516void intel_runtime_pm_get(struct drm_i915_private *dev_priv); 1518void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1519bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
1517void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); 1520void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1518void intel_runtime_pm_put(struct drm_i915_private *dev_priv); 1521void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1519 1522
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa2f616..0193c62a53ef 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
664 struct drm_device *dev = encoder->base.dev; 664 struct drm_device *dev = encoder->base.dev;
665 enum intel_display_power_domain power_domain; 665 enum intel_display_power_domain power_domain;
666 enum port port; 666 enum port port;
667 bool ret;
667 668
668 DRM_DEBUG_KMS("\n"); 669 DRM_DEBUG_KMS("\n");
669 670
670 power_domain = intel_display_port_power_domain(encoder); 671 power_domain = intel_display_port_power_domain(encoder);
671 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 672 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
672 return false; 673 return false;
673 674
675 ret = false;
676
674 /* XXX: this only works for one DSI output */ 677 /* XXX: this only works for one DSI output */
675 for_each_dsi_port(port, intel_dsi->ports) { 678 for_each_dsi_port(port, intel_dsi->ports) {
676 i915_reg_t ctrl_reg = IS_BROXTON(dev) ? 679 i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
691 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { 694 if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
692 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { 695 if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
693 *pipe = port == PORT_A ? PIPE_A : PIPE_B; 696 *pipe = port == PORT_A ? PIPE_A : PIPE_B;
694 return true; 697 ret = true;
698
699 goto out;
695 } 700 }
696 } 701 }
697 } 702 }
703out:
704 intel_display_power_put(dev_priv, power_domain);
698 705
699 return false; 706 return ret;
700} 707}
701 708
702static void intel_dsi_get_config(struct intel_encoder *encoder, 709static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac305da..e8113ad65477 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
204 struct drm_device *dev = intel_dsi->base.base.dev; 204 struct drm_device *dev = intel_dsi->base.base.dev;
205 struct drm_i915_private *dev_priv = dev->dev_private; 205 struct drm_i915_private *dev_priv = dev->dev_private;
206 206
207 if (dev_priv->vbt.dsi.seq_version >= 3)
208 data++;
209
207 gpio = *data++; 210 gpio = *data++;
208 211
209 /* pull up/down */ 212 /* pull up/down */
210 action = *data++; 213 action = *data++ & 1;
214
215 if (gpio >= ARRAY_SIZE(gtable)) {
216 DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
217 goto out;
218 }
219
220 if (!IS_VALLEYVIEW(dev_priv)) {
221 DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
222 goto out;
223 }
224
225 if (dev_priv->vbt.dsi.seq_version >= 3) {
226 DRM_DEBUG_KMS("GPIO element v3 not supported\n");
227 goto out;
228 }
211 229
212 function = gtable[gpio].function_reg; 230 function = gtable[gpio].function_reg;
213 pad = gtable[gpio].pad_reg; 231 pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
226 vlv_gpio_nc_write(dev_priv, pad, val); 244 vlv_gpio_nc_write(dev_priv, pad, val);
227 mutex_unlock(&dev_priv->sb_lock); 245 mutex_unlock(&dev_priv->sb_lock);
228 246
247out:
229 return data; 248 return data;
230} 249}
231 250
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639a489d..cb5d1b15755c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 880 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
881 enum intel_display_power_domain power_domain; 881 enum intel_display_power_domain power_domain;
882 u32 tmp; 882 u32 tmp;
883 bool ret;
883 884
884 power_domain = intel_display_port_power_domain(encoder); 885 power_domain = intel_display_port_power_domain(encoder);
885 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 886 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
886 return false; 887 return false;
887 888
889 ret = false;
890
888 tmp = I915_READ(intel_hdmi->hdmi_reg); 891 tmp = I915_READ(intel_hdmi->hdmi_reg);
889 892
890 if (!(tmp & SDVO_ENABLE)) 893 if (!(tmp & SDVO_ENABLE))
891 return false; 894 goto out;
892 895
893 if (HAS_PCH_CPT(dev)) 896 if (HAS_PCH_CPT(dev))
894 *pipe = PORT_TO_PIPE_CPT(tmp); 897 *pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
897 else 900 else
898 *pipe = PORT_TO_PIPE(tmp); 901 *pipe = PORT_TO_PIPE(tmp);
899 902
900 return true; 903 ret = true;
904
905out:
906 intel_display_power_put(dev_priv, power_domain);
907
908 return ret;
901} 909}
902 910
903static void intel_hdmi_get_config(struct intel_encoder *encoder, 911static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 25254b5c1ac5..deb8282c26d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
683 return 0; 683 return 0;
684 684
685err: 685err:
686 while (--pin) { 686 while (pin--) {
687 if (!intel_gmbus_is_valid_pin(dev_priv, pin)) 687 if (!intel_gmbus_is_valid_pin(dev_priv, pin))
688 continue; 688 continue;
689 689
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240caf81..bc04d8d29acb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
76 enum intel_display_power_domain power_domain; 76 enum intel_display_power_domain power_domain;
77 u32 tmp; 77 u32 tmp;
78 bool ret;
78 79
79 power_domain = intel_display_port_power_domain(encoder); 80 power_domain = intel_display_port_power_domain(encoder);
80 if (!intel_display_power_is_enabled(dev_priv, power_domain)) 81 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
81 return false; 82 return false;
82 83
84 ret = false;
85
83 tmp = I915_READ(lvds_encoder->reg); 86 tmp = I915_READ(lvds_encoder->reg);
84 87
85 if (!(tmp & LVDS_PORT_EN)) 88 if (!(tmp & LVDS_PORT_EN))
86 return false; 89 goto out;
87 90
88 if (HAS_PCH_CPT(dev)) 91 if (HAS_PCH_CPT(dev))
89 *pipe = PORT_TO_PIPE_CPT(tmp); 92 *pipe = PORT_TO_PIPE_CPT(tmp);
90 else 93 else
91 *pipe = PORT_TO_PIPE(tmp); 94 *pipe = PORT_TO_PIPE(tmp);
92 95
93 return true; 96 ret = true;
97
98out:
99 intel_display_power_put(dev_priv, power_domain);
100
101 return ret;
94} 102}
95 103
96static void intel_lvds_get_config(struct intel_encoder *encoder, 104static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb5fa05cf476..b28c29f20e75 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1783,16 +1783,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
1783 const struct intel_plane_state *pstate, 1783 const struct intel_plane_state *pstate,
1784 uint32_t mem_value) 1784 uint32_t mem_value)
1785{ 1785{
1786 int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; 1786 /*
1787 * We treat the cursor plane as always-on for the purposes of watermark
1788 * calculation. Until we have two-stage watermark programming merged,
1789 * this is necessary to avoid flickering.
1790 */
1791 int cpp = 4;
1792 int width = pstate->visible ? pstate->base.crtc_w : 64;
1787 1793
1788 if (!cstate->base.active || !pstate->visible) 1794 if (!cstate->base.active)
1789 return 0; 1795 return 0;
1790 1796
1791 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1797 return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
1792 cstate->base.adjusted_mode.crtc_htotal, 1798 cstate->base.adjusted_mode.crtc_htotal,
1793 drm_rect_width(&pstate->dst), 1799 width, cpp, mem_value);
1794 bpp,
1795 mem_value);
1796} 1800}
1797 1801
1798/* Only for WM_LP. */ 1802/* Only for WM_LP. */
@@ -2825,7 +2829,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2825 memset(ddb, 0, sizeof(*ddb)); 2829 memset(ddb, 0, sizeof(*ddb));
2826 2830
2827 for_each_pipe(dev_priv, pipe) { 2831 for_each_pipe(dev_priv, pipe) {
2828 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) 2832 enum intel_display_power_domain power_domain;
2833
2834 power_domain = POWER_DOMAIN_PIPE(pipe);
2835 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2829 continue; 2836 continue;
2830 2837
2831 for_each_plane(dev_priv, pipe, plane) { 2838 for_each_plane(dev_priv, pipe, plane) {
@@ -2837,6 +2844,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2837 val = I915_READ(CUR_BUF_CFG(pipe)); 2844 val = I915_READ(CUR_BUF_CFG(pipe));
2838 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2845 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
2839 val); 2846 val);
2847
2848 intel_display_power_put(dev_priv, power_domain);
2840 } 2849 }
2841} 2850}
2842 2851
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbffe829a..678ed3475d7e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@ static void gen9_set_dc_state_debugmask_memory_up(
470 } 470 }
471} 471}
472 472
473static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
474 u32 state)
475{
476 int rewrites = 0;
477 int rereads = 0;
478 u32 v;
479
480 I915_WRITE(DC_STATE_EN, state);
481
482 /* It has been observed that disabling the dc6 state sometimes
483 * doesn't stick and dmc keeps returning old value. Make sure
484 * the write really sticks enough times and also force rewrite until
485 * we are confident that state is exactly what we want.
486 */
487 do {
488 v = I915_READ(DC_STATE_EN);
489
490 if (v != state) {
491 I915_WRITE(DC_STATE_EN, state);
492 rewrites++;
493 rereads = 0;
494 } else if (rereads++ > 5) {
495 break;
496 }
497
498 } while (rewrites < 100);
499
500 if (v != state)
501 DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
502 state, v);
503
504 /* Most of the times we need one retry, avoid spam */
505 if (rewrites > 1)
506 DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
507 state, rewrites);
508}
509
473static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) 510static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
474{ 511{
475 uint32_t val; 512 uint32_t val;
@@ -494,10 +531,18 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
494 val = I915_READ(DC_STATE_EN); 531 val = I915_READ(DC_STATE_EN);
495 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", 532 DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
496 val & mask, state); 533 val & mask, state);
534
535 /* Check if DMC is ignoring our DC state requests */
536 if ((val & mask) != dev_priv->csr.dc_state)
537 DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
538 dev_priv->csr.dc_state, val & mask);
539
497 val &= ~mask; 540 val &= ~mask;
498 val |= state; 541 val |= state;
499 I915_WRITE(DC_STATE_EN, val); 542
500 POSTING_READ(DC_STATE_EN); 543 gen9_write_dc_state(dev_priv, val);
544
545 dev_priv->csr.dc_state = val & mask;
501} 546}
502 547
503void bxt_enable_dc9(struct drm_i915_private *dev_priv) 548void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1442 chv_set_pipe_power_well(dev_priv, power_well, false); 1487 chv_set_pipe_power_well(dev_priv, power_well, false);
1443} 1488}
1444 1489
1490static void
1491__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1492 enum intel_display_power_domain domain)
1493{
1494 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1495 struct i915_power_well *power_well;
1496 int i;
1497
1498 for_each_power_well(i, power_well, BIT(domain), power_domains) {
1499 if (!power_well->count++)
1500 intel_power_well_enable(dev_priv, power_well);
1501 }
1502
1503 power_domains->domain_use_count[domain]++;
1504}
1505
1445/** 1506/**
1446 * intel_display_power_get - grab a power domain reference 1507 * intel_display_power_get - grab a power domain reference
1447 * @dev_priv: i915 device instance 1508 * @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
1457void intel_display_power_get(struct drm_i915_private *dev_priv, 1518void intel_display_power_get(struct drm_i915_private *dev_priv,
1458 enum intel_display_power_domain domain) 1519 enum intel_display_power_domain domain)
1459{ 1520{
1460 struct i915_power_domains *power_domains; 1521 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1461 struct i915_power_well *power_well;
1462 int i;
1463 1522
1464 intel_runtime_pm_get(dev_priv); 1523 intel_runtime_pm_get(dev_priv);
1465 1524
1466 power_domains = &dev_priv->power_domains; 1525 mutex_lock(&power_domains->lock);
1526
1527 __intel_display_power_get_domain(dev_priv, domain);
1528
1529 mutex_unlock(&power_domains->lock);
1530}
1531
1532/**
1533 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
1534 * @dev_priv: i915 device instance
1535 * @domain: power domain to reference
1536 *
1537 * This function grabs a power domain reference for @domain and ensures that the
1538 * power domain and all its parents are powered up. Therefore users should only
1539 * grab a reference to the innermost power domain they need.
1540 *
1541 * Any power domain reference obtained by this function must have a symmetric
1542 * call to intel_display_power_put() to release the reference again.
1543 */
1544bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
1545 enum intel_display_power_domain domain)
1546{
1547 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1548 bool is_enabled;
1549
1550 if (!intel_runtime_pm_get_if_in_use(dev_priv))
1551 return false;
1467 1552
1468 mutex_lock(&power_domains->lock); 1553 mutex_lock(&power_domains->lock);
1469 1554
1470 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1555 if (__intel_display_power_is_enabled(dev_priv, domain)) {
1471 if (!power_well->count++) 1556 __intel_display_power_get_domain(dev_priv, domain);
1472 intel_power_well_enable(dev_priv, power_well); 1557 is_enabled = true;
1558 } else {
1559 is_enabled = false;
1473 } 1560 }
1474 1561
1475 power_domains->domain_use_count[domain]++;
1476
1477 mutex_unlock(&power_domains->lock); 1562 mutex_unlock(&power_domains->lock);
1563
1564 if (!is_enabled)
1565 intel_runtime_pm_put(dev_priv);
1566
1567 return is_enabled;
1478} 1568}
1479 1569
1480/** 1570/**
@@ -2246,6 +2336,43 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2246} 2336}
2247 2337
2248/** 2338/**
2339 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
2340 * @dev_priv: i915 device instance
2341 *
2342 * This function grabs a device-level runtime pm reference if the device is
2343 * already in use and ensures that it is powered up.
2344 *
2345 * Any runtime pm reference obtained by this function must have a symmetric
2346 * call to intel_runtime_pm_put() to release the reference again.
2347 */
2348bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2349{
2350 struct drm_device *dev = dev_priv->dev;
2351 struct device *device = &dev->pdev->dev;
2352 int ret;
2353
2354 if (!IS_ENABLED(CONFIG_PM))
2355 return true;
2356
2357 ret = pm_runtime_get_if_in_use(device);
2358
2359 /*
2360 * In cases runtime PM is disabled by the RPM core and we get an
2361 * -EINVAL return value we are not supposed to call this function,
2362 * since the power state is undefined. This applies atm to the
2363 * late/early system suspend/resume handlers.
2364 */
2365 WARN_ON_ONCE(ret < 0);
2366 if (ret <= 0)
2367 return false;
2368
2369 atomic_inc(&dev_priv->pm.wakeref_count);
2370 assert_rpm_wakelock_held(dev_priv);
2371
2372 return true;
2373}
2374
2375/**
2249 * intel_runtime_pm_get_noresume - grab a runtime pm reference 2376 * intel_runtime_pm_get_noresume - grab a runtime pm reference
2250 * @dev_priv: i915 device instance 2377 * @dev_priv: i915 device instance
2251 * 2378 *
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d05de9..e3acc35e3805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1520 DMA_BIDIRECTIONAL); 1520 DMA_BIDIRECTIONAL);
1521 1521
1522 if (dma_mapping_error(pdev, addr)) { 1522 if (dma_mapping_error(pdev, addr)) {
1523 while (--i) { 1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i], 1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL); 1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0; 1526 ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d3cd18..20935eb2a09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
635 nv_crtc->lut.depth = 0; 635 nv_crtc->lut.depth = 0;
636 } 636 }
637 637
638 /* Make sure that drm and hw vblank irqs get resumed if needed. */
639 for (head = 0; head < dev->mode_config.num_crtc; head++)
640 drm_vblank_on(dev, head);
641
642 /* This should ensure we don't hit a locking problem when someone 638 /* This should ensure we don't hit a locking problem when someone
643 * wakes us up via a connector. We should never go into suspend 639 * wakes us up via a connector. We should never go into suspend
644 * while the display is on anyways. 640 * while the display is on anyways.
@@ -648,6 +644,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
648 644
649 drm_helper_resume_force_mode(dev); 645 drm_helper_resume_force_mode(dev);
650 646
647 /* Make sure that drm and hw vblank irqs get resumed if needed. */
648 for (head = 0; head < dev->mode_config.num_crtc; head++)
649 drm_vblank_on(dev, head);
650
651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
653 653
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
24static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
25{ 25{
26 const struct nvkm_device_tegra_func *func; 26 const struct nvkm_device_tegra_func *func;
27 struct nvkm_device *device; 27 struct nvkm_device *device = NULL;
28 struct drm_device *drm; 28 struct drm_device *drm;
29 int ret; 29 int ret;
30 30
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
252 252
253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) 253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
254 return -ENOMEM; 254 return -ENOMEM;
255 *pdevice = &tdev->device; 255
256 tdev->func = func; 256 tdev->func = func;
257 tdev->pdev = pdev; 257 tdev->pdev = pdev;
258 tdev->irq = -1; 258 tdev->irq = -1;
259 259
260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); 260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
261 if (IS_ERR(tdev->vdd)) 261 if (IS_ERR(tdev->vdd)) {
262 return PTR_ERR(tdev->vdd); 262 ret = PTR_ERR(tdev->vdd);
263 goto free;
264 }
263 265
264 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); 266 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
265 if (IS_ERR(tdev->rst)) 267 if (IS_ERR(tdev->rst)) {
266 return PTR_ERR(tdev->rst); 268 ret = PTR_ERR(tdev->rst);
269 goto free;
270 }
267 271
268 tdev->clk = devm_clk_get(&pdev->dev, "gpu"); 272 tdev->clk = devm_clk_get(&pdev->dev, "gpu");
269 if (IS_ERR(tdev->clk)) 273 if (IS_ERR(tdev->clk)) {
270 return PTR_ERR(tdev->clk); 274 ret = PTR_ERR(tdev->clk);
275 goto free;
276 }
271 277
272 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 278 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
273 if (IS_ERR(tdev->clk_pwr)) 279 if (IS_ERR(tdev->clk_pwr)) {
274 return PTR_ERR(tdev->clk_pwr); 280 ret = PTR_ERR(tdev->clk_pwr);
281 goto free;
282 }
275 283
276 nvkm_device_tegra_probe_iommu(tdev); 284 nvkm_device_tegra_probe_iommu(tdev);
277 285
278 ret = nvkm_device_tegra_power_up(tdev); 286 ret = nvkm_device_tegra_power_up(tdev);
279 if (ret) 287 if (ret)
280 return ret; 288 goto remove;
281 289
282 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; 290 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
283 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, 291 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
285 cfg, dbg, detect, mmio, subdev_mask, 293 cfg, dbg, detect, mmio, subdev_mask,
286 &tdev->device); 294 &tdev->device);
287 if (ret) 295 if (ret)
288 return ret; 296 goto powerdown;
297
298 *pdevice = &tdev->device;
289 299
290 return 0; 300 return 0;
301
302powerdown:
303 nvkm_device_tegra_power_down(tdev);
304remove:
305 nvkm_device_tegra_remove_iommu(tdev);
306free:
307 kfree(tdev);
308 return ret;
291} 309}
292#else 310#else
293int 311int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
328 .outp = outp, 328 .outp = outp,
329 }, *dp = &_dp; 329 }, *dp = &_dp;
330 u32 datarate = 0; 330 u32 datarate = 0;
331 u8 pwr;
331 int ret; 332 int ret;
332 333
333 if (!outp->base.info.location && disp->func->sor.magic) 334 if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
355 /* disable link interrupt handling during link training */ 356 /* disable link interrupt handling during link training */
356 nvkm_notify_put(&outp->irq); 357 nvkm_notify_put(&outp->irq);
357 358
359 /* ensure sink is not in a low-power state */
360 if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
361 if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
362 pwr &= ~DPCD_SC00_SET_POWER;
363 pwr |= DPCD_SC00_SET_POWER_D0;
364 nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
365 }
366 }
367
358 /* enable down-spreading and execute pre-train script from vbios */ 368 /* enable down-spreading and execute pre-train script from vbios */
359 dp_link_train_init(dp, outp->dpcd[3] & 0x01); 369 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
360 370
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c 71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03 72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
73 73
74/* DPCD Sink Control */
75#define DPCD_SC00 0x00600
76#define DPCD_SC00_SET_POWER 0x03
77#define DPCD_SC00_SET_POWER_D0 0x01
78#define DPCD_SC00_SET_POWER_D3 0x03
79
74void nvkm_dp_train(struct work_struct *); 80void nvkm_dp_train(struct work_struct *);
75#endif 81#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b50a8fd..9f029dda1f07 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area) 68 struct vm_area_struct *area)
69{ 69{
70 WARN_ONCE(1, "not implemented"); 70 WARN_ONCE(1, "not implemented");
71 return ENOSYS; 71 return -ENOSYS;
72} 72}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 298ea1c453c3..2b9ba03a7c1a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
403 struct drm_crtc *crtc = &radeon_crtc->base; 403 struct drm_crtc *crtc = &radeon_crtc->base;
404 unsigned long flags; 404 unsigned long flags;
405 int r; 405 int r;
406 int vpos, hpos, stat, min_udelay; 406 int vpos, hpos, stat, min_udelay = 0;
407 unsigned repcnt = 4;
407 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 408 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
408 409
409 down_read(&rdev->exclusive_lock); 410 down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
454 * In practice this won't execute very often unless on very fast 455 * In practice this won't execute very often unless on very fast
455 * machines because the time window for this to happen is very small. 456 * machines because the time window for this to happen is very small.
456 */ 457 */
457 for (;;) { 458 while (radeon_crtc->enabled && repcnt--) {
458 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 459 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
459 * start in hpos, and to the "fudged earlier" vblank start in 460 * start in hpos, and to the "fudged earlier" vblank start in
460 * vpos. 461 * vpos.
@@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
472 /* Sleep at least until estimated real start of hw vblank */ 473 /* Sleep at least until estimated real start of hw vblank */
473 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 474 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 475 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
476 if (min_udelay > vblank->framedur_ns / 2000) {
477 /* Don't wait ridiculously long - something is wrong */
478 repcnt = 0;
479 break;
480 }
475 usleep_range(min_udelay, 2 * min_udelay); 481 usleep_range(min_udelay, 2 * min_udelay);
476 spin_lock_irqsave(&crtc->dev->event_lock, flags); 482 spin_lock_irqsave(&crtc->dev->event_lock, flags);
477 }; 483 };
478 484
485 if (!repcnt)
486 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
487 "framedur %d, linedur %d, stat %d, vpos %d, "
488 "hpos %d\n", work->crtc_id, min_udelay,
489 vblank->framedur_ns / 1000,
490 vblank->linedur_ns / 1000, stat, vpos, hpos);
491
479 /* do the flip (mmio) */ 492 /* do the flip (mmio) */
480 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); 493 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
481 494
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2989da..ca3be90a3bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
276 if (rdev->irq.installed) { 276 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 277 for (i = 0; i < rdev->num_crtc; i++) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 278 if (rdev->pm.active_crtcs & (1 << i)) {
279 rdev->pm.req_vblank |= (1 << i); 279 /* This can fail if a modeset is in progress */
280 drm_vblank_get(rdev->ddev, i); 280 if (drm_vblank_get(rdev->ddev, i) == 0)
281 rdev->pm.req_vblank |= (1 << i);
282 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i);
281 } 285 }
282 } 286 }
283 } 287 }
@@ -1075,8 +1079,6 @@ force:
1075 1079
1076 /* update display watermarks based on new power state */ 1080 /* update display watermarks based on new power state */
1077 radeon_bandwidth_update(rdev); 1081 radeon_bandwidth_update(rdev);
1078 /* update displays */
1079 radeon_dpm_display_configuration_changed(rdev);
1080 1082
1081 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1083 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1082 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1084 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
@@ -1097,6 +1099,9 @@ force:
1097 1099
1098 radeon_dpm_post_set_power_state(rdev); 1100 radeon_dpm_post_set_power_state(rdev);
1099 1101
1102 /* update displays */
1103 radeon_dpm_display_configuration_changed(rdev);
1104
1100 if (rdev->asic->dpm.force_performance_level) { 1105 if (rdev->asic->dpm.force_performance_level) {
1101 if (rdev->pm.dpm.thermal_active) { 1106 if (rdev->pm.dpm.thermal_active) {
1102 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1107 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
349 /* see if we can skip over some allocations */ 349 /* see if we can skip over some allocations */
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
351 351
352 for (i = 0; i < RADEON_NUM_RINGS; ++i)
353 radeon_fence_ref(fences[i]);
354
352 spin_unlock(&sa_manager->wq.lock); 355 spin_unlock(&sa_manager->wq.lock);
353 r = radeon_fence_wait_any(rdev, fences, false); 356 r = radeon_fence_wait_any(rdev, fences, false);
357 for (i = 0; i < RADEON_NUM_RINGS; ++i)
358 radeon_fence_unref(&fences[i]);
354 spin_lock(&sa_manager->wq.lock); 359 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 360 /* if we have nothing to wait for block */
356 if (r == -ENOENT) { 361 if (r == -ENOENT) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
758 0, PAGE_SIZE, 758 0, PAGE_SIZE,
759 PCI_DMA_BIDIRECTIONAL); 759 PCI_DMA_BIDIRECTIONAL);
760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
761 while (--i) { 761 while (i--) {
762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
764 gtt->ttm.dma_address[i] = 0; 764 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
215 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
216 216
217 if (size == 0) 217 if (size == 0)
218 return NULL; 218 return ERR_PTR(-EINVAL);
219 219
220 /* First, try to get a vc4_bo from the kernel BO cache. */ 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) { 221 if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
237 if (IS_ERR(cma_obj)) { 237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n"); 238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4); 239 vc4_bo_stats_dump(vc4);
240 return NULL; 240 return ERR_PTR(-ENOMEM);
241 } 241 }
242 } 242 }
243 243
@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
259 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
260 260
261 bo = vc4_bo_create(dev, args->size, false); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo) 262 if (IS_ERR(bo))
263 return -ENOMEM; 263 return PTR_ERR(bo);
264 264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base); 266 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
443 * get zeroed, and that might leak data between users. 443 * get zeroed, and that might leak data between users.
444 */ 444 */
445 bo = vc4_bo_create(dev, args->size, false); 445 bo = vc4_bo_create(dev, args->size, false);
446 if (!bo) 446 if (IS_ERR(bo))
447 return -ENOMEM; 447 return PTR_ERR(bo);
448 448
449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
450 drm_gem_object_unreference_unlocked(&bo->base.base); 450 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
496 } 496 }
497 497
498 bo = vc4_bo_create(dev, args->size, true); 498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo) 499 if (IS_ERR(bo))
500 return -ENOMEM; 500 return PTR_ERR(bo);
501 501
502 ret = copy_from_user(bo->base.vaddr, 502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data, 503 (void __user *)(uintptr_t)args->data,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865ec2bae..51a63330d4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
91 struct vc4_bo *overflow_mem; 91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work; 92 struct work_struct overflow_mem_work;
93 93
94 int power_refcount;
95
96 /* Mutex controlling the power refcount. */
97 struct mutex power_lock;
98
94 struct { 99 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer; 100 struct timer_list timer;
97 struct work_struct reset_work; 101 struct work_struct reset_work;
98 } hangcheck; 102 } hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
142}; 146};
143 147
144struct vc4_v3d { 148struct vc4_v3d {
149 struct vc4_dev *vc4;
145 struct platform_device *pdev; 150 struct platform_device *pdev;
146 void __iomem *regs; 151 void __iomem *regs;
147}; 152};
@@ -192,6 +197,11 @@ struct vc4_exec_info {
192 /* Sequence number for this bin/render job. */ 197 /* Sequence number for this bin/render job. */
193 uint64_t seqno; 198 uint64_t seqno;
194 199
200 /* Last current addresses the hardware was processing when the
201 * hangcheck timer checked on us.
202 */
203 uint32_t last_ct0ca, last_ct1ca;
204
195 /* Kernel-space copy of the ioctl arguments */ 205 /* Kernel-space copy of the ioctl arguments */
196 struct drm_vc4_submit_cl *args; 206 struct drm_vc4_submit_cl *args;
197 207
@@ -434,7 +444,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
434extern struct platform_driver vc4_v3d_driver; 444extern struct platform_driver vc4_v3d_driver;
435int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 445int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
436int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 446int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
437int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
438 447
439/* vc4_validate.c */ 448/* vc4_validate.c */
440int 449int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
228 struct vc4_dev *vc4 = to_vc4_dev(dev); 229 struct vc4_dev *vc4 = to_vc4_dev(dev);
229 230
230 DRM_INFO("Resetting GPU.\n"); 231 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false); 232
232 vc4_v3d_set_power(vc4, true); 233 mutex_lock(&vc4->power_lock);
234 if (vc4->power_refcount) {
235 /* Power the device off and back on the by dropping the
236 * reference on runtime PM.
237 */
238 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
239 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
240 }
241 mutex_unlock(&vc4->power_lock);
233 242
234 vc4_irq_reset(dev); 243 vc4_irq_reset(dev);
235 244
@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
257 struct drm_device *dev = (struct drm_device *)data; 266 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev); 267 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca; 268 uint32_t ct0ca, ct1ca;
269 unsigned long irqflags;
270 struct vc4_exec_info *exec;
271
272 spin_lock_irqsave(&vc4->job_lock, irqflags);
273 exec = vc4_first_job(vc4);
260 274
261 /* If idle, we can stop watching for hangs. */ 275 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list)) 276 if (!exec) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
263 return; 278 return;
279 }
264 280
265 ct0ca = V3D_READ(V3D_CTNCA(0)); 281 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1)); 282 ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
268 /* If we've made any progress in execution, rearm the timer 284 /* If we've made any progress in execution, rearm the timer
269 * and wait. 285 * and wait.
270 */ 286 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca || 287 if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
272 ct1ca != vc4->hangcheck.last_ct1ca) { 288 exec->last_ct0ca = ct0ca;
273 vc4->hangcheck.last_ct0ca = ct0ca; 289 exec->last_ct1ca = ct1ca;
274 vc4->hangcheck.last_ct1ca = ct1ca; 290 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 vc4_queue_hangcheck(dev); 291 vc4_queue_hangcheck(dev);
276 return; 292 return;
277 } 293 }
278 294
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296
279 /* We've gone too long with no progress, reset. This has to 297 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and 298 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to. 299 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
340 finish_wait(&vc4->job_wait_queue, &wait); 358 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno); 359 trace_vc4_wait_for_seqno_end(dev, seqno);
342 360
343 if (ret && ret != -ERESTARTSYS) { 361 return ret;
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349} 362}
350 363
351static void 364static void
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
578 } 591 }
579 592
580 bo = vc4_bo_create(dev, exec_size, true); 593 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) { 594 if (IS_ERR(bo)) {
582 DRM_ERROR("Couldn't allocate BO for binning\n"); 595 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM; 596 ret = PTR_ERR(bo);
584 goto fail; 597 goto fail;
585 } 598 }
586 exec->exec_bo = &bo->base; 599 exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@ fail:
617static void 630static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) 631vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{ 632{
633 struct vc4_dev *vc4 = to_vc4_dev(dev);
620 unsigned i; 634 unsigned i;
621 635
622 /* Need the struct lock for drm_gem_object_unreference(). */ 636 /* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
635 } 649 }
636 mutex_unlock(&dev->struct_mutex); 650 mutex_unlock(&dev->struct_mutex);
637 651
652 mutex_lock(&vc4->power_lock);
653 if (--vc4->power_refcount == 0)
654 pm_runtime_put(&vc4->v3d->pdev->dev);
655 mutex_unlock(&vc4->power_lock);
656
638 kfree(exec); 657 kfree(exec);
639} 658}
640 659
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
746 struct drm_gem_object *gem_obj; 765 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo; 766 struct vc4_bo *bo;
748 767
768 if (args->pad != 0)
769 return -EINVAL;
770
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); 771 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) { 772 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 773 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
772 struct vc4_dev *vc4 = to_vc4_dev(dev); 794 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data; 795 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec; 796 struct vc4_exec_info *exec;
775 int ret; 797 int ret = 0;
776 798
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 799 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 800 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
785 return -ENOMEM; 807 return -ENOMEM;
786 } 808 }
787 809
810 mutex_lock(&vc4->power_lock);
811 if (vc4->power_refcount++ == 0)
812 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
813 mutex_unlock(&vc4->power_lock);
814 if (ret < 0) {
815 kfree(exec);
816 return ret;
817 }
818
788 exec->args = args; 819 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list); 820 INIT_LIST_HEAD(&exec->unref_list);
790 821
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev)
839 (unsigned long)dev); 870 (unsigned long)dev);
840 871
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 872 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
873
874 mutex_init(&vc4->power_lock);
842} 875}
843 876
844void 877void
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
57 struct vc4_bo *bo; 57 struct vc4_bo *bo;
58 58
59 bo = vc4_bo_create(dev, 256 * 1024, true); 59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) { 60 if (IS_ERR(bo)) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n"); 61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return; 62 return;
63 } 63 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
316 size += xtiles * ytiles * loop_body_size; 316 size += xtiles * ytiles * loop_body_size;
317 317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base; 318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl) 319 if (IS_ERR(setup->rcl))
320 return -ENOMEM; 320 return PTR_ERR(setup->rcl);
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, 321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list); 322 &exec->unref_list);
323 323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If 324 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has 325 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no 326 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
349 rcl_u32(setup, 0); /* no address, since we're in None mode */ 340 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 } 341 }
351 342
343 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
344 rcl_u32(setup,
345 (setup->color_write ? (setup->color_write->paddr +
346 args->color_write.offset) :
347 0));
348 rcl_u16(setup, args->width);
349 rcl_u16(setup, args->height);
350 rcl_u16(setup, args->color_write.bits);
351
352 for (y = min_y_tile; y <= max_y_tile; y++) { 352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) { 353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile); 354 bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71db978..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "linux/component.h" 19#include "linux/component.h"
20#include "linux/pm_runtime.h"
20#include "vc4_drv.h" 21#include "vc4_drv.h"
21#include "vc4_regs.h" 22#include "vc4_regs.h"
22 23
@@ -144,18 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
144} 145}
145#endif /* CONFIG_DEBUG_FS */ 146#endif /* CONFIG_DEBUG_FS */
146 147
147int
148vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
149{
150 /* XXX: This interface is needed for GPU reset, and the way to
151 * do it is to turn our power domain off and back on. We
152 * can't just reset from within the driver, because the reset
153 * bits are in the power domain's register area, and get set
154 * during the poweron process.
155 */
156 return 0;
157}
158
159static void vc4_v3d_init_hw(struct drm_device *dev) 148static void vc4_v3d_init_hw(struct drm_device *dev)
160{ 149{
161 struct vc4_dev *vc4 = to_vc4_dev(dev); 150 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
167 V3D_WRITE(V3D_VPMBASE, 0); 156 V3D_WRITE(V3D_VPMBASE, 0);
168} 157}
169 158
159#ifdef CONFIG_PM
160static int vc4_v3d_runtime_suspend(struct device *dev)
161{
162 struct vc4_v3d *v3d = dev_get_drvdata(dev);
163 struct vc4_dev *vc4 = v3d->vc4;
164
165 vc4_irq_uninstall(vc4->dev);
166
167 return 0;
168}
169
170static int vc4_v3d_runtime_resume(struct device *dev)
171{
172 struct vc4_v3d *v3d = dev_get_drvdata(dev);
173 struct vc4_dev *vc4 = v3d->vc4;
174
175 vc4_v3d_init_hw(vc4->dev);
176 vc4_irq_postinstall(vc4->dev);
177
178 return 0;
179}
180#endif
181
170static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) 182static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
171{ 183{
172 struct platform_device *pdev = to_platform_device(dev); 184 struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
179 if (!v3d) 191 if (!v3d)
180 return -ENOMEM; 192 return -ENOMEM;
181 193
194 dev_set_drvdata(dev, v3d);
195
182 v3d->pdev = pdev; 196 v3d->pdev = pdev;
183 197
184 v3d->regs = vc4_ioremap_regs(pdev, 0); 198 v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
186 return PTR_ERR(v3d->regs); 200 return PTR_ERR(v3d->regs);
187 201
188 vc4->v3d = v3d; 202 vc4->v3d = v3d;
203 v3d->vc4 = vc4;
189 204
190 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { 205 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
191 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", 206 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
207 return ret; 222 return ret;
208 } 223 }
209 224
225 pm_runtime_enable(dev);
226
210 return 0; 227 return 0;
211} 228}
212 229
@@ -216,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
216 struct drm_device *drm = dev_get_drvdata(master); 233 struct drm_device *drm = dev_get_drvdata(master);
217 struct vc4_dev *vc4 = to_vc4_dev(drm); 234 struct vc4_dev *vc4 = to_vc4_dev(drm);
218 235
236 pm_runtime_disable(dev);
237
219 drm_irq_uninstall(drm); 238 drm_irq_uninstall(drm);
220 239
221 /* Disable the binner's overflow memory address, so the next 240 /* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
228 vc4->v3d = NULL; 247 vc4->v3d = NULL;
229} 248}
230 249
250static const struct dev_pm_ops vc4_v3d_pm_ops = {
251 SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
252};
253
231static const struct component_ops vc4_v3d_ops = { 254static const struct component_ops vc4_v3d_ops = {
232 .bind = vc4_v3d_bind, 255 .bind = vc4_v3d_bind,
233 .unbind = vc4_v3d_unbind, 256 .unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
255 .driver = { 278 .driver = {
256 .name = "vc4_v3d", 279 .name = "vc4_v3d",
257 .of_match_table = vc4_v3d_dt_match, 280 .of_match_table = vc4_v3d_dt_match,
281 .pm = &vc4_v3d_pm_ops,
258 }, 282 },
259}; 283};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, 401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true); 402 true);
403 exec->tile_bo = &tile_bo->base; 403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo) 404 if (IS_ERR(exec->tile_bo))
405 return -ENOMEM; 405 return PTR_ERR(exec->tile_bo);
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list); 406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 407
408 /* tile alloc address. */ 408 /* tile alloc address. */
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b8380481..2b3105c8aed3 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
126 struct ads1015_data *data = i2c_get_clientdata(client); 126 struct ads1015_data *data = i2c_get_clientdata(client);
127 unsigned int pga = data->channel_data[channel].pga; 127 unsigned int pga = data->channel_data[channel].pga;
128 int fullscale = fullscale_table[pga]; 128 int fullscale = fullscale_table[pga];
129 const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; 129 const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
130 130
131 return DIV_ROUND_CLOSEST(reg * fullscale, mask); 131 return DIV_ROUND_CLOSEST(reg * fullscale, mask);
132} 132}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3deeb18a..685568b1236d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
406 unsigned long *state) 406 unsigned long *state)
407{ 407{
408 struct gpio_fan_data *fan_data = cdev->devdata; 408 struct gpio_fan_data *fan_data = cdev->devdata;
409 int r;
410 409
411 if (!fan_data) 410 if (!fan_data)
412 return -EINVAL; 411 return -EINVAL;
413 412
414 r = get_fan_speed_index(fan_data); 413 *state = fan_data->speed_index;
415 if (r < 0)
416 return r;
417
418 *state = r;
419 return 0; 414 return 0;
420} 415}
421 416
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d69799a9c..27fa0cb09538 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1271 switch (dev->device) { 1271 switch (dev->device) {
1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: 1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: 1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1275 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1276 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1275 priv->features |= FEATURE_I2C_BLOCK_READ; 1277 priv->features |= FEATURE_I2C_BLOCK_READ;
1276 priv->features |= FEATURE_IRQ; 1278 priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 08d26ba61ed3..13c45296ce5b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1450,7 +1450,8 @@ omap_i2c_probe(struct platform_device *pdev)
1450 1450
1451err_unuse_clocks: 1451err_unuse_clocks:
1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1453 pm_runtime_put(omap->dev); 1453 pm_runtime_dont_use_autosuspend(omap->dev);
1454 pm_runtime_put_sync(omap->dev);
1454 pm_runtime_disable(&pdev->dev); 1455 pm_runtime_disable(&pdev->dev);
1455err_free_mem: 1456err_free_mem:
1456 1457
@@ -1468,6 +1469,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1468 return ret; 1469 return ret;
1469 1470
1470 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1471 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1472 pm_runtime_dont_use_autosuspend(&pdev->dev);
1471 pm_runtime_put_sync(&pdev->dev); 1473 pm_runtime_put_sync(&pdev->dev);
1472 pm_runtime_disable(&pdev->dev); 1474 pm_runtime_disable(&pdev->dev);
1473 return 0; 1475 return 0;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index f3e5ff8522f0..213ba55e17c3 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -467,7 +467,7 @@ static int uniphier_fi2c_clk_init(struct device *dev,
467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED; 467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
468 468
469 if (!bus_speed) { 469 if (!bus_speed) {
470 dev_err(dev, "clock-freqyency should not be zero\n"); 470 dev_err(dev, "clock-frequency should not be zero\n");
471 return -EINVAL; 471 return -EINVAL;
472 } 472 }
473 473
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 1f4f3f53819c..89eaa8a7e1e0 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -328,7 +328,7 @@ static int uniphier_i2c_clk_init(struct device *dev,
328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED; 328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
329 329
330 if (!bus_speed) { 330 if (!bus_speed) {
331 dev_err(dev, "clock-freqyency should not be zero\n"); 331 dev_err(dev, "clock-frequency should not be zero\n");
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 334
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3de93517efe4..14606afbfaa8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
336 union ib_gid gid; 336 union ib_gid gid;
337 struct ib_gid_attr gid_attr = {}; 337 struct ib_gid_attr gid_attr = {};
338 ssize_t ret; 338 ssize_t ret;
339 va_list args;
340 339
341 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, 340 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
342 &gid_attr); 341 &gid_attr);
@@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
348err: 347err:
349 if (gid_attr.ndev) 348 if (gid_attr.ndev)
350 dev_put(gid_attr.ndev); 349 dev_put(gid_attr.ndev);
351 va_end(args);
352 return ret; 350 return ret;
353} 351}
354 352
@@ -722,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
722 720
723 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, 721 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
724 &cpi, 40, sizeof(cpi)) >= 0) { 722 &cpi, 40, sizeof(cpi)) >= 0) {
725 723 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
726 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
727 /* We have extended counters */ 724 /* We have extended counters */
728 return &pma_group_ext; 725 return &pma_group_ext;
729 726
730 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) 727 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
731 /* But not the IETF ones */ 728 /* But not the IETF ones */
732 return &pma_group_noietf; 729 return &pma_group_noietf;
733 } 730 }
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bfa639b..d68f506c1922 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,48 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
818} 818}
819 819
820static void edit_counter(struct mlx4_counter *cnt, 820static void edit_counter(struct mlx4_counter *cnt, void *counters,
821 struct ib_pma_portcounters *pma_cnt) 821 __be16 attr_id)
822{ 822{
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, 823 switch (attr_id) {
824 (be64_to_cpu(cnt->tx_bytes) >> 2)); 824 case IB_PMA_PORT_COUNTERS:
825 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, 825 {
826 (be64_to_cpu(cnt->rx_bytes) >> 2)); 826 struct ib_pma_portcounters *pma_cnt =
827 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, 827 (struct ib_pma_portcounters *)counters;
828 be64_to_cpu(cnt->tx_frames)); 828
829 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, 829 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
830 be64_to_cpu(cnt->rx_frames)); 830 (be64_to_cpu(cnt->tx_bytes) >> 2));
831 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
832 (be64_to_cpu(cnt->rx_bytes) >> 2));
833 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
834 be64_to_cpu(cnt->tx_frames));
835 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
836 be64_to_cpu(cnt->rx_frames));
837 break;
838 }
839 case IB_PMA_PORT_COUNTERS_EXT:
840 {
841 struct ib_pma_portcounters_ext *pma_cnt_ext =
842 (struct ib_pma_portcounters_ext *)counters;
843
844 pma_cnt_ext->port_xmit_data =
845 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
846 pma_cnt_ext->port_rcv_data =
847 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
848 pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
849 pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
850 break;
851 }
852 }
853}
854
855static int iboe_process_mad_port_info(void *out_mad)
856{
857 struct ib_class_port_info cpi = {};
858
859 cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
860 memcpy(out_mad, &cpi, sizeof(cpi));
861 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
831} 862}
832 863
833static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 864static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
842 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) 873 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
843 return -EINVAL; 874 return -EINVAL;
844 875
876 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
877 return iboe_process_mad_port_info((void *)(out_mad->data + 40));
878
845 memset(&counter_stats, 0, sizeof(counter_stats)); 879 memset(&counter_stats, 0, sizeof(counter_stats));
846 mutex_lock(&dev->counters_table[port_num - 1].mutex); 880 mutex_lock(&dev->counters_table[port_num - 1].mutex);
847 list_for_each_entry(tmp_counter, 881 list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
863 switch (counter_stats.counter_mode & 0xf) { 897 switch (counter_stats.counter_mode & 0xf) {
864 case 0: 898 case 0:
865 edit_counter(&counter_stats, 899 edit_counter(&counter_stats,
866 (void *)(out_mad->data + 40)); 900 (void *)(out_mad->data + 40),
901 in_mad->mad_hdr.attr_id);
867 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 902 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
868 break; 903 break;
869 default: 904 default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
894 */ 929 */
895 if (link == IB_LINK_LAYER_INFINIBAND) { 930 if (link == IB_LINK_LAYER_INFINIBAND) {
896 if (mlx4_is_slave(dev->dev) && 931 if (mlx4_is_slave(dev->dev) &&
897 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 932 (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
898 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) 933 (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
934 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
935 in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
899 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 936 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
900 in_grh, in_mad, out_mad); 937 in_grh, in_mad, out_mad);
901 938
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f00b6c..fd97534762b8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1681 } 1681 }
1682 1682
1683 if (qp->ibqp.uobject) 1683 if (qp->ibqp.uobject)
1684 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); 1684 context->usr_page = cpu_to_be32(
1685 mlx4_to_hw_uar_index(dev->dev,
1686 to_mucontext(ibqp->uobject->context)->uar.index));
1685 else 1687 else
1686 context->usr_page = cpu_to_be32(dev->priv_uar.index); 1688 context->usr_page = cpu_to_be32(
1689 mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1687 1690
1688 if (attr_mask & IB_QP_DEST_QPN) 1691 if (attr_mask & IB_QP_DEST_QPN)
1689 context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 1692 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9116bc3988a6..34cb8e87c7b8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type)
270 /* fall through */ 270 /* fall through */
271 case IB_QPT_RC: 271 case IB_QPT_RC:
272 size += sizeof(struct mlx5_wqe_ctrl_seg) + 272 size += sizeof(struct mlx5_wqe_ctrl_seg) +
273 sizeof(struct mlx5_wqe_atomic_seg) + 273 max(sizeof(struct mlx5_wqe_atomic_seg) +
274 sizeof(struct mlx5_wqe_raddr_seg); 274 sizeof(struct mlx5_wqe_raddr_seg),
275 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
276 sizeof(struct mlx5_mkey_seg));
275 break; 277 break;
276 278
277 case IB_QPT_XRC_TGT: 279 case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
279 281
280 case IB_QPT_UC: 282 case IB_QPT_UC:
281 size += sizeof(struct mlx5_wqe_ctrl_seg) + 283 size += sizeof(struct mlx5_wqe_ctrl_seg) +
282 sizeof(struct mlx5_wqe_raddr_seg) + 284 max(sizeof(struct mlx5_wqe_raddr_seg),
283 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 285 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
284 sizeof(struct mlx5_mkey_seg); 286 sizeof(struct mlx5_mkey_seg));
285 break; 287 break;
286 288
287 case IB_QPT_UD: 289 case IB_QPT_UD:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b5cb15..12503f15fbd6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@ struct ocrdma_cq {
323 */ 323 */
324 u32 max_hw_cqe; 324 u32 max_hw_cqe;
325 bool phase_change; 325 bool phase_change;
326 bool deferred_arm, deferred_sol;
327 bool first_arm;
328
329 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization 326 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
330 * to cq polling 327 * to cq polling
331 */ 328 */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 573849354cb9..f38743018cb4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -228,6 +228,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
228 228
229 ocrdma_alloc_pd_pool(dev); 229 ocrdma_alloc_pd_pool(dev);
230 230
231 if (!ocrdma_alloc_stats_resources(dev)) {
232 pr_err("%s: stats resource allocation failed\n", __func__);
233 goto alloc_err;
234 }
235
231 spin_lock_init(&dev->av_tbl.lock); 236 spin_lock_init(&dev->av_tbl.lock);
232 spin_lock_init(&dev->flush_q_lock); 237 spin_lock_init(&dev->flush_q_lock);
233 return 0; 238 return 0;
@@ -238,6 +243,7 @@ alloc_err:
238 243
239static void ocrdma_free_resources(struct ocrdma_dev *dev) 244static void ocrdma_free_resources(struct ocrdma_dev *dev)
240{ 245{
246 ocrdma_release_stats_resources(dev);
241 kfree(dev->stag_arr); 247 kfree(dev->stag_arr);
242 kfree(dev->qp_tbl); 248 kfree(dev->qp_tbl);
243 kfree(dev->cq_tbl); 249 kfree(dev->cq_tbl);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a620c1..255f774080a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -64,10 +64,11 @@ static int ocrdma_add_stat(char *start, char *pcur,
64 return cpy_len; 64 return cpy_len;
65} 65}
66 66
67static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) 67bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
68{ 68{
69 struct stats_mem *mem = &dev->stats_mem; 69 struct stats_mem *mem = &dev->stats_mem;
70 70
71 mutex_init(&dev->stats_lock);
71 /* Alloc mbox command mem*/ 72 /* Alloc mbox command mem*/
72 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
73 sizeof(struct ocrdma_rdma_stats_resp)); 74 sizeof(struct ocrdma_rdma_stats_resp));
@@ -91,13 +92,14 @@ static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
91 return true; 92 return true;
92} 93}
93 94
94static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) 95void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
95{ 96{
96 struct stats_mem *mem = &dev->stats_mem; 97 struct stats_mem *mem = &dev->stats_mem;
97 98
98 if (mem->va) 99 if (mem->va)
99 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, 100 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
100 mem->va, mem->pa); 101 mem->va, mem->pa);
102 mem->va = NULL;
101 kfree(mem->debugfs_mem); 103 kfree(mem->debugfs_mem);
102} 104}
103 105
@@ -838,15 +840,9 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
838 &dev->reset_stats, &ocrdma_dbg_ops)) 840 &dev->reset_stats, &ocrdma_dbg_ops))
839 goto err; 841 goto err;
840 842
841 /* Now create dma_mem for stats mbx command */
842 if (!ocrdma_alloc_stats_mem(dev))
843 goto err;
844
845 mutex_init(&dev->stats_lock);
846 843
847 return; 844 return;
848err: 845err:
849 ocrdma_release_stats_mem(dev);
850 debugfs_remove_recursive(dev->dir); 846 debugfs_remove_recursive(dev->dir);
851 dev->dir = NULL; 847 dev->dir = NULL;
852} 848}
@@ -855,9 +851,7 @@ void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
855{ 851{
856 if (!dev->dir) 852 if (!dev->dir)
857 return; 853 return;
858 debugfs_remove(dev->dir); 854 debugfs_remove_recursive(dev->dir);
859 mutex_destroy(&dev->stats_lock);
860 ocrdma_release_stats_mem(dev);
861} 855}
862 856
863void ocrdma_init_debugfs(void) 857void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d04c7b8..bba1fec4f11f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,6 +65,8 @@ enum OCRDMA_STATS_TYPE {
65 65
66void ocrdma_rem_debugfs(void); 66void ocrdma_rem_debugfs(void);
67void ocrdma_init_debugfs(void); 67void ocrdma_init_debugfs(void);
68bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
69void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
68void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 70void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
69void ocrdma_add_port_stats(struct ocrdma_dev *dev); 71void ocrdma_add_port_stats(struct ocrdma_dev *dev);
70int ocrdma_pma_counters(struct ocrdma_dev *dev, 72int ocrdma_pma_counters(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d4c687b548d8..12420e4ecf3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
125 IB_DEVICE_SYS_IMAGE_GUID | 125 IB_DEVICE_SYS_IMAGE_GUID |
126 IB_DEVICE_LOCAL_DMA_LKEY | 126 IB_DEVICE_LOCAL_DMA_LKEY |
127 IB_DEVICE_MEM_MGT_EXTENSIONS; 127 IB_DEVICE_MEM_MGT_EXTENSIONS;
128 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); 128 attr->max_sge = dev->attr.max_send_sge;
129 attr->max_sge_rd = 0; 129 attr->max_sge_rd = attr->max_sge;
130 attr->max_cq = dev->attr.max_cq; 130 attr->max_cq = dev->attr.max_cq;
131 attr->max_cqe = dev->attr.max_cqe; 131 attr->max_cqe = dev->attr.max_cqe;
132 attr->max_mr = dev->attr.max_mr; 132 attr->max_mr = dev->attr.max_mr;
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1094 spin_lock_init(&cq->comp_handler_lock); 1094 spin_lock_init(&cq->comp_handler_lock);
1095 INIT_LIST_HEAD(&cq->sq_head); 1095 INIT_LIST_HEAD(&cq->sq_head);
1096 INIT_LIST_HEAD(&cq->rq_head); 1096 INIT_LIST_HEAD(&cq->rq_head);
1097 cq->first_arm = true;
1098 1097
1099 if (ib_ctx) { 1098 if (ib_ctx) {
1100 uctx = get_ocrdma_ucontext(ib_ctx); 1099 uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2726,8 +2725,7 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2726 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2725 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2727 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2726 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2728 OCRDMA_CQE_SRCQP_MASK; 2727 OCRDMA_CQE_SRCQP_MASK;
2729 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & 2728 ibwc->pkey_index = 0;
2730 OCRDMA_CQE_PKEY_MASK;
2731 ibwc->wc_flags = IB_WC_GRH; 2729 ibwc->wc_flags = IB_WC_GRH;
2732 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2730 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2733 OCRDMA_CQE_UD_XFER_LEN_SHIFT); 2731 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
@@ -2911,12 +2909,9 @@ expand_cqe:
2911 } 2909 }
2912stop_cqe: 2910stop_cqe:
2913 cq->getp = cur_getp; 2911 cq->getp = cur_getp;
2914 if (cq->deferred_arm || polled_hw_cqes) { 2912
2915 ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm, 2913 if (polled_hw_cqes)
2916 cq->deferred_sol, polled_hw_cqes); 2914 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2917 cq->deferred_arm = false;
2918 cq->deferred_sol = false;
2919 }
2920 2915
2921 return i; 2916 return i;
2922} 2917}
@@ -3000,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
3000 if (cq_flags & IB_CQ_SOLICITED) 2995 if (cq_flags & IB_CQ_SOLICITED)
3001 sol_needed = true; 2996 sol_needed = true;
3002 2997
3003 if (cq->first_arm) { 2998 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3004 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3005 cq->first_arm = false;
3006 }
3007
3008 cq->deferred_arm = true;
3009 cq->deferred_sol = sol_needed;
3010 spin_unlock_irqrestore(&cq->cq_lock, flags); 2999 spin_unlock_irqrestore(&cq->cq_lock, flags);
3011 3000
3012 return 0; 3001 return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
245 skb_reset_mac_header(skb); 245 skb_reset_mac_header(skb);
246 skb_pull(skb, IPOIB_ENCAP_LEN); 246 skb_pull(skb, IPOIB_ENCAP_LEN);
247 247
248 skb->truesize = SKB_TRUESIZE(skb->len);
249
250 ++dev->stats.rx_packets; 248 ++dev->stats.rx_packets;
251 dev->stats.rx_bytes += skb->len; 249 dev->stats.rx_bytes += skb->len;
252 250
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 050dfa175d16..25889311b1e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
456 return status; 456 return status;
457} 457}
458 458
459static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) 459/*
460 * Caller must hold 'priv->lock'
461 */
462static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
460{ 463{
461 struct ipoib_dev_priv *priv = netdev_priv(dev); 464 struct ipoib_dev_priv *priv = netdev_priv(dev);
462 struct ib_sa_multicast *multicast; 465 struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
466 ib_sa_comp_mask comp_mask; 469 ib_sa_comp_mask comp_mask;
467 int ret = 0; 470 int ret = 0;
468 471
472 if (!priv->broadcast ||
473 !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
474 return -EINVAL;
475
469 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); 476 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
470 477
471 rec.mgid = mcast->mcmember.mgid; 478 rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
525 rec.join_state = 4; 532 rec.join_state = 4;
526#endif 533#endif
527 } 534 }
535 spin_unlock_irq(&priv->lock);
528 536
529 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 537 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
530 &rec, comp_mask, GFP_KERNEL, 538 &rec, comp_mask, GFP_KERNEL,
531 ipoib_mcast_join_complete, mcast); 539 ipoib_mcast_join_complete, mcast);
540 spin_lock_irq(&priv->lock);
532 if (IS_ERR(multicast)) { 541 if (IS_ERR(multicast)) {
533 ret = PTR_ERR(multicast); 542 ret = PTR_ERR(multicast);
534 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); 543 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
535 spin_lock_irq(&priv->lock);
536 /* Requeue this join task with a backoff delay */ 544 /* Requeue this join task with a backoff delay */
537 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); 545 __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
538 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 546 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
539 spin_unlock_irq(&priv->lock); 547 spin_unlock_irq(&priv->lock);
540 complete(&mcast->done); 548 complete(&mcast->done);
549 spin_lock_irq(&priv->lock);
541 } 550 }
551 return 0;
542} 552}
543 553
544void ipoib_mcast_join_task(struct work_struct *work) 554void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
620 /* Found the next unjoined group */ 630 /* Found the next unjoined group */
621 init_completion(&mcast->done); 631 init_completion(&mcast->done);
622 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 632 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
623 spin_unlock_irq(&priv->lock); 633 if (ipoib_mcast_join(dev, mcast)) {
624 ipoib_mcast_join(dev, mcast); 634 spin_unlock_irq(&priv->lock);
625 spin_lock_irq(&priv->lock); 635 return;
636 }
626 } else if (!delay_until || 637 } else if (!delay_until ||
627 time_before(mcast->delay_until, delay_until)) 638 time_before(mcast->delay_until, delay_until))
628 delay_until = mcast->delay_until; 639 delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ out:
641 if (mcast) { 652 if (mcast) {
642 init_completion(&mcast->done); 653 init_completion(&mcast->done);
643 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 654 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
655 ipoib_mcast_join(dev, mcast);
644 } 656 }
645 spin_unlock_irq(&priv->lock); 657 spin_unlock_irq(&priv->lock);
646 if (mcast)
647 ipoib_mcast_join(dev, mcast);
648} 658}
649 659
650int ipoib_mcast_start_thread(struct net_device *dev) 660int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6727954ab74b..e8a84d12b7ff 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
1207#else 1207#else
1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } 1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
1209static void xpad_led_disconnect(struct usb_xpad *xpad) { } 1209static void xpad_led_disconnect(struct usb_xpad *xpad) { }
1210static void xpad_identify_controller(struct usb_xpad *xpad) { }
1211#endif 1210#endif
1212 1211
1213static int xpad_start_input(struct usb_xpad *xpad) 1212static int xpad_start_input(struct usb_xpad *xpad)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4d446d5085aa..c01a1d648f9f 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -235,7 +235,7 @@ struct adp5589_kpad {
235 unsigned short gpimapsize; 235 unsigned short gpimapsize;
236 unsigned extend_cfg; 236 unsigned extend_cfg;
237 bool is_adp5585; 237 bool is_adp5585;
238 bool adp5585_support_row5; 238 bool support_row5;
239#ifdef CONFIG_GPIOLIB 239#ifdef CONFIG_GPIOLIB
240 unsigned char gpiomap[ADP5589_MAXGPIO]; 240 unsigned char gpiomap[ADP5589_MAXGPIO];
241 bool export_gpio; 241 bool export_gpio;
@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
485 if (kpad->extend_cfg & C4_EXTEND_CFG) 485 if (kpad->extend_cfg & C4_EXTEND_CFG)
486 pin_used[kpad->var->c4_extend_cfg] = true; 486 pin_used[kpad->var->c4_extend_cfg] = true;
487 487
488 if (!kpad->adp5585_support_row5) 488 if (!kpad->support_row5)
489 pin_used[5] = true; 489 pin_used[5] = true;
490 490
491 for (i = 0; i < kpad->var->maxgpio; i++) 491 for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
884 884
885 switch (id->driver_data) { 885 switch (id->driver_data) {
886 case ADP5585_02: 886 case ADP5585_02:
887 kpad->adp5585_support_row5 = true; 887 kpad->support_row5 = true;
888 case ADP5585_01: 888 case ADP5585_01:
889 kpad->is_adp5585 = true; 889 kpad->is_adp5585 = true;
890 kpad->var = &const_adp5585; 890 kpad->var = &const_adp5585;
891 break; 891 break;
892 case ADP5589: 892 case ADP5589:
893 kpad->support_row5 = true;
893 kpad->var = &const_adp5589; 894 kpad->var = &const_adp5589;
894 break; 895 break;
895 } 896 }
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 378db10001df..4401be225d64 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
304 led->cdev.brightness = LED_OFF; 304 led->cdev.brightness = LED_OFF;
305 305
306 error = of_property_read_u32(child, "reg", &reg); 306 error = of_property_read_u32(child, "reg", &reg);
307 if (error != 0 || reg >= num_leds) 307 if (error != 0 || reg >= num_leds) {
308 of_node_put(child);
308 return -EINVAL; 309 return -EINVAL;
310 }
309 311
310 led->reg = reg; 312 led->reg = reg;
311 led->priv = priv; 313 led->priv = priv;
@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
313 INIT_WORK(&led->work, cap11xx_led_work); 315 INIT_WORK(&led->work, cap11xx_led_work);
314 316
315 error = devm_led_classdev_register(dev, &led->cdev); 317 error = devm_led_classdev_register(dev, &led->cdev);
316 if (error) 318 if (error) {
319 of_node_put(child);
317 return error; 320 return error;
321 }
318 322
319 priv->num_leds++; 323 priv->num_leds++;
320 led++; 324 led++;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d6d16fa78281..1f2337abcf2f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
733 module will be called xen-kbdfront. 733 module will be called xen-kbdfront.
734 734
735config INPUT_SIRFSOC_ONKEY 735config INPUT_SIRFSOC_ONKEY
736 bool "CSR SiRFSoC power on/off/suspend key support" 736 tristate "CSR SiRFSoC power on/off/suspend key support"
737 depends on ARCH_SIRF && OF 737 depends on ARCH_SIRF && OF
738 default y 738 default y
739 help 739 help
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 9d5b89befe6f..ed7237f19539 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
101static const struct of_device_id sirfsoc_pwrc_of_match[] = { 101static const struct of_device_id sirfsoc_pwrc_of_match[] = {
102 { .compatible = "sirf,prima2-pwrc" }, 102 { .compatible = "sirf,prima2-pwrc" },
103 {}, 103 {},
104} 104};
105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); 105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
106 106
107static int sirfsoc_pwrc_probe(struct platform_device *pdev) 107static int sirfsoc_pwrc_probe(struct platform_device *pdev)
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06258ce..a3f0f5a47490 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
458 priv->abs_dev = abs_dev; 458 priv->abs_dev = abs_dev;
459 psmouse->private = priv; 459 psmouse->private = priv;
460 460
461 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
462
463 /* Set up and register absolute device */ 461 /* Set up and register absolute device */
464 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", 462 snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
465 psmouse->ps2dev.serio->phys); 463 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
475 abs_dev->id.version = psmouse->model; 473 abs_dev->id.version = psmouse->model;
476 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; 474 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
477 475
478 error = input_register_device(priv->abs_dev);
479 if (error)
480 goto init_fail;
481
482 /* Set absolute device capabilities */ 476 /* Set absolute device capabilities */
483 input_set_capability(abs_dev, EV_KEY, BTN_LEFT); 477 input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
484 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); 478 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
488 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); 482 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
489 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); 483 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
490 484
485 error = input_register_device(priv->abs_dev);
486 if (error)
487 goto init_fail;
488
489 /* Add wheel capability to the relative device */
490 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
491
491 psmouse->protocol_handler = vmmouse_process_byte; 492 psmouse->protocol_handler = vmmouse_process_byte;
492 psmouse->disconnect = vmmouse_disconnect; 493 psmouse->disconnect = vmmouse_disconnect;
493 psmouse->reconnect = vmmouse_reconnect; 494 psmouse->reconnect = vmmouse_reconnect;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f828975ab10..1ca7f551e2da 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
134 int error; 134 int error;
135 135
136 error = device_attach(&serio->dev); 136 error = device_attach(&serio->dev);
137 if (error < 0) 137 if (error < 0 && error != -EPROBE_DEFER)
138 dev_warn(&serio->dev, 138 dev_warn(&serio->dev,
139 "device_attach() failed for %s (%s), error: %d\n", 139 "device_attach() failed for %s (%s), error: %d\n",
140 serio->phys, serio->name, error); 140 serio->phys, serio->name, error);
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 5d4903a402cc..69828d015d45 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/of.h>
24#include <linux/pinctrl/consumer.h> 25#include <linux/pinctrl/consumer.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0b0f8c17f3f7..23fbe382da8b 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
822 int error; 822 int error;
823 823
824 error = device_property_read_u32(dev, "threshold", &val); 824 error = device_property_read_u32(dev, "threshold", &val);
825 if (!error) 825 if (!error) {
826 reg_addr->reg_threshold = val; 826 edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
827 tsdata->threshold = val;
828 }
827 829
828 error = device_property_read_u32(dev, "gain", &val); 830 error = device_property_read_u32(dev, "gain", &val);
829 if (!error) 831 if (!error) {
830 reg_addr->reg_gain = val; 832 edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
833 tsdata->gain = val;
834 }
831 835
832 error = device_property_read_u32(dev, "offset", &val); 836 error = device_property_read_u32(dev, "offset", &val);
833 if (!error) 837 if (!error) {
834 reg_addr->reg_offset = val; 838 edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
839 tsdata->offset = val;
840 }
835} 841}
836 842
837static void 843static void
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..fb092f3f11cb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1353,7 +1353,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1353 1353
1354 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1354 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355 1355
1356 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1356 sts = readl(iommu->reg + DMAR_GSTS_REG);
1357 if (!(sts & DMA_GSTS_QIES)) 1357 if (!(sts & DMA_GSTS_QIES))
1358 goto end; 1358 goto end;
1359 1359
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
250{ 250{
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
252 struct intel_svm_dev *sdev;
252 253
254 /* This might end up being called from exit_mmap(), *before* the page
255 * tables are cleared. And __mmu_notifier_release() will delete us from
256 * the list of notifiers so that our invalidate_range() callback doesn't
257 * get called when the page tables are cleared. So we need to protect
258 * against hardware accessing those page tables.
259 *
260 * We do it by clearing the entry in the PASID table and then flushing
261 * the IOTLB and the PASID table caches. This might upset hardware;
262 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
263 * page) so that we end up taking a fault that the hardware really
264 * *has* to handle gracefully without affecting other processes.
265 */
253 svm->iommu->pasid_table[svm->pasid].val = 0; 266 svm->iommu->pasid_table[svm->pasid].val = 0;
267 wmb();
268
269 rcu_read_lock();
270 list_for_each_entry_rcu(sdev, &svm->devs, list) {
271 intel_flush_pasid_dev(svm, sdev, svm->pasid);
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
273 }
274 rcu_read_unlock();
254 275
255 /* There's no need to do any flush because we can't get here if there
256 * are any devices left anyway. */
257 WARN_ON(!list_empty(&svm->devs));
258} 276}
259 277
260static const struct mmu_notifier_ops intel_mmuops = { 278static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
379 goto out; 397 goto out;
380 } 398 }
381 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; 399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
382 mm = NULL;
383 } else 400 } else
384 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); 401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
385 wmb(); 402 wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
442 kfree_rcu(sdev, rcu); 459 kfree_rcu(sdev, rcu);
443 460
444 if (list_empty(&svm->devs)) { 461 if (list_empty(&svm->devs)) {
445 mmu_notifier_unregister(&svm->notifier, svm->mm);
446 462
447 idr_remove(&svm->iommu->pasid_idr, svm->pasid); 463 idr_remove(&svm->iommu->pasid_idr, svm->pasid);
448 if (svm->mm) 464 if (svm->mm)
449 mmput(svm->mm); 465 mmu_notifier_unregister(&svm->notifier, svm->mm);
466
450 /* We mandate that no page faults may be outstanding 467 /* We mandate that no page faults may be outstanding
451 * for the PASID when intel_svm_unbind_mm() is called. 468 * for the PASID when intel_svm_unbind_mm() is called.
452 * If that is not obeyed, subtle errors will happen. 469 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
507 struct intel_svm *svm = NULL; 524 struct intel_svm *svm = NULL;
508 int head, tail, handled = 0; 525 int head, tail, handled = 0;
509 526
527 /* Clear PPR bit before reading head/tail registers, to
528 * ensure that we get a new interrupt if needed. */
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
530
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; 531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; 532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
512 while (head != tail) { 533 while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
551 * any faults on kernel addresses. */ 572 * any faults on kernel addresses. */
552 if (!svm->mm) 573 if (!svm->mm)
553 goto bad_req; 574 goto bad_req;
575 /* If the mm is already defunct, don't handle faults. */
576 if (!atomic_inc_not_zero(&svm->mm->mm_users))
577 goto bad_req;
554 down_read(&svm->mm->mmap_sem); 578 down_read(&svm->mm->mmap_sem);
555 vma = find_extend_vma(svm->mm, address); 579 vma = find_extend_vma(svm->mm, address);
556 if (!vma || address < vma->vm_start) 580 if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
567 result = QI_RESP_SUCCESS; 591 result = QI_RESP_SUCCESS;
568 invalid: 592 invalid:
569 up_read(&svm->mm->mmap_sem); 593 up_read(&svm->mm->mmap_sem);
594 mmput(svm->mm);
570 bad_req: 595 bad_req:
571 /* Accounting for major/minor faults? */ 596 /* Accounting for major/minor faults? */
572 rcu_read_lock(); 597 rcu_read_lock();
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
629 629
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); 630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
631 631
632 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 632 sts = readl(iommu->reg + DMAR_GSTS_REG);
633 if (!(sts & DMA_GSTS_IRES)) 633 if (!(sts & DMA_GSTS_IRES))
634 goto end; 634 goto end;
635 635
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 3447549fcc93..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -66,7 +66,10 @@ struct its_node {
66 unsigned long phys_base; 66 unsigned long phys_base;
67 struct its_cmd_block *cmd_base; 67 struct its_cmd_block *cmd_base;
68 struct its_cmd_block *cmd_write; 68 struct its_cmd_block *cmd_write;
69 void *tables[GITS_BASER_NR_REGS]; 69 struct {
70 void *base;
71 u32 order;
72 } tables[GITS_BASER_NR_REGS];
70 struct its_collection *collections; 73 struct its_collection *collections;
71 struct list_head its_device_list; 74 struct list_head its_device_list;
72 u64 flags; 75 u64 flags;
@@ -75,6 +78,9 @@ struct its_node {
75 78
76#define ITS_ITT_ALIGN SZ_256 79#define ITS_ITT_ALIGN SZ_256
77 80
81/* Convert page order to size in bytes */
82#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
83
78struct event_lpi_map { 84struct event_lpi_map {
79 unsigned long *lpi_map; 85 unsigned long *lpi_map;
80 u16 *col_map; 86 u16 *col_map;
@@ -597,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
597 lpi_set_config(d, true); 603 lpi_set_config(d, true);
598} 604}
599 605
600static void its_eoi_irq(struct irq_data *d)
601{
602 gic_write_eoir(d->hwirq);
603}
604
605static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 606static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
606 bool force) 607 bool force)
607{ 608{
@@ -638,7 +639,7 @@ static struct irq_chip its_irq_chip = {
638 .name = "ITS", 639 .name = "ITS",
639 .irq_mask = its_mask_irq, 640 .irq_mask = its_mask_irq,
640 .irq_unmask = its_unmask_irq, 641 .irq_unmask = its_unmask_irq,
641 .irq_eoi = its_eoi_irq, 642 .irq_eoi = irq_chip_eoi_parent,
642 .irq_set_affinity = its_set_affinity, 643 .irq_set_affinity = its_set_affinity,
643 .irq_compose_msi_msg = its_irq_compose_msi_msg, 644 .irq_compose_msi_msg = its_irq_compose_msi_msg,
644}; 645};
@@ -807,9 +808,10 @@ static void its_free_tables(struct its_node *its)
807 int i; 808 int i;
808 809
809 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 810 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
810 if (its->tables[i]) { 811 if (its->tables[i].base) {
811 free_page((unsigned long)its->tables[i]); 812 free_pages((unsigned long)its->tables[i].base,
812 its->tables[i] = NULL; 813 its->tables[i].order);
814 its->tables[i].base = NULL;
813 } 815 }
814 } 816 }
815} 817}
@@ -842,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
842 u64 type = GITS_BASER_TYPE(val); 844 u64 type = GITS_BASER_TYPE(val);
843 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 845 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
844 int order = get_order(psz); 846 int order = get_order(psz);
845 int alloc_size;
846 int alloc_pages; 847 int alloc_pages;
847 u64 tmp; 848 u64 tmp;
848 void *base; 849 void *base;
@@ -874,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
874 } 875 }
875 } 876 }
876 877
877 alloc_size = (1 << order) * PAGE_SIZE;
878retry_alloc_baser: 878retry_alloc_baser:
879 alloc_pages = (alloc_size / psz); 879 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
880 if (alloc_pages > GITS_BASER_PAGES_MAX) { 880 if (alloc_pages > GITS_BASER_PAGES_MAX) {
881 alloc_pages = GITS_BASER_PAGES_MAX; 881 alloc_pages = GITS_BASER_PAGES_MAX;
882 order = get_order(GITS_BASER_PAGES_MAX * psz); 882 order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -890,7 +890,8 @@ retry_alloc_baser:
890 goto out_free; 890 goto out_free;
891 } 891 }
892 892
893 its->tables[i] = base; 893 its->tables[i].base = base;
894 its->tables[i].order = order;
894 895
895retry_baser: 896retry_baser:
896 val = (virt_to_phys(base) | 897 val = (virt_to_phys(base) |
@@ -928,7 +929,7 @@ retry_baser:
928 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 929 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
929 if (!shr) { 930 if (!shr) {
930 cache = GITS_BASER_nC; 931 cache = GITS_BASER_nC;
931 __flush_dcache_area(base, alloc_size); 932 __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
932 } 933 }
933 goto retry_baser; 934 goto retry_baser;
934 } 935 }
@@ -940,7 +941,7 @@ retry_baser:
940 * something is horribly wrong... 941 * something is horribly wrong...
941 */ 942 */
942 free_pages((unsigned long)base, order); 943 free_pages((unsigned long)base, order);
943 its->tables[i] = NULL; 944 its->tables[i].base = NULL;
944 945
945 switch (psz) { 946 switch (psz) {
946 case SZ_16K: 947 case SZ_16K:
@@ -961,7 +962,7 @@ retry_baser:
961 } 962 }
962 963
963 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 964 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
964 (int)(alloc_size / entry_size), 965 (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
965 its_base_type_string[type], 966 its_base_type_string[type],
966 (unsigned long)virt_to_phys(base), 967 (unsigned long)virt_to_phys(base),
967 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 968 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 911758c056c1..8f9ebf714e2b 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -384,9 +384,6 @@ static struct irq_chip gic_chip = {
384 .irq_unmask = gic_unmask_irq, 384 .irq_unmask = gic_unmask_irq,
385 .irq_eoi = gic_eoi_irq, 385 .irq_eoi = gic_eoi_irq,
386 .irq_set_type = gic_set_type, 386 .irq_set_type = gic_set_type,
387#ifdef CONFIG_SMP
388 .irq_set_affinity = gic_set_affinity,
389#endif
390 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 387 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
391 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 388 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
392 .flags = IRQCHIP_SET_TYPE_MASKED | 389 .flags = IRQCHIP_SET_TYPE_MASKED |
@@ -400,9 +397,6 @@ static struct irq_chip gic_eoimode1_chip = {
400 .irq_unmask = gic_unmask_irq, 397 .irq_unmask = gic_unmask_irq,
401 .irq_eoi = gic_eoimode1_eoi_irq, 398 .irq_eoi = gic_eoimode1_eoi_irq,
402 .irq_set_type = gic_set_type, 399 .irq_set_type = gic_set_type,
403#ifdef CONFIG_SMP
404 .irq_set_affinity = gic_set_affinity,
405#endif
406 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 400 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
407 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 401 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
408 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 402 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
@@ -443,7 +437,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
443 u32 bypass = 0; 437 u32 bypass = 0;
444 u32 mode = 0; 438 u32 mode = 0;
445 439
446 if (static_key_true(&supports_deactivate)) 440 if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
447 mode = GIC_CPU_CTRL_EOImodeNS; 441 mode = GIC_CPU_CTRL_EOImodeNS;
448 442
449 /* 443 /*
@@ -1039,6 +1033,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1039 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); 1033 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
1040 } 1034 }
1041 1035
1036#ifdef CONFIG_SMP
1037 if (gic_nr == 0)
1038 gic->chip.irq_set_affinity = gic_set_affinity;
1039#endif
1040
1042#ifdef CONFIG_GIC_NON_BANKED 1041#ifdef CONFIG_GIC_NON_BANKED
1043 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1042 if (percpu_offset) { /* Frankein-GIC without banked registers... */
1044 unsigned int cpu; 1043 unsigned int cpu;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 0704362f4c82..376b28074e0d 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -22,7 +22,6 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23 23
24#include <asm/exception.h> 24#include <asm/exception.h>
25#include <asm/mach/irq.h>
26 25
27#define SUN4I_IRQ_VECTOR_REG 0x00 26#define SUN4I_IRQ_VECTOR_REG 0x00
28#define SUN4I_IRQ_PROTECTION_REG 0x08 27#define SUN4I_IRQ_PROTECTION_REG 0x08
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe0c8a4..d1f8ab915b15 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
373 373
374static void gigaset_device_release(struct device *dev) 374static void gigaset_device_release(struct device *dev)
375{ 375{
376 struct cardstate *cs = dev_get_drvdata(dev); 376 kfree(container_of(dev, struct ser_cardstate, dev.dev));
377
378 if (!cs)
379 return;
380 dev_set_drvdata(dev, NULL);
381 kfree(cs->hw.ser);
382 cs->hw.ser = NULL;
383} 377}
384 378
385/* 379/*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
408 cs->hw.ser = NULL; 402 cs->hw.ser = NULL;
409 return rc; 403 return rc;
410 } 404 }
411 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
412 405
413 tasklet_init(&cs->write_tasklet, 406 tasklet_init(&cs->write_tasklet,
414 gigaset_modem_fill, (unsigned long) cs); 407 gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e2944784e00..afde4edef9ae 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
392 } 392 }
393 stat = bchannel_get_rxbuf(&bc->bch, cnt); 393 stat = bchannel_get_rxbuf(&bc->bch, cnt);
394 /* only transparent use the count here, HDLC overun is detected later */ 394 /* only transparent use the count here, HDLC overun is detected later */
395 if (stat == ENOMEM) { 395 if (stat == -ENOMEM) {
396 pr_warning("%s.B%d: No memory for %d bytes\n", 396 pr_warning("%s.B%d: No memory for %d bytes\n",
397 card->name, bc->bch.nr, cnt); 397 card->name, bc->bch.nr, cnt);
398 return; 398 return;
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb91c5b..9f6acd5d1d2e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name,
572 } 572 }
573 } 573 }
574 574
575 ret = nvm_get_sysblock(dev, &dev->sb); 575 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
576 if (!ret) 576 ret = nvm_get_sysblock(dev, &dev->sb);
577 pr_err("nvm: device not initialized.\n"); 577 if (!ret)
578 else if (ret < 0) 578 pr_err("nvm: device not initialized.\n");
579 pr_err("nvm: err (%d) on device initialization\n", ret); 579 else if (ret < 0)
580 pr_err("nvm: err (%d) on device initialization\n", ret);
581 }
580 582
581 /* register device with a supported media manager */ 583 /* register device with a supported media manager */
582 down_write(&nvm_lock); 584 down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); 1057 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1; 1058 info.fs_ppa.ppa = -1;
1057 1059
1058 ret = nvm_init_sysblock(dev, &info); 1060 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1059 if (ret) 1061 ret = nvm_init_sysblock(dev, &info);
1060 return ret; 1062 if (ret)
1063 return ret;
1064 }
1061 1065
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); 1066 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063 1067
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1117 dev->mt = NULL; 1121 dev->mt = NULL;
1118 } 1122 }
1119 1123
1120 return nvm_dev_factory(dev, fact.flags); 1124 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1125 return nvm_dev_factory(dev, fact.flags);
1126
1127 return 0;
1121} 1128}
1122 1129
1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1130static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..307db1ea22de 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 } 300 }
301 301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) 303 if (!page) {
304 bio_put(bio);
304 return -ENOMEM; 305 return -ENOMEM;
306 }
305 307
306 while ((slot = find_first_zero_bit(rblk->invalid_pages, 308 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) { 309 nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7700c8..f7b37336353f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
174static inline int request_intersects(struct rrpc_inflight_rq *r, 174static inline int request_intersects(struct rrpc_inflight_rq *r,
175 sector_t laddr_start, sector_t laddr_end) 175 sector_t laddr_start, sector_t laddr_end)
176{ 176{
177 return (laddr_end >= r->l_start && laddr_end <= r->l_end) && 177 return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
178 (laddr_start >= r->l_start && laddr_start <= r->l_end);
179} 178}
180 179
181static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, 180static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
184 sector_t laddr_end = laddr + pages - 1; 183 sector_t laddr_end = laddr + pages - 1;
185 struct rrpc_inflight_rq *rtmp; 184 struct rrpc_inflight_rq *rtmp;
186 185
186 WARN_ON(irqs_disabled());
187
187 spin_lock_irq(&rrpc->inflights.lock); 188 spin_lock_irq(&rrpc->inflights.lock);
188 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { 189 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
189 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { 190 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
1191 1191
1192 if (clone) 1192 if (clone)
1193 free_rq_clone(clone); 1193 free_rq_clone(clone);
1194 else if (!tio->md->queue->mq_ops)
1195 free_rq_tio(tio);
1194} 1196}
1195 1197
1196/* 1198/*
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bacb09ee..12099b09a9a7 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
2048 2048
2049 return 0; 2049 return 0;
2050} 2050}
2051EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
2051 2052
2052static int config_hot_period(u16 val) 2053static int config_hot_period(u16 val)
2053{ 2054{
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
2074 2075
2075 return config_hot_period(cycles32k); 2076 return config_hot_period(cycles32k);
2076} 2077}
2078EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
2077 2079
2078int db8500_prcmu_stop_temp_sense(void) 2080int db8500_prcmu_stop_temp_sense(void)
2079{ 2081{
2080 return config_hot_period(0xFFFF); 2082 return config_hot_period(0xFFFF);
2081} 2083}
2084EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
2082 2085
2083static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) 2086static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
2084{ 2087{
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 677d0362f334..80f9afcb1382 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
458{ 458{
459 struct mei_cl *cl = file->private_data; 459 struct mei_cl *cl = file->private_data;
460 460
461 return mei_cl_notify_request(cl, file, request); 461 if (request != MEI_HBM_NOTIFICATION_START &&
462 request != MEI_HBM_NOTIFICATION_STOP)
463 return -EINVAL;
464
465 return mei_cl_notify_request(cl, file, (u8)request);
462} 466}
463 467
464/** 468/**
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5914263090fc..fe207e542032 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,13 +47,10 @@
47#include "queue.h" 47#include "queue.h"
48 48
49MODULE_ALIAS("mmc:block"); 49MODULE_ALIAS("mmc:block");
50
51#ifdef KERNEL
52#ifdef MODULE_PARAM_PREFIX 50#ifdef MODULE_PARAM_PREFIX
53#undef MODULE_PARAM_PREFIX 51#undef MODULE_PARAM_PREFIX
54#endif 52#endif
55#define MODULE_PARAM_PREFIX "mmcblk." 53#define MODULE_PARAM_PREFIX "mmcblk."
56#endif
57 54
58#define INAND_CMD38_ARG_EXT_CSD 113 55#define INAND_CMD38_ARG_EXT_CSD 113
59#define INAND_CMD38_ARG_ERASE 0x00 56#define INAND_CMD38_ARG_ERASE 0x00
@@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
655 } 652 }
656 653
657 md = mmc_blk_get(bdev->bd_disk); 654 md = mmc_blk_get(bdev->bd_disk);
658 if (!md) 655 if (!md) {
656 err = -EINVAL;
659 goto cmd_err; 657 goto cmd_err;
658 }
660 659
661 card = md->queue.card; 660 card = md->queue.card;
662 if (IS_ERR(card)) { 661 if (IS_ERR(card)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45ef3faf..3446097a43c0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
925 925
926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, 926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
927 PAGE_SIZE, dir); 927 PAGE_SIZE, dir);
928 if (dma_mapping_error(dma_dev, dma_addr)) {
929 data->error = -EFAULT;
930 break;
931 }
928 if (direction == DMA_TO_DEVICE) 932 if (direction == DMA_TO_DEVICE)
929 t->tx_dma = dma_addr + sg->offset; 933 t->tx_dma = dma_addr + sg->offset;
930 else 934 else
@@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi)
1393 host->dma_dev = dev; 1397 host->dma_dev = dev;
1394 host->ones_dma = dma_map_single(dev, ones, 1398 host->ones_dma = dma_map_single(dev, ones,
1395 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); 1399 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1400 if (dma_mapping_error(dev, host->ones_dma))
1401 goto fail_ones_dma;
1396 host->data_dma = dma_map_single(dev, host->data, 1402 host->data_dma = dma_map_single(dev, host->data,
1397 sizeof(*host->data), DMA_BIDIRECTIONAL); 1403 sizeof(*host->data), DMA_BIDIRECTIONAL);
1398 1404 if (dma_mapping_error(dev, host->data_dma))
1399 /* REVISIT in theory those map operations can fail... */ 1405 goto fail_data_dma;
1400 1406
1401 dma_sync_single_for_cpu(host->dma_dev, 1407 dma_sync_single_for_cpu(host->dma_dev,
1402 host->data_dma, sizeof(*host->data), 1408 host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@ fail_glue_init:
1462 if (host->dma_dev) 1468 if (host->dma_dev)
1463 dma_unmap_single(host->dma_dev, host->data_dma, 1469 dma_unmap_single(host->dma_dev, host->data_dma,
1464 sizeof(*host->data), DMA_BIDIRECTIONAL); 1470 sizeof(*host->data), DMA_BIDIRECTIONAL);
1471fail_data_dma:
1472 if (host->dma_dev)
1473 dma_unmap_single(host->dma_dev, host->ones_dma,
1474 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1475fail_ones_dma:
1465 kfree(host->data); 1476 kfree(host->data);
1466 1477
1467fail_nobuf1: 1478fail_nobuf1:
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
2232 dma_release_channel(host->tx_chan); 2232 dma_release_channel(host->tx_chan);
2233 if (host->rx_chan) 2233 if (host->rx_chan)
2234 dma_release_channel(host->rx_chan); 2234 dma_release_channel(host->rx_chan);
2235 pm_runtime_dont_use_autosuspend(host->dev);
2235 pm_runtime_put_sync(host->dev); 2236 pm_runtime_put_sync(host->dev);
2236 pm_runtime_disable(host->dev); 2237 pm_runtime_disable(host->dev);
2237 if (host->dbclk) 2238 if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2253 dma_release_channel(host->tx_chan); 2254 dma_release_channel(host->tx_chan);
2254 dma_release_channel(host->rx_chan); 2255 dma_release_channel(host->rx_chan);
2255 2256
2257 pm_runtime_dont_use_autosuspend(host->dev);
2256 pm_runtime_put_sync(host->dev); 2258 pm_runtime_put_sync(host->dev);
2257 pm_runtime_disable(host->dev); 2259 pm_runtime_disable(host->dev);
2258 device_init_wakeup(&pdev->dev, false); 2260 device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896b9d69..da824772bbb4 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -86,7 +86,7 @@ struct pxamci_host {
86static inline void pxamci_init_ocr(struct pxamci_host *host) 86static inline void pxamci_init_ocr(struct pxamci_host *host)
87{ 87{
88#ifdef CONFIG_REGULATOR 88#ifdef CONFIG_REGULATOR
89 host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); 89 host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
90 90
91 if (IS_ERR(host->vcc)) 91 if (IS_ERR(host->vcc))
92 host->vcc = NULL; 92 host->vcc = NULL;
@@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev)
654 654
655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
656 irq = platform_get_irq(pdev, 0); 656 irq = platform_get_irq(pdev, 0);
657 if (!r || irq < 0) 657 if (irq < 0)
658 return -ENXIO; 658 return irq;
659
660 r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
661 if (!r)
662 return -EBUSY;
663 659
664 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); 660 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
665 if (!mmc) { 661 if (!mmc) {
@@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev)
695 host->pdata = pdev->dev.platform_data; 691 host->pdata = pdev->dev.platform_data;
696 host->clkrt = CLKRT_OFF; 692 host->clkrt = CLKRT_OFF;
697 693
698 host->clk = clk_get(&pdev->dev, NULL); 694 host->clk = devm_clk_get(&pdev->dev, NULL);
699 if (IS_ERR(host->clk)) { 695 if (IS_ERR(host->clk)) {
700 ret = PTR_ERR(host->clk); 696 ret = PTR_ERR(host->clk);
701 host->clk = NULL; 697 host->clk = NULL;
@@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev)
727 host->irq = irq; 723 host->irq = irq;
728 host->imask = MMC_I_MASK_ALL; 724 host->imask = MMC_I_MASK_ALL;
729 725
730 host->base = ioremap(r->start, SZ_4K); 726 host->base = devm_ioremap_resource(&pdev->dev, r);
731 if (!host->base) { 727 if (IS_ERR(host->base)) {
732 ret = -ENOMEM; 728 ret = PTR_ERR(host->base);
733 goto out; 729 goto out;
734 } 730 }
735 731
@@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev)
742 writel(64, host->base + MMC_RESTO); 738 writel(64, host->base + MMC_RESTO);
743 writel(host->imask, host->base + MMC_I_MASK); 739 writel(host->imask, host->base + MMC_I_MASK);
744 740
745 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); 741 ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
742 DRIVER_NAME, host);
746 if (ret) 743 if (ret)
747 goto out; 744 goto out;
748 745
@@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev)
804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 801 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
805 goto out; 802 goto out;
806 } else { 803 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 804 mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
808 0 : MMC_CAP2_RO_ACTIVE_HIGH; 805 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 806 }
810 807
@@ -833,14 +830,9 @@ out:
833 dma_release_channel(host->dma_chan_rx); 830 dma_release_channel(host->dma_chan_rx);
834 if (host->dma_chan_tx) 831 if (host->dma_chan_tx)
835 dma_release_channel(host->dma_chan_tx); 832 dma_release_channel(host->dma_chan_tx);
836 if (host->base)
837 iounmap(host->base);
838 if (host->clk)
839 clk_put(host->clk);
840 } 833 }
841 if (mmc) 834 if (mmc)
842 mmc_free_host(mmc); 835 mmc_free_host(mmc);
843 release_resource(r);
844 return ret; 836 return ret;
845} 837}
846 838
@@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev)
859 gpio_ro = host->pdata->gpio_card_ro; 851 gpio_ro = host->pdata->gpio_card_ro;
860 gpio_power = host->pdata->gpio_power; 852 gpio_power = host->pdata->gpio_power;
861 } 853 }
862 if (host->vcc)
863 regulator_put(host->vcc);
864
865 if (host->pdata && host->pdata->exit) 854 if (host->pdata && host->pdata->exit)
866 host->pdata->exit(&pdev->dev, mmc); 855 host->pdata->exit(&pdev->dev, mmc);
867 856
@@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev)
870 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 859 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
871 host->base + MMC_I_MASK); 860 host->base + MMC_I_MASK);
872 861
873 free_irq(host->irq, host);
874 dmaengine_terminate_all(host->dma_chan_rx); 862 dmaengine_terminate_all(host->dma_chan_rx);
875 dmaengine_terminate_all(host->dma_chan_tx); 863 dmaengine_terminate_all(host->dma_chan_tx);
876 dma_release_channel(host->dma_chan_rx); 864 dma_release_channel(host->dma_chan_rx);
877 dma_release_channel(host->dma_chan_tx); 865 dma_release_channel(host->dma_chan_tx);
878 iounmap(host->base);
879
880 clk_put(host->clk);
881
882 release_resource(host->res);
883 866
884 mmc_free_host(mmc); 867 mmc_free_host(mmc);
885 } 868 }
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc94062..a5cda926d38e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
146 .ops = &sdhci_acpi_ops_int, 146 .ops = &sdhci_acpi_ops_int,
147}; 147};
148 148
149static int bxt_get_cd(struct mmc_host *mmc)
150{
151 int gpio_cd = mmc_gpio_get_cd(mmc);
152 struct sdhci_host *host = mmc_priv(mmc);
153 unsigned long flags;
154 int ret = 0;
155
156 if (!gpio_cd)
157 return 0;
158
159 pm_runtime_get_sync(mmc->parent);
160
161 spin_lock_irqsave(&host->lock, flags);
162
163 if (host->flags & SDHCI_DEVICE_DEAD)
164 goto out;
165
166 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
167out:
168 spin_unlock_irqrestore(&host->lock, flags);
169
170 pm_runtime_mark_last_busy(mmc->parent);
171 pm_runtime_put_autosuspend(mmc->parent);
172
173 return ret;
174}
175
149static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, 176static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
150 const char *hid, const char *uid) 177 const char *hid, const char *uid)
151{ 178{
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
196 223
197 /* Platform specific code during sd probe slot goes here */ 224 /* Platform specific code during sd probe slot goes here */
198 225
226 if (hid && !strcmp(hid, "80865ACA"))
227 host->mmc_host_ops.get_cd = bxt_get_cd;
228
199 return 0; 229 return 0;
200} 230}
201 231
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7e7d8f0c9438..9cb86fb25976 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
217pm_runtime_disable: 217pm_runtime_disable:
218 pm_runtime_disable(&pdev->dev); 218 pm_runtime_disable(&pdev->dev);
219 pm_runtime_set_suspended(&pdev->dev); 219 pm_runtime_set_suspended(&pdev->dev);
220 pm_runtime_put_noidle(&pdev->dev);
220clocks_disable_unprepare: 221clocks_disable_unprepare:
221 clk_disable_unprepare(priv->gck); 222 clk_disable_unprepare(priv->gck);
222 clk_disable_unprepare(priv->mainck); 223 clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cc851b065d0a..df3b8eced8c4 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); 330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
331} 331}
332 332
333static int bxt_get_cd(struct mmc_host *mmc)
334{
335 int gpio_cd = mmc_gpio_get_cd(mmc);
336 struct sdhci_host *host = mmc_priv(mmc);
337 unsigned long flags;
338 int ret = 0;
339
340 if (!gpio_cd)
341 return 0;
342
343 pm_runtime_get_sync(mmc->parent);
344
345 spin_lock_irqsave(&host->lock, flags);
346
347 if (host->flags & SDHCI_DEVICE_DEAD)
348 goto out;
349
350 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
351out:
352 spin_unlock_irqrestore(&host->lock, flags);
353
354 pm_runtime_mark_last_busy(mmc->parent);
355 pm_runtime_put_autosuspend(mmc->parent);
356
357 return ret;
358}
359
333static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 360static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
334{ 361{
335 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 362 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
362 slot->cd_con_id = NULL; 389 slot->cd_con_id = NULL;
363 slot->cd_idx = 0; 390 slot->cd_idx = 0;
364 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395
365 return 0; 396 return 0;
366} 397}
367 398
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d622435d1bcc..add9fdfd1d8f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1360 sdhci_runtime_pm_get(host); 1360 sdhci_runtime_pm_get(host);
1361 1361
1362 /* Firstly check card presence */ 1362 /* Firstly check card presence */
1363 present = sdhci_do_get_cd(host); 1363 present = mmc->ops->get_cd(mmc);
1364 1364
1365 spin_lock_irqsave(&host->lock, flags); 1365 spin_lock_irqsave(&host->lock, flags);
1366 1366
@@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
2849 2849
2850 host = mmc_priv(mmc); 2850 host = mmc_priv(mmc);
2851 host->mmc = mmc; 2851 host->mmc = mmc;
2852 host->mmc_host_ops = sdhci_ops;
2853 mmc->ops = &host->mmc_host_ops;
2852 2854
2853 return host; 2855 return host;
2854} 2856}
@@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host)
3037 /* 3039 /*
3038 * Set host parameters. 3040 * Set host parameters.
3039 */ 3041 */
3040 mmc->ops = &sdhci_ops;
3041 max_clk = host->max_clk; 3042 max_clk = host->max_clk;
3042 3043
3043 if (host->ops->get_min_clock) 3044 if (host->ops->get_min_clock)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7654ae5d2b4e..0115e9907bf8 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -430,6 +430,7 @@ struct sdhci_host {
430 430
431 /* Internal data */ 431 /* Internal data */
432 struct mmc_host *mmc; /* MMC structure */ 432 struct mmc_host *mmc; /* MMC structure */
433 struct mmc_host_ops mmc_host_ops; /* MMC host ops */
433 u64 dma_mask; /* custom DMA mask */ 434 u64 dma_mask; /* custom DMA mask */
434 435
435#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 436#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 1ca8a1359cbc..6234eab38ff3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
445 pdata->slave_id_rx); 445 pdata->slave_id_rx);
446 } else { 446 } else {
447 host->chan_tx = dma_request_slave_channel(dev, "tx"); 447 host->chan_tx = dma_request_slave_channel(dev, "tx");
448 host->chan_tx = dma_request_slave_channel(dev, "rx"); 448 host->chan_rx = dma_request_slave_channel(dev, "rx");
449 } 449 }
450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, 450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
451 host->chan_rx); 451 host->chan_rx);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b560558884..b7f1a9919033 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, 214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
215 struct rtnl_link_stats64 *stats); 215 struct rtnl_link_stats64 *stats);
216static void bond_slave_arr_handler(struct work_struct *work); 216static void bond_slave_arr_handler(struct work_struct *work);
217static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
218 int mod);
217 219
218/*---------------------------- General routines -----------------------------*/ 220/*---------------------------- General routines -----------------------------*/
219 221
@@ -2127,6 +2129,7 @@ static void bond_miimon_commit(struct bonding *bond)
2127 continue; 2129 continue;
2128 2130
2129 case BOND_LINK_UP: 2131 case BOND_LINK_UP:
2132 bond_update_speed_duplex(slave);
2130 bond_set_slave_link_state(slave, BOND_LINK_UP, 2133 bond_set_slave_link_state(slave, BOND_LINK_UP,
2131 BOND_SLAVE_NOTIFY_NOW); 2134 BOND_SLAVE_NOTIFY_NOW);
2132 slave->last_link_up = jiffies; 2135 slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2459 struct slave *slave) 2462 struct slave *slave)
2460{ 2463{
2461 struct arphdr *arp = (struct arphdr *)skb->data; 2464 struct arphdr *arp = (struct arphdr *)skb->data;
2462 struct slave *curr_active_slave; 2465 struct slave *curr_active_slave, *curr_arp_slave;
2463 unsigned char *arp_ptr; 2466 unsigned char *arp_ptr;
2464 __be32 sip, tip; 2467 __be32 sip, tip;
2465 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2468 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2506 &sip, &tip); 2509 &sip, &tip);
2507 2510
2508 curr_active_slave = rcu_dereference(bond->curr_active_slave); 2511 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2512 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2509 2513
2510 /* Backup slaves won't see the ARP reply, but do come through 2514 /* We 'trust' the received ARP enough to validate it if:
2511 * here for each ARP probe (so we swap the sip/tip to validate 2515 *
2512 * the probe). In a "redundant switch, common router" type of 2516 * (a) the slave receiving the ARP is active (which includes the
2513 * configuration, the ARP probe will (hopefully) travel from 2517 * current ARP slave, if any), or
2514 * the active, through one switch, the router, then the other 2518 *
2515 * switch before reaching the backup. 2519 * (b) the receiving slave isn't active, but there is a currently
2520 * active slave and it received valid arp reply(s) after it became
2521 * the currently active slave, or
2522 *
2523 * (c) there is an ARP slave that sent an ARP during the prior ARP
2524 * interval, and we receive an ARP reply on any slave. We accept
2525 * these because switch FDB update delays may deliver the ARP
2526 * reply to a slave other than the sender of the ARP request.
2516 * 2527 *
2517 * We 'trust' the arp requests if there is an active slave and 2528 * Note: for (b), backup slaves are receiving the broadcast ARP
2518 * it received valid arp reply(s) after it became active. This 2529 * request, not a reply. This request passes from the sending
2519 * is done to avoid endless looping when we can't reach the 2530 * slave through the L2 switch(es) to the receiving slave. Since
2531 * this is checking the request, sip/tip are swapped for
2532 * validation.
2533 *
2534 * This is done to avoid endless looping when we can't reach the
2520 * arp_ip_target and fool ourselves with our own arp requests. 2535 * arp_ip_target and fool ourselves with our own arp requests.
2521 */ 2536 */
2522
2523 if (bond_is_active_slave(slave)) 2537 if (bond_is_active_slave(slave))
2524 bond_validate_arp(bond, slave, sip, tip); 2538 bond_validate_arp(bond, slave, sip, tip);
2525 else if (curr_active_slave && 2539 else if (curr_active_slave &&
2526 time_after(slave_last_rx(bond, curr_active_slave), 2540 time_after(slave_last_rx(bond, curr_active_slave),
2527 curr_active_slave->last_link_up)) 2541 curr_active_slave->last_link_up))
2528 bond_validate_arp(bond, slave, tip, sip); 2542 bond_validate_arp(bond, slave, tip, sip);
2543 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2544 bond_time_in_interval(bond,
2545 dev_trans_start(curr_arp_slave->dev), 1))
2546 bond_validate_arp(bond, slave, sip, tip);
2529 2547
2530out_unlock: 2548out_unlock:
2531 if (arp != (struct arphdr *)skb->data) 2549 if (arp != (struct arphdr *)skb->data)
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
117 */ 117 */
118#define EMS_USB_ARM7_CLOCK 8000000 118#define EMS_USB_ARM7_CLOCK 8000000
119 119
120#define CPC_TX_QUEUE_TRIGGER_LOW 25
121#define CPC_TX_QUEUE_TRIGGER_HIGH 35
122
120/* 123/*
121 * CAN-Message representation in a CPC_MSG. Message object type is 124 * CAN-Message representation in a CPC_MSG. Message object type is
122 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or 125 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
278 switch (urb->status) { 281 switch (urb->status) {
279 case 0: 282 case 0:
280 dev->free_slots = dev->intr_in_buffer[1]; 283 dev->free_slots = dev->intr_in_buffer[1];
284 if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
285 if (netif_queue_stopped(netdev)){
286 netif_wake_queue(netdev);
287 }
288 }
281 break; 289 break;
282 290
283 case -ECONNRESET: /* unlink */ 291 case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
526 /* Release context */ 534 /* Release context */
527 context->echo_index = MAX_TX_URBS; 535 context->echo_index = MAX_TX_URBS;
528 536
529 if (netif_queue_stopped(netdev))
530 netif_wake_queue(netdev);
531} 537}
532 538
533/* 539/*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
587 int err, i; 593 int err, i;
588 594
589 dev->intr_in_buffer[0] = 0; 595 dev->intr_in_buffer[0] = 0;
590 dev->free_slots = 15; /* initial size */ 596 dev->free_slots = 50; /* initial size */
591 597
592 for (i = 0; i < MAX_RX_URBS; i++) { 598 for (i = 0; i < MAX_RX_URBS; i++) {
593 struct urb *urb = NULL; 599 struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
835 841
836 /* Slow down tx path */ 842 /* Slow down tx path */
837 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || 843 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
838 dev->free_slots < 5) { 844 dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
839 netif_stop_queue(netdev); 845 netif_stop_queue(netdev);
840 } 846 }
841 } 847 }
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c54553418..a47f52f44b0d 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
25static const struct mv88e6xxx_switch_id mv88e6352_table[] = { 25static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" }, 26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, 27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
28 { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
28 { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, 29 { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
29 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" }, 30 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
30 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" }, 31 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf34681af4f6..512c8c0be1b4 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1555,7 +1555,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1555 1555
1556 if (vlan.vid != vid || !vlan.valid || 1556 if (vlan.vid != vid || !vlan.valid ||
1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1558 return -ENOENT; 1558 return -EOPNOTSUPP;
1559 1559
1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; 1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1561 1561
@@ -1582,6 +1582,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1582 const struct switchdev_obj_port_vlan *vlan) 1582 const struct switchdev_obj_port_vlan *vlan)
1583{ 1583{
1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1585 const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1585 u16 pvid, vid; 1586 u16 pvid, vid;
1586 int err = 0; 1587 int err = 0;
1587 1588
@@ -1597,7 +1598,8 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1597 goto unlock; 1598 goto unlock;
1598 1599
1599 if (vid == pvid) { 1600 if (vid == pvid) {
1600 err = _mv88e6xxx_port_pvid_set(ds, port, 0); 1601 /* restore reserved VLAN ID */
1602 err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
1601 if (err) 1603 if (err)
1602 goto unlock; 1604 goto unlock;
1603 } 1605 }
@@ -1889,26 +1891,20 @@ unlock:
1889 1891
1890int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) 1892int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
1891{ 1893{
1892 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1894 return 0;
1893 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1894 int err;
1895
1896 /* The port joined a bridge, so leave its reserved VLAN */
1897 mutex_lock(&ps->smi_mutex);
1898 err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
1899 if (!err)
1900 err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1901 mutex_unlock(&ps->smi_mutex);
1902 return err;
1903} 1895}
1904 1896
1905int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) 1897int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
1906{ 1898{
1899 return 0;
1900}
1901
1902static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
1903{
1907 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1908 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1905 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1909 int err; 1906 int err;
1910 1907
1911 /* The port left the bridge, so join its reserved VLAN */
1912 mutex_lock(&ps->smi_mutex); 1908 mutex_lock(&ps->smi_mutex);
1913 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); 1909 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
1914 if (!err) 1910 if (!err)
@@ -2192,8 +2188,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2192 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2188 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2193 continue; 2189 continue;
2194 2190
2195 /* setup the unbridged state */ 2191 ret = mv88e6xxx_setup_port_default_vlan(ds, i);
2196 ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
2197 if (ret < 0) 2192 if (ret < 0)
2198 return ret; 2193 return ret;
2199 } 2194 }
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289a26c0..2f79d29f17f2 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), 1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), 1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), 1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
1504 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
1504 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), 1505 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
1505 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), 1506 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
1506 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), 1507 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbea15bd..0907ab6ff309 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2380 sizeof(u32), 2380 sizeof(u32),
2381 &tx_ring->tx_status_pa, 2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL); 2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status_pa) { 2383 if (!tx_ring->tx_status) {
2384 dev_err(&adapter->pdev->dev, 2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n"); 2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM; 2386 return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b921dc..fcdf5dda448f 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@ static const char version[] =
50static void write_rreg(u_long base, u_int reg, u_int val) 50static void write_rreg(u_long base, u_int reg, u_int val)
51{ 51{
52 asm volatile( 52 asm volatile(
53 "str%?h %1, [%2] @ NET_RAP\n\t" 53 "strh %1, [%2] @ NET_RAP\n\t"
54 "str%?h %0, [%2, #-4] @ NET_RDP" 54 "strh %0, [%2, #-4] @ NET_RDP"
55 : 55 :
56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
57} 57}
@@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
60{ 60{
61 unsigned short v; 61 unsigned short v;
62 asm volatile( 62 asm volatile(
63 "str%?h %1, [%2] @ NET_RAP\n\t" 63 "strh %1, [%2] @ NET_RAP\n\t"
64 "ldr%?h %0, [%2, #-4] @ NET_RDP" 64 "ldrh %0, [%2, #-4] @ NET_RDP"
65 : "=r" (v) 65 : "=r" (v)
66 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 66 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
67 return v; 67 return v;
@@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70static inline void write_ireg(u_long base, u_int reg, u_int val) 70static inline void write_ireg(u_long base, u_int reg, u_int val)
71{ 71{
72 asm volatile( 72 asm volatile(
73 "str%?h %1, [%2] @ NET_RAP\n\t" 73 "strh %1, [%2] @ NET_RAP\n\t"
74 "str%?h %0, [%2, #8] @ NET_IDP" 74 "strh %0, [%2, #8] @ NET_IDP"
75 : 75 :
76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
77} 77}
@@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
80{ 80{
81 u_short v; 81 u_short v;
82 asm volatile( 82 asm volatile(
83 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "strh %1, [%2] @ NAT_RAP\n\t"
84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldrh %0, [%2, #8] @ NET_IDP\n\t"
85 : "=r" (v) 85 : "=r" (v)
86 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
87 return v; 87 return v;
@@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
96 offset = ISAMEM_BASE + (offset << 1); 96 offset = ISAMEM_BASE + (offset << 1);
97 length = (length + 1) & ~1; 97 length = (length + 1) & ~1;
98 if ((int)buf & 2) { 98 if ((int)buf & 2) {
99 asm volatile("str%?h %2, [%0], #4" 99 asm volatile("strh %2, [%0], #4"
100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
101 buf += 2; 101 buf += 2;
102 length -= 2; 102 length -= 2;
@@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
104 while (length > 8) { 104 while (length > 8) {
105 register unsigned int tmp asm("r2"), tmp2 asm("r3"); 105 register unsigned int tmp asm("r2"), tmp2 asm("r3");
106 asm volatile( 106 asm volatile(
107 "ldm%?ia %0!, {%1, %2}" 107 "ldmia %0!, {%1, %2}"
108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); 108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
109 length -= 8; 109 length -= 8;
110 asm volatile( 110 asm volatile(
111 "str%?h %1, [%0], #4\n\t" 111 "strh %1, [%0], #4\n\t"
112 "mov%? %1, %1, lsr #16\n\t" 112 "mov %1, %1, lsr #16\n\t"
113 "str%?h %1, [%0], #4\n\t" 113 "strh %1, [%0], #4\n\t"
114 "str%?h %2, [%0], #4\n\t" 114 "strh %2, [%0], #4\n\t"
115 "mov%? %2, %2, lsr #16\n\t" 115 "mov %2, %2, lsr #16\n\t"
116 "str%?h %2, [%0], #4" 116 "strh %2, [%0], #4"
117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); 117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
118 } 118 }
119 while (length > 0) { 119 while (length > 0) {
120 asm volatile("str%?h %2, [%0], #4" 120 asm volatile("strh %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 122 buf += 2;
123 length -= 2; 123 length -= 2;
@@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
132 if ((int)buf & 2) { 132 if ((int)buf & 2) {
133 unsigned int tmp; 133 unsigned int tmp;
134 asm volatile( 134 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 135 "ldrh %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 136 "strb %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 137 "mov %2, %2, lsr #8\n\t"
138 "str%?b %2, [%1], #1" 138 "strb %2, [%1], #1"
139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); 139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
140 length -= 2; 140 length -= 2;
141 } 141 }
142 while (length > 8) { 142 while (length > 8) {
143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; 143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 asm volatile( 144 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 145 "ldrh %2, [%0], #4\n\t"
146 "ldr%?h %4, [%0], #4\n\t" 146 "ldrh %4, [%0], #4\n\t"
147 "ldr%?h %3, [%0], #4\n\t" 147 "ldrh %3, [%0], #4\n\t"
148 "orr%? %2, %2, %4, lsl #16\n\t" 148 "orr %2, %2, %4, lsl #16\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 149 "ldrh %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 150 "orr %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 151 "stmia %1!, {%2, %3}"
152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) 152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
153 : "0" (offset), "1" (buf)); 153 : "0" (offset), "1" (buf));
154 length -= 8; 154 length -= 8;
@@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
156 while (length > 0) { 156 while (length > 0) {
157 unsigned int tmp; 157 unsigned int tmp;
158 asm volatile( 158 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 159 "ldrh %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 160 "strb %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 161 "mov %2, %2, lsr #8\n\t"
162 "str%?b %2, [%1], #1" 162 "strb %2, [%1], #1"
163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); 163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
164 length -= 2; 164 length -= 2;
165 } 165 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590f6bb1..3a7ebfdda57d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */ 547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
548 548
549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); 549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
550 if(lp==NULL) 550 if (!lp)
551 return -ENODEV; 551 return -ENOMEM;
552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
553 dev->ml_priv = lp; 553 dev->ml_priv = lp;
554 lp->name = chipname; 554 lp->name = chipname;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..6446af1403f7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
163 struct sk_buff *skb = tx_buff->skb; 163 struct sk_buff *skb = tx_buff->skb;
164 unsigned int info = le32_to_cpu(txbd->info); 164 unsigned int info = le32_to_cpu(txbd->info);
165 165
166 if ((info & FOR_EMAC) || !txbd->data) 166 if ((info & FOR_EMAC) || !txbd->data || !skb)
167 break; 167 break;
168 168
169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { 169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
191 191
192 txbd->data = 0; 192 txbd->data = 0;
193 txbd->info = 0; 193 txbd->info = 0;
194 tx_buff->skb = NULL;
194 195
195 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; 196 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
196 } 197 }
@@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev)
446 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 447 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
447 } 448 }
448 449
450 priv->txbd_curr = 0;
451 priv->txbd_dirty = 0;
452
449 /* Clean Tx BD's */ 453 /* Clean Tx BD's */
450 memset(priv->txbd, 0, TX_RING_SZ); 454 memset(priv->txbd, 0, TX_RING_SZ);
451 455
@@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
514} 518}
515 519
516/** 520/**
521 * arc_free_tx_queue - free skb from tx queue
522 * @ndev: Pointer to the network device.
523 *
524 * This function must be called while EMAC disable
525 */
526static void arc_free_tx_queue(struct net_device *ndev)
527{
528 struct arc_emac_priv *priv = netdev_priv(ndev);
529 unsigned int i;
530
531 for (i = 0; i < TX_BD_NUM; i++) {
532 struct arc_emac_bd *txbd = &priv->txbd[i];
533 struct buffer_state *tx_buff = &priv->tx_buff[i];
534
535 if (tx_buff->skb) {
536 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
537 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
538
539 /* return the sk_buff to system */
540 dev_kfree_skb_irq(tx_buff->skb);
541 }
542
543 txbd->info = 0;
544 txbd->data = 0;
545 tx_buff->skb = NULL;
546 }
547}
548
549/**
550 * arc_free_rx_queue - free skb from rx queue
551 * @ndev: Pointer to the network device.
552 *
553 * This function must be called while EMAC disable
554 */
555static void arc_free_rx_queue(struct net_device *ndev)
556{
557 struct arc_emac_priv *priv = netdev_priv(ndev);
558 unsigned int i;
559
560 for (i = 0; i < RX_BD_NUM; i++) {
561 struct arc_emac_bd *rxbd = &priv->rxbd[i];
562 struct buffer_state *rx_buff = &priv->rx_buff[i];
563
564 if (rx_buff->skb) {
565 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
566 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
567
568 /* return the sk_buff to system */
569 dev_kfree_skb_irq(rx_buff->skb);
570 }
571
572 rxbd->info = 0;
573 rxbd->data = 0;
574 rx_buff->skb = NULL;
575 }
576}
577
578/**
517 * arc_emac_stop - Close the network device. 579 * arc_emac_stop - Close the network device.
518 * @ndev: Pointer to the network device. 580 * @ndev: Pointer to the network device.
519 * 581 *
@@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev)
534 /* Disable EMAC */ 596 /* Disable EMAC */
535 arc_reg_clr(priv, R_CTRL, EN_MASK); 597 arc_reg_clr(priv, R_CTRL, EN_MASK);
536 598
599 /* Return the sk_buff to system */
600 arc_free_tx_queue(ndev);
601 arc_free_rx_queue(ndev);
602
537 return 0; 603 return 0;
538} 604}
539 605
@@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
610 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 676 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
611 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 677 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
612 678
613 priv->tx_buff[*txbd_curr].skb = skb;
614 priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 679 priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
615 680
616 /* Make sure pointer to data buffer is set */ 681 /* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
620 685
621 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 686 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
622 687
688 /* Make sure info word is set */
689 wmb();
690
691 priv->tx_buff[*txbd_curr].skb = skb;
692
623 /* Increment index to point to the next BD */ 693 /* Increment index to point to the next BD */
624 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 694 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
625 695
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..1fb80100e5e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
6185 shift -= 4; 6185 shift -= 4;
6186 digit = ((num & mask) >> shift); 6186 digit = ((num & mask) >> shift);
6187 if (digit == 0 && remove_leading_zeros) { 6187 if (digit == 0 && remove_leading_zeros) {
6188 mask = mask >> 4; 6188 *str_ptr = '0';
6189 continue; 6189 } else {
6190 } else if (digit < 0xa) 6190 if (digit < 0xa)
6191 *str_ptr = digit + '0'; 6191 *str_ptr = digit + '0';
6192 else 6192 else
6193 *str_ptr = digit - 0xa + 'a'; 6193 *str_ptr = digit - 0xa + 'a';
6194 remove_leading_zeros = 0; 6194
6195 str_ptr++; 6195 remove_leading_zeros = 0;
6196 (*len)--; 6196 str_ptr++;
6197 (*len)--;
6198 }
6197 mask = mask >> 4; 6199 mask = mask >> 4;
6198 if (shift == 4*4) { 6200 if (shift == 4*4) {
6201 if (remove_leading_zeros) {
6202 str_ptr++;
6203 (*len)--;
6204 }
6199 *str_ptr = '.'; 6205 *str_ptr = '.';
6200 str_ptr++; 6206 str_ptr++;
6201 (*len)--; 6207 (*len)--;
6202 remove_leading_zeros = 1; 6208 remove_leading_zeros = 1;
6203 } 6209 }
6204 } 6210 }
6211 if (remove_leading_zeros)
6212 (*len)--;
6205 return 0; 6213 return 0;
6206} 6214}
6207 6215
6216static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
6217{
6218 u8 *str_ptr = str;
6219 u32 mask = 0x00f00000;
6220 u8 shift = 8*3;
6221 u8 digit;
6222 u8 remove_leading_zeros = 1;
6223
6224 if (*len < 10) {
6225 /* Need more than 10chars for this format */
6226 *str_ptr = '\0';
6227 (*len)--;
6228 return -EINVAL;
6229 }
6230
6231 while (shift > 0) {
6232 shift -= 4;
6233 digit = ((num & mask) >> shift);
6234 if (digit == 0 && remove_leading_zeros) {
6235 *str_ptr = '0';
6236 } else {
6237 if (digit < 0xa)
6238 *str_ptr = digit + '0';
6239 else
6240 *str_ptr = digit - 0xa + 'a';
6241
6242 remove_leading_zeros = 0;
6243 str_ptr++;
6244 (*len)--;
6245 }
6246 mask = mask >> 4;
6247 if ((shift == 4*4) || (shift == 4*2)) {
6248 if (remove_leading_zeros) {
6249 str_ptr++;
6250 (*len)--;
6251 }
6252 *str_ptr = '.';
6253 str_ptr++;
6254 (*len)--;
6255 remove_leading_zeros = 1;
6256 }
6257 }
6258 if (remove_leading_zeros)
6259 (*len)--;
6260 return 0;
6261}
6208 6262
6209static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6263static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
6210{ 6264{
@@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9677 9731
9678 if (bnx2x_is_8483x_8485x(phy)) { 9732 if (bnx2x_is_8483x_8485x(phy)) {
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9733 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9734 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9681 phy->ver_addr); 9735 fw_ver1 &= 0xfff;
9736 bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
9682 } else { 9737 } else {
9683 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9738 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9684 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9739 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9732static void bnx2x_848xx_set_led(struct bnx2x *bp, 9787static void bnx2x_848xx_set_led(struct bnx2x *bp,
9733 struct bnx2x_phy *phy) 9788 struct bnx2x_phy *phy)
9734{ 9789{
9735 u16 val, offset, i; 9790 u16 val, led3_blink_rate, offset, i;
9736 static struct bnx2x_reg_set reg_set[] = { 9791 static struct bnx2x_reg_set reg_set[] = {
9737 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9792 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9738 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9793 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9739 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9794 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9740 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9741 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9795 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9742 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9796 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9743 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9797 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9744 }; 9798 };
9799
9800 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
9801 /* Set LED5 source */
9802 bnx2x_cl45_write(bp, phy,
9803 MDIO_PMA_DEVAD,
9804 MDIO_PMA_REG_8481_LED5_MASK,
9805 0x90);
9806 led3_blink_rate = 0x000f;
9807 } else {
9808 led3_blink_rate = 0x0000;
9809 }
9810 /* Set LED3 BLINK */
9811 bnx2x_cl45_write(bp, phy,
9812 MDIO_PMA_DEVAD,
9813 MDIO_PMA_REG_8481_LED3_BLINK,
9814 led3_blink_rate);
9815
9745 /* PHYC_CTL_LED_CTL */ 9816 /* PHYC_CTL_LED_CTL */
9746 bnx2x_cl45_read(bp, phy, 9817 bnx2x_cl45_read(bp, phy,
9747 MDIO_PMA_DEVAD, 9818 MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9749 val &= 0xFE00; 9820 val &= 0xFE00;
9750 val |= 0x0092; 9821 val |= 0x0092;
9751 9822
9823 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9824 val |= 2 << 12; /* LED5 ON based on source */
9825
9752 bnx2x_cl45_write(bp, phy, 9826 bnx2x_cl45_write(bp, phy,
9753 MDIO_PMA_DEVAD, 9827 MDIO_PMA_DEVAD,
9754 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9828 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9762 else 9836 else
9763 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9837 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9764 9838
9765 /* stretch_en for LED3*/ 9839 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9840 val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
9841 MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9842 else
9843 val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9844
9845 /* stretch_en for LEDs */
9766 bnx2x_cl45_read_or_write(bp, phy, 9846 bnx2x_cl45_read_or_write(bp, phy,
9767 MDIO_PMA_DEVAD, offset, 9847 MDIO_PMA_DEVAD,
9768 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9848 offset,
9849 val);
9769} 9850}
9770 9851
9771static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9852static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9775 struct bnx2x *bp = params->bp; 9856 struct bnx2x *bp = params->bp;
9776 switch (action) { 9857 switch (action) {
9777 case PHY_INIT: 9858 case PHY_INIT:
9778 if (!bnx2x_is_8483x_8485x(phy)) { 9859 if (bnx2x_is_8483x_8485x(phy)) {
9779 /* Save spirom version */ 9860 /* Save spirom version */
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9861 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9781 } 9862 }
@@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
10036 10117
10037static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 10118static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10038 struct link_params *params, u16 fw_cmd, 10119 struct link_params *params, u16 fw_cmd,
10039 u16 cmd_args[], int argc) 10120 u16 cmd_args[], int argc, int process)
10040{ 10121{
10041 int idx; 10122 int idx;
10042 u16 val; 10123 u16 val;
10043 struct bnx2x *bp = params->bp; 10124 struct bnx2x *bp = params->bp;
10044 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 10125 int rc = 0;
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10126
10046 MDIO_848xx_CMD_HDLR_STATUS, 10127 if (process == PHY84833_MB_PROCESS2) {
10047 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 10128 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
10129 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10130 MDIO_848xx_CMD_HDLR_STATUS,
10131 PHY84833_STATUS_CMD_OPEN_OVERRIDE);
10132 }
10133
10048 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10134 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10135 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10050 MDIO_848xx_CMD_HDLR_STATUS, &val); 10136 MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10054 } 10140 }
10055 if (idx >= PHY848xx_CMDHDLR_WAIT) { 10141 if (idx >= PHY848xx_CMDHDLR_WAIT) {
10056 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 10142 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
10143 /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
10144 * clear the status to CMD_CLEAR_COMPLETE
10145 */
10146 if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
10147 val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
10148 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10149 MDIO_848xx_CMD_HDLR_STATUS,
10150 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10151 }
10057 return -EINVAL; 10152 return -EINVAL;
10058 } 10153 }
10059 10154 if (process == PHY84833_MB_PROCESS1 ||
10060 /* Prepare argument(s) and issue command */ 10155 process == PHY84833_MB_PROCESS2) {
10061 for (idx = 0; idx < argc; idx++) { 10156 /* Prepare argument(s) */
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10157 for (idx = 0; idx < argc; idx++) {
10063 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10158 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10064 cmd_args[idx]); 10159 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10160 cmd_args[idx]);
10161 }
10065 } 10162 }
10163
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10164 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10067 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); 10165 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
10068 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10166 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10076 if ((idx >= PHY848xx_CMDHDLR_WAIT) || 10174 if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
10077 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 10175 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
10078 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 10176 DP(NETIF_MSG_LINK, "FW cmd failed.\n");
10079 return -EINVAL; 10177 rc = -EINVAL;
10080 } 10178 }
10081 /* Gather returning data */ 10179 if (process == PHY84833_MB_PROCESS3 && rc == 0) {
10082 for (idx = 0; idx < argc; idx++) { 10180 /* Gather returning data */
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10181 for (idx = 0; idx < argc; idx++) {
10084 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10182 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10085 &cmd_args[idx]); 10183 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10184 &cmd_args[idx]);
10185 }
10086 } 10186 }
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10187 if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
10088 MDIO_848xx_CMD_HDLR_STATUS, 10188 val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
10089 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 10189 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10090 return 0; 10190 MDIO_848xx_CMD_HDLR_STATUS,
10191 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10192 }
10193 return rc;
10091} 10194}
10092 10195
10093static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, 10196static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10094 struct link_params *params, 10197 struct link_params *params,
10095 u16 fw_cmd, 10198 u16 fw_cmd,
10096 u16 cmd_args[], int argc) 10199 u16 cmd_args[], int argc,
10200 int process)
10097{ 10201{
10098 struct bnx2x *bp = params->bp; 10202 struct bnx2x *bp = params->bp;
10099 10203
@@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10106 argc); 10210 argc);
10107 } else { 10211 } else {
10108 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, 10212 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
10109 argc); 10213 argc, process);
10110 } 10214 }
10111} 10215}
10112 10216
@@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
10133 10237
10134 status = bnx2x_848xx_cmd_hdlr(phy, params, 10238 status = bnx2x_848xx_cmd_hdlr(phy, params,
10135 PHY848xx_CMD_SET_PAIR_SWAP, data, 10239 PHY848xx_CMD_SET_PAIR_SWAP, data,
10136 PHY848xx_CMDHDLR_MAX_ARGS); 10240 2, PHY84833_MB_PROCESS2);
10137 if (status == 0) 10241 if (status == 0)
10138 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 10242 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
10139 10243
@@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
10222 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10326 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
10223 10327
10224 /* Prevent Phy from working in EEE and advertising it */ 10328 /* Prevent Phy from working in EEE and advertising it */
10225 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10329 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10226 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10330 &cmd_args, 1, PHY84833_MB_PROCESS1);
10227 if (rc) { 10331 if (rc) {
10228 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10332 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
10229 return rc; 10333 return rc;
@@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
10240 struct bnx2x *bp = params->bp; 10344 struct bnx2x *bp = params->bp;
10241 u16 cmd_args = 1; 10345 u16 cmd_args = 1;
10242 10346
10243 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10347 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10244 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10348 &cmd_args, 1, PHY84833_MB_PROCESS1);
10245 if (rc) { 10349 if (rc) {
10246 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10350 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
10247 return rc; 10351 return rc;
@@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10362 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10466 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
10363 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10467 rc = bnx2x_848xx_cmd_hdlr(phy, params,
10364 PHY848xx_CMD_SET_EEE_MODE, cmd_args, 10468 PHY848xx_CMD_SET_EEE_MODE, cmd_args,
10365 PHY848xx_CMDHDLR_MAX_ARGS); 10469 4, PHY84833_MB_PROCESS1);
10366 if (rc) 10470 if (rc)
10367 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10471 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
10368 } 10472 }
@@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10416 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10520 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10417 } 10521 }
10418 10522
10523 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10524 /* Additional settings for jumbo packets in 1000BASE-T mode */
10525 /* Allow rx extended length */
10526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10527 MDIO_AN_REG_8481_AUX_CTRL, &val);
10528 val |= 0x4000;
10529 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10530 MDIO_AN_REG_8481_AUX_CTRL, val);
10531 /* TX FIFO Elasticity LSB */
10532 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10533 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
10534 val |= 0x1;
10535 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10536 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
10537 /* TX FIFO Elasticity MSB */
10538 /* Enable expansion register 0x46 (Pattern Generator status) */
10539 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10540 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
10541
10542 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10543 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
10544 val |= 0x4000;
10545 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10546 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
10547 }
10548
10419 if (bnx2x_is_8483x_8485x(phy)) { 10549 if (bnx2x_is_8483x_8485x(phy)) {
10420 /* Bring PHY out of super isolate mode as the final step. */ 10550 /* Bring PHY out of super isolate mode as the final step. */
10421 bnx2x_cl45_read_and_write(bp, phy, 10551 bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10555 return link_up; 10685 return link_up;
10556} 10686}
10557 10687
10688static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
10689{
10690 int status = 0;
10691 u32 num;
10692
10693 num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
10694 ((raw_ver & 0xF000) >> 12);
10695 status = bnx2x_3_seq_format_ver(num, str, len);
10696 return status;
10697}
10698
10558static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10699static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10559{ 10700{
10560 int status = 0; 10701 int status = 0;
@@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10651 0x0); 10792 0x0);
10652 10793
10653 } else { 10794 } else {
10795 /* LED 1 OFF */
10654 bnx2x_cl45_write(bp, phy, 10796 bnx2x_cl45_write(bp, phy,
10655 MDIO_PMA_DEVAD, 10797 MDIO_PMA_DEVAD,
10656 MDIO_PMA_REG_8481_LED1_MASK, 10798 MDIO_PMA_REG_8481_LED1_MASK,
10657 0x0); 10799 0x0);
10800
10801 if (phy->type ==
10802 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10803 /* LED 2 OFF */
10804 bnx2x_cl45_write(bp, phy,
10805 MDIO_PMA_DEVAD,
10806 MDIO_PMA_REG_8481_LED2_MASK,
10807 0x0);
10808 /* LED 3 OFF */
10809 bnx2x_cl45_write(bp, phy,
10810 MDIO_PMA_DEVAD,
10811 MDIO_PMA_REG_8481_LED3_MASK,
10812 0x0);
10813 }
10658 } 10814 }
10659 break; 10815 break;
10660 case LED_MODE_FRONT_PANEL_OFF: 10816 case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10713 MDIO_PMA_REG_8481_SIGNAL_MASK, 10869 MDIO_PMA_REG_8481_SIGNAL_MASK,
10714 0x0); 10870 0x0);
10715 } 10871 }
10872 if (phy->type ==
10873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10874 /* LED 2 OFF */
10875 bnx2x_cl45_write(bp, phy,
10876 MDIO_PMA_DEVAD,
10877 MDIO_PMA_REG_8481_LED2_MASK,
10878 0x0);
10879 /* LED 3 OFF */
10880 bnx2x_cl45_write(bp, phy,
10881 MDIO_PMA_DEVAD,
10882 MDIO_PMA_REG_8481_LED3_MASK,
10883 0x0);
10884 }
10716 } 10885 }
10717 break; 10886 break;
10718 case LED_MODE_ON: 10887 case LED_MODE_ON:
@@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10776 params->port*4, 10945 params->port*4,
10777 NIG_MASK_MI_INT); 10946 NIG_MASK_MI_INT);
10778 } 10947 }
10948 }
10949 if (phy->type ==
10950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10951 /* Tell LED3 to constant on */
10952 bnx2x_cl45_read(bp, phy,
10953 MDIO_PMA_DEVAD,
10954 MDIO_PMA_REG_8481_LINK_SIGNAL,
10955 &val);
10956 val &= ~(7<<6);
10957 val |= (2<<6); /* A83B[8:6]= 2 */
10958 bnx2x_cl45_write(bp, phy,
10959 MDIO_PMA_DEVAD,
10960 MDIO_PMA_REG_8481_LINK_SIGNAL,
10961 val);
10962 bnx2x_cl45_write(bp, phy,
10963 MDIO_PMA_DEVAD,
10964 MDIO_PMA_REG_8481_LED3_MASK,
10965 0x20);
10966 } else {
10779 bnx2x_cl45_write(bp, phy, 10967 bnx2x_cl45_write(bp, phy,
10780 MDIO_PMA_DEVAD, 10968 MDIO_PMA_DEVAD,
10781 MDIO_PMA_REG_8481_SIGNAL_MASK, 10969 MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10854 MDIO_PMA_REG_8481_LINK_SIGNAL, 11042 MDIO_PMA_REG_8481_LINK_SIGNAL,
10855 val); 11043 val);
10856 if (phy->type == 11044 if (phy->type ==
11045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
11046 bnx2x_cl45_write(bp, phy,
11047 MDIO_PMA_DEVAD,
11048 MDIO_PMA_REG_8481_LED2_MASK,
11049 0x18);
11050 bnx2x_cl45_write(bp, phy,
11051 MDIO_PMA_DEVAD,
11052 MDIO_PMA_REG_8481_LED3_MASK,
11053 0x06);
11054 }
11055 if (phy->type ==
10857 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 11056 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10858 /* Restore LED4 source to external link, 11057 /* Restore LED4 source to external link,
10859 * and re-enable interrupts. 11058 * and re-enable interrupts.
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = {
11982 .read_status = (read_status_t)bnx2x_848xx_read_status, 12181 .read_status = (read_status_t)bnx2x_848xx_read_status,
11983 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 12182 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11984 .config_loopback = (config_loopback_t)NULL, 12183 .config_loopback = (config_loopback_t)NULL,
11985 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 12184 .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
11986 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 12185 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11987 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 12186 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11988 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 12187 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13807 if (CHIP_IS_E3(bp)) { 14006 if (CHIP_IS_E3(bp)) {
13808 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 14007 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13809 bnx2x_set_aer_mmd(params, phy); 14008 bnx2x_set_aer_mmd(params, phy);
13810 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 14009 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
13811 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 14010 (phy->speed_cap_mask &
14011 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
14012 (phy->req_line_speed == SPEED_20000))
13812 bnx2x_check_kr2_wa(params, vars, phy); 14013 bnx2x_check_kr2_wa(params, vars, phy);
13813 bnx2x_check_over_curr(params, vars); 14014 bnx2x_check_over_curr(params, vars);
13814 if (vars->rx_tx_asic_rst) 14015 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49bd5cb..a43dea259b12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/
7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec 7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
7299/* BCM84858 only */
7300#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
7299 7301
7300/* BCM84833 only */ 7302/* BCM84833 only */
7301#define MDIO_84833_TOP_CFG_FW_REV 0x400f 7303#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/
7337#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 7339#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
7338#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 7340#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
7339#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 7341#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
7342/* Mailbox Process */
7343#define PHY84833_MB_PROCESS1 1
7344#define PHY84833_MB_PROCESS2 2
7345#define PHY84833_MB_PROCESS3 3
7340 7346
7341/* Mailbox status set used by 84858 only */ 7347/* Mailbox status set used by 84858 only */
7342#define PHY84858_STATUS_CMD_RECEIVED 0x0001 7348#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5dc89e527e7d..8ab000dd52d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD 69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256 70#define BNXT_RX_COPY_THRESH 256
71 71
72#define BNXT_TX_PUSH_THRESH 92 72#define BNXT_TX_PUSH_THRESH 164
73 73
74enum board_idx { 74enum board_idx {
75 BCM57301, 75 BCM57301,
@@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
223 } 223 }
224 224
225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
226 struct tx_push_bd *push = txr->tx_push; 226 struct tx_push_buffer *tx_push_buf = txr->tx_push;
227 struct tx_bd *tx_push = &push->txbd1; 227 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
228 struct tx_bd_ext *tx_push1 = &push->txbd2; 228 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
229 void *pdata = tx_push1 + 1; 229 void *pdata = tx_push_buf->data;
230 int j; 230 u64 *end;
231 int j, push_len;
231 232
232 /* Set COAL_NOW to be ready quickly for the next push */ 233 /* Set COAL_NOW to be ready quickly for the next push */
233 tx_push->tx_bd_len_flags_type = 234 tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 248 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
248 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 249 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
249 250
251 end = PTR_ALIGN(pdata + length + 1, 8) - 1;
252 *end = 0;
253
250 skb_copy_from_linear_data(skb, pdata, len); 254 skb_copy_from_linear_data(skb, pdata, len);
251 pdata += len; 255 pdata += len;
252 for (j = 0; j < last_frag; j++) { 256 for (j = 0; j < last_frag; j++) {
@@ -261,22 +265,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 pdata += skb_frag_size(frag); 265 pdata += skb_frag_size(frag);
262 } 266 }
263 267
264 memcpy(txbd, tx_push, sizeof(*txbd)); 268 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
269 txbd->tx_bd_haddr = txr->data_mapping;
265 prod = NEXT_TX(prod); 270 prod = NEXT_TX(prod);
266 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 271 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
267 memcpy(txbd, tx_push1, sizeof(*txbd)); 272 memcpy(txbd, tx_push1, sizeof(*txbd));
268 prod = NEXT_TX(prod); 273 prod = NEXT_TX(prod);
269 push->doorbell = 274 tx_push->doorbell =
270 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 275 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
271 txr->tx_prod = prod; 276 txr->tx_prod = prod;
272 277
273 netdev_tx_sent_queue(txq, skb->len); 278 netdev_tx_sent_queue(txq, skb->len);
274 279
275 __iowrite64_copy(txr->tx_doorbell, push, 280 push_len = (length + sizeof(*tx_push) + 7) / 8;
276 (length + sizeof(*push) + 8) / 8); 281 if (push_len > 16) {
282 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
283 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
284 push_len - 16);
285 } else {
286 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
287 push_len);
288 }
277 289
278 tx_buf->is_push = 1; 290 tx_buf->is_push = 1;
279
280 goto tx_done; 291 goto tx_done;
281 } 292 }
282 293
@@ -1753,7 +1764,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1753 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 1764 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1754 bp->tx_push_thresh); 1765 bp->tx_push_thresh);
1755 1766
1756 if (push_size > 128) { 1767 if (push_size > 256) {
1757 push_size = 0; 1768 push_size = 0;
1758 bp->tx_push_thresh = 0; 1769 bp->tx_push_thresh = 0;
1759 } 1770 }
@@ -1772,7 +1783,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1772 return rc; 1783 return rc;
1773 1784
1774 if (bp->tx_push_size) { 1785 if (bp->tx_push_size) {
1775 struct tx_bd *txbd;
1776 dma_addr_t mapping; 1786 dma_addr_t mapping;
1777 1787
1778 /* One pre-allocated DMA buffer to backup 1788 /* One pre-allocated DMA buffer to backup
@@ -1786,13 +1796,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1786 if (!txr->tx_push) 1796 if (!txr->tx_push)
1787 return -ENOMEM; 1797 return -ENOMEM;
1788 1798
1789 txbd = &txr->tx_push->txbd1;
1790
1791 mapping = txr->tx_push_mapping + 1799 mapping = txr->tx_push_mapping +
1792 sizeof(struct tx_push_bd); 1800 sizeof(struct tx_push_bd);
1793 txbd->tx_bd_haddr = cpu_to_le64(mapping); 1801 txr->data_mapping = cpu_to_le64(mapping);
1794 1802
1795 memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); 1803 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1796 } 1804 }
1797 ring->queue_id = bp->q_info[j].queue_id; 1805 ring->queue_id = bp->q_info[j].queue_id;
1798 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 1806 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -4546,20 +4554,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4546 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4554 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4547 link_info->force_pause_setting != link_info->req_flow_ctrl) 4555 link_info->force_pause_setting != link_info->req_flow_ctrl)
4548 update_pause = true; 4556 update_pause = true;
4549 if (link_info->req_duplex != link_info->duplex_setting)
4550 update_link = true;
4551 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4557 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4552 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4558 if (BNXT_AUTO_MODE(link_info->auto_mode))
4553 update_link = true; 4559 update_link = true;
4554 if (link_info->req_link_speed != link_info->force_link_speed) 4560 if (link_info->req_link_speed != link_info->force_link_speed)
4555 update_link = true; 4561 update_link = true;
4562 if (link_info->req_duplex != link_info->duplex_setting)
4563 update_link = true;
4556 } else { 4564 } else {
4557 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4565 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4558 update_link = true; 4566 update_link = true;
4559 if (link_info->advertising != link_info->auto_link_speeds) 4567 if (link_info->advertising != link_info->auto_link_speeds)
4560 update_link = true; 4568 update_link = true;
4561 if (link_info->req_link_speed != link_info->auto_link_speed)
4562 update_link = true;
4563 } 4569 }
4564 4570
4565 if (update_link) 4571 if (update_link)
@@ -4636,7 +4642,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4636 if (link_re_init) { 4642 if (link_re_init) {
4637 rc = bnxt_update_phy_setting(bp); 4643 rc = bnxt_update_phy_setting(bp);
4638 if (rc) 4644 if (rc)
4639 goto open_err; 4645 netdev_warn(bp->dev, "failed to update phy settings\n");
4640 } 4646 }
4641 4647
4642 if (irq_re_init) { 4648 if (irq_re_init) {
@@ -4654,6 +4660,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4654 /* Enable TX queues */ 4660 /* Enable TX queues */
4655 bnxt_tx_enable(bp); 4661 bnxt_tx_enable(bp);
4656 mod_timer(&bp->timer, jiffies + bp->current_interval); 4662 mod_timer(&bp->timer, jiffies + bp->current_interval);
4663 bnxt_update_link(bp, true);
4657 4664
4658 return 0; 4665 return 0;
4659 4666
@@ -5670,22 +5677,16 @@ static int bnxt_probe_phy(struct bnxt *bp)
5670 } 5677 }
5671 5678
5672 /*initialize the ethool setting copy with NVM settings */ 5679 /*initialize the ethool setting copy with NVM settings */
5673 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5680 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5674 link_info->autoneg |= BNXT_AUTONEG_SPEED; 5681 link_info->autoneg = BNXT_AUTONEG_SPEED |
5675 5682 BNXT_AUTONEG_FLOW_CTRL;
5676 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5683 link_info->advertising = link_info->auto_link_speeds;
5677 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5678 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5679 link_info->req_flow_ctrl = link_info->auto_pause_setting; 5684 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5680 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5685 } else {
5686 link_info->req_link_speed = link_info->force_link_speed;
5687 link_info->req_duplex = link_info->duplex_setting;
5681 link_info->req_flow_ctrl = link_info->force_pause_setting; 5688 link_info->req_flow_ctrl = link_info->force_pause_setting;
5682 } 5689 }
5683 link_info->req_duplex = link_info->duplex_setting;
5684 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5685 link_info->req_link_speed = link_info->auto_link_speed;
5686 else
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->advertising = link_info->auto_link_speeds;
5689 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", 5690 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5690 link_info->phy_ver[0], 5691 link_info->phy_ver[0],
5691 link_info->phy_ver[1], 5692 link_info->phy_ver[1],
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8efcef..2be51b332652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext {
411 411
412#define BNXT_NUM_TESTS(bp) 0 412#define BNXT_NUM_TESTS(bp) 0
413 413
414#define BNXT_DEFAULT_RX_RING_SIZE 1023 414#define BNXT_DEFAULT_RX_RING_SIZE 511
415#define BNXT_DEFAULT_TX_RING_SIZE 512 415#define BNXT_DEFAULT_TX_RING_SIZE 511
416 416
417#define MAX_TPA 64 417#define MAX_TPA 64
418 418
@@ -523,10 +523,16 @@ struct bnxt_ring_struct {
523 523
524struct tx_push_bd { 524struct tx_push_bd {
525 __le32 doorbell; 525 __le32 doorbell;
526 struct tx_bd txbd1; 526 __le32 tx_bd_len_flags_type;
527 u32 tx_bd_opaque;
527 struct tx_bd_ext txbd2; 528 struct tx_bd_ext txbd2;
528}; 529};
529 530
531struct tx_push_buffer {
532 struct tx_push_bd push_bd;
533 u32 data[25];
534};
535
530struct bnxt_tx_ring_info { 536struct bnxt_tx_ring_info {
531 struct bnxt_napi *bnapi; 537 struct bnxt_napi *bnapi;
532 u16 tx_prod; 538 u16 tx_prod;
@@ -538,8 +544,9 @@ struct bnxt_tx_ring_info {
538 544
539 dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; 545 dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
540 546
541 struct tx_push_bd *tx_push; 547 struct tx_push_buffer *tx_push;
542 dma_addr_t tx_push_mapping; 548 dma_addr_t tx_push_mapping;
549 __le64 data_mapping;
543 550
544#define BNXT_DEV_STATE_CLOSING 0x1 551#define BNXT_DEV_STATE_CLOSING 0x1
545 u32 dev_state; 552 u32 dev_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898e7a32..3238817dfd5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
486 speed_mask |= SUPPORTED_2500baseX_Full; 486 speed_mask |= SUPPORTED_2500baseX_Full;
487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
488 speed_mask |= SUPPORTED_10000baseT_Full; 488 speed_mask |= SUPPORTED_10000baseT_Full;
489 /* TODO: support 25GB, 50GB with different cable type */
490 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
491 speed_mask |= SUPPORTED_20000baseMLD2_Full |
492 SUPPORTED_20000baseKR2_Full;
493 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 489 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
494 speed_mask |= SUPPORTED_40000baseKR4_Full | 490 speed_mask |= SUPPORTED_40000baseCR4_Full;
495 SUPPORTED_40000baseCR4_Full |
496 SUPPORTED_40000baseSR4_Full |
497 SUPPORTED_40000baseLR4_Full;
498 491
499 return speed_mask; 492 return speed_mask;
500} 493}
@@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
514 speed_mask |= ADVERTISED_2500baseX_Full; 507 speed_mask |= ADVERTISED_2500baseX_Full;
515 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 508 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
516 speed_mask |= ADVERTISED_10000baseT_Full; 509 speed_mask |= ADVERTISED_10000baseT_Full;
517 /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
518 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
519 speed_mask |= ADVERTISED_20000baseMLD2_Full |
520 ADVERTISED_20000baseKR2_Full;
521 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 510 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
522 speed_mask |= ADVERTISED_40000baseKR4_Full | 511 speed_mask |= ADVERTISED_40000baseCR4_Full;
523 ADVERTISED_40000baseCR4_Full |
524 ADVERTISED_40000baseSR4_Full |
525 ADVERTISED_40000baseLR4_Full;
526 return speed_mask; 512 return speed_mask;
527} 513}
528 514
@@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
557 u16 ethtool_speed; 543 u16 ethtool_speed;
558 544
559 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 545 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
546 cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
560 547
561 if (link_info->auto_link_speeds) 548 if (link_info->auto_link_speeds)
562 cmd->supported |= SUPPORTED_Autoneg; 549 cmd->supported |= SUPPORTED_Autoneg;
563 550
564 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 551 if (link_info->autoneg) {
565 cmd->advertising = 552 cmd->advertising =
566 bnxt_fw_to_ethtool_advertised_spds(link_info); 553 bnxt_fw_to_ethtool_advertised_spds(link_info);
567 cmd->advertising |= ADVERTISED_Autoneg; 554 cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570 cmd->autoneg = AUTONEG_DISABLE; 557 cmd->autoneg = AUTONEG_DISABLE;
571 cmd->advertising = 0; 558 cmd->advertising = 0;
572 } 559 }
573 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 560 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
574 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 561 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
575 BNXT_LINK_PAUSE_BOTH) { 562 BNXT_LINK_PAUSE_BOTH) {
576 cmd->advertising |= ADVERTISED_Pause; 563 cmd->advertising |= ADVERTISED_Pause;
577 cmd->supported |= SUPPORTED_Pause;
578 } else { 564 } else {
579 cmd->advertising |= ADVERTISED_Asym_Pause; 565 cmd->advertising |= ADVERTISED_Asym_Pause;
580 cmd->supported |= SUPPORTED_Asym_Pause;
581 if (link_info->auto_pause_setting & 566 if (link_info->auto_pause_setting &
582 BNXT_LINK_PAUSE_RX) 567 BNXT_LINK_PAUSE_RX)
583 cmd->advertising |= ADVERTISED_Pause; 568 cmd->advertising |= ADVERTISED_Pause;
584 } 569 }
585 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
586 if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
587 BNXT_LINK_PAUSE_BOTH) {
588 cmd->supported |= SUPPORTED_Pause;
589 } else {
590 cmd->supported |= SUPPORTED_Asym_Pause;
591 if (link_info->force_pause_setting &
592 BNXT_LINK_PAUSE_RX)
593 cmd->supported |= SUPPORTED_Pause;
594 }
595 } 570 }
596 571
597 cmd->port = PORT_NONE; 572 cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
670 if (advertising & ADVERTISED_10000baseT_Full) 645 if (advertising & ADVERTISED_10000baseT_Full)
671 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 646 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
672 647
648 if (advertising & ADVERTISED_40000baseCR4_Full)
649 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
650
673 return fw_speed_mask; 651 return fw_speed_mask;
674} 652}
675 653
@@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
729 speed = ethtool_cmd_speed(cmd); 707 speed = ethtool_cmd_speed(cmd);
730 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 708 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
731 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 709 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
732 link_info->autoneg &= ~BNXT_AUTONEG_SPEED; 710 link_info->autoneg = 0;
733 link_info->advertising = 0; 711 link_info->advertising = 0;
734 } 712 }
735 713
@@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev,
748 726
749 if (BNXT_VF(bp)) 727 if (BNXT_VF(bp))
750 return; 728 return;
751 epause->autoneg = !!(link_info->auto_pause_setting & 729 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
752 BNXT_LINK_PAUSE_BOTH);
753 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); 730 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
754 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); 731 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
755} 732}
@@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
765 return rc; 742 return rc;
766 743
767 if (epause->autoneg) { 744 if (epause->autoneg) {
745 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
746 return -EINVAL;
747
768 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 748 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
769 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 749 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
770 } else { 750 } else {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d787c7..d7e01a74e927 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2445 } 2445 }
2446 2446
2447 /* Link UP/DOWN event */ 2447 /* Link UP/DOWN event */
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2448 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev, 2449 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2450 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2451 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 49eea8981332..3010080cfeee 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7831 return ret; 7831 return ret;
7832} 7832}
7833 7833
7834static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835{
7836 /* Check if we will never have enough descriptors,
7837 * as gso_segs can be more than current ring size
7838 */
7839 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840}
7841
7834static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7842static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7835 7843
7836/* Use GSO to workaround all TSO packets that meet HW bug conditions 7844/* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7934 * vlan encapsulated. 7942 * vlan encapsulated.
7935 */ 7943 */
7936 if (skb->protocol == htons(ETH_P_8021Q) || 7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7937 skb->protocol == htons(ETH_P_8021AD)) 7945 skb->protocol == htons(ETH_P_8021AD)) {
7938 return tg3_tso_bug(tp, tnapi, txq, skb); 7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7948 goto drop;
7949 }
7939 7950
7940 if (!skb_is_gso_v6(skb)) { 7951 if (!skb_is_gso_v6(skb)) {
7941 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7942 tg3_flag(tp, TSO_BUG)) 7953 tg3_flag(tp, TSO_BUG)) {
7943 return tg3_tso_bug(tp, tnapi, txq, skb); 7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7944 7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7956 goto drop;
7957 }
7945 ip_csum = iph->check; 7958 ip_csum = iph->check;
7946 ip_tot_len = iph->tot_len; 7959 ip_tot_len = iph->tot_len;
7947 iph->check = 0; 7960 iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8073 if (would_hit_hwbug) { 8086 if (would_hit_hwbug) {
8074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8075 8088
8076 if (mss) { 8089 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8077 /* If it's a TSO packet, do GSO instead of 8090 /* If it's a TSO packet, do GSO instead of
8078 * allocating and copying to a large linear SKB 8091 * allocating and copying to a large linear SKB
8079 */ 8092 */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 872765527081..34d269cd5579 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1683,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1684 /* droq creation and local register settings. */ 1684 /* droq creation and local register settings. */
1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1686 if (ret_val == -1) 1686 if (ret_val < 0)
1687 return ret_val; 1687 return ret_val;
1688 1688
1689 if (ret_val == 1) { 1689 if (ret_val == 1) {
@@ -2524,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct,
2524 2524
2525 octeon_swap_8B_data(&resp->timestamp, 1); 2525 octeon_swap_8B_data(&resp->timestamp, 1);
2526 2526
2527 if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { 2527 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2528 struct skb_shared_hwtstamps ts; 2528 struct skb_shared_hwtstamps ts;
2529 u64 ns = resp->timestamp; 2529 u64 ns = resp->timestamp;
2530 2530
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86eaa045..174072b3740b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct,
983 983
984create_droq_fail: 984create_droq_fail:
985 octeon_delete_droq(oct, q_no); 985 octeon_delete_droq(oct, q_no);
986 return -1; 986 return -ENOMEM;
987} 987}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a86a42..a009bc30dc4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
574 574
575static void nicvf_rcv_pkt_handler(struct net_device *netdev, 575static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576 struct napi_struct *napi, 576 struct napi_struct *napi,
577 struct cmp_queue *cq, 577 struct cqe_rx_t *cqe_rx)
578 struct cqe_rx_t *cqe_rx, int cqe_type)
579{ 578{
580 struct sk_buff *skb; 579 struct sk_buff *skb;
581 struct nicvf *nic = netdev_priv(netdev); 580 struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
591 } 590 }
592 591
593 /* Check for errors */ 592 /* Check for errors */
594 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 593 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
595 if (err && !cqe_rx->rb_cnt) 594 if (err && !cqe_rx->rb_cnt)
596 return; 595 return;
597 596
@@ -682,8 +681,7 @@ loop:
682 cq_idx, cq_desc->cqe_type); 681 cq_idx, cq_desc->cqe_type);
683 switch (cq_desc->cqe_type) { 682 switch (cq_desc->cqe_type) {
684 case CQE_TYPE_RX: 683 case CQE_TYPE_RX:
685 nicvf_rcv_pkt_handler(netdev, napi, cq, 684 nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
686 cq_desc, CQE_TYPE_RX);
687 work_done++; 685 work_done++;
688 break; 686 break;
689 case CQE_TYPE_SEND: 687 case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev)
1125 1123
1126 /* Clear multiqset info */ 1124 /* Clear multiqset info */
1127 nic->pnicvf = nic; 1125 nic->pnicvf = nic;
1128 nic->sqs_count = 0;
1129 1126
1130 return 0; 1127 return 0;
1131} 1128}
@@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic)
1354 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1351 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1355 stats->tx_bcast_frames_ok + 1352 stats->tx_bcast_frames_ok +
1356 stats->tx_mcast_frames_ok; 1353 stats->tx_mcast_frames_ok;
1354 drv_stats->rx_frames_ok = stats->rx_ucast_frames +
1355 stats->rx_bcast_frames +
1356 stats->rx_mcast_frames;
1357 drv_stats->rx_drops = stats->rx_drop_red + 1357 drv_stats->rx_drops = stats->rx_drop_red +
1358 stats->rx_drop_overrun; 1358 stats->rx_drop_overrun;
1359 drv_stats->tx_drops = stats->tx_drops; 1359 drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538 1538
1539 nicvf_send_vf_struct(nic); 1539 nicvf_send_vf_struct(nic);
1540 1540
1541 if (!pass1_silicon(nic->pdev))
1542 nic->hw_tso = true;
1543
1541 /* Check if this VF is in QS only mode */ 1544 /* Check if this VF is in QS only mode */
1542 if (nic->sqs_mode) 1545 if (nic->sqs_mode)
1543 return 0; 1546 return 0;
@@ -1557,9 +1560,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1557 1560
1558 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 1561 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1559 1562
1560 if (!pass1_silicon(nic->pdev))
1561 nic->hw_tso = true;
1562
1563 netdev->netdev_ops = &nicvf_netdev_ops; 1563 netdev->netdev_ops = &nicvf_netdev_ops;
1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT; 1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1565 1565
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b5490061..767347b1f631 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1329} 1329}
1330 1330
1331/* Check for errors in the receive cmp.queue entry */ 1331/* Check for errors in the receive cmp.queue entry */
1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1333 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1334{ 1333{
1335 struct nicvf_hw_stats *stats = &nic->hw_stats; 1334 struct nicvf_hw_stats *stats = &nic->hw_stats;
1336 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1337 1335
1338 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1336 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1339 drv_stats->rx_frames_ok++;
1340 return 0; 1337 return 0;
1341 }
1342 1338
1343 if (netif_msg_rx_err(nic)) 1339 if (netif_msg_rx_err(nic))
1344 netdev_err(nic->netdev, 1340 netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7f213a..6673e1133523 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
338/* Stats */ 338/* Stats */
339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
341int nicvf_check_cqe_rx_errs(struct nicvf *nic, 341int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
342 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
343int nicvf_check_cqe_tx_errs(struct nicvf *nic, 342int nicvf_check_cqe_tx_errs(struct nicvf *nic,
344 struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 343 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
345#endif /* NICVF_QUEUES_H */ 344#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa6c4d8..a89721fad633 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682} 682}
683 683
684static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
685{
686 char tok[len + 1];
687
688 memcpy(tok, s, len);
689 tok[len] = 0;
690 return kstrtouint(strim(tok), base, val);
691}
692
693static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
694{
695 char tok[len + 1];
696
697 memcpy(tok, s, len);
698 tok[len] = 0;
699 return kstrtou16(strim(tok), base, val);
700}
701
684/** 702/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM 703 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read 704 * @adapter: adapter to read
@@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
709 return ret; 727 return ret;
710 } 728 }
711 729
712 ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); 730 ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
713 if (ret) 731 if (ret)
714 return ret; 732 return ret;
715 ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); 733 ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
716 if (ret) 734 if (ret)
717 return ret; 735 return ret;
718 ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); 736 ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
719 if (ret) 737 if (ret)
720 return ret; 738 return ret;
721 ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); 739 ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
722 if (ret) 740 if (ret)
723 return ret; 741 return ret;
724 ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); 742 ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
725 if (ret) 743 if (ret)
726 return ret; 744 return ret;
727 memcpy(p->sn, vpd.sn_data, SERNUM_LEN); 745 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
733 } else { 751 } else {
734 p->port_type[0] = hex_to_bin(vpd.port0_data[0]); 752 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 p->port_type[1] = hex_to_bin(vpd.port1_data[0]); 753 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); 754 ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
755 &p->xauicfg[0]);
737 if (ret) 756 if (ret)
738 return ret; 757 return ret;
739 ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); 758 ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
759 &p->xauicfg[1]);
740 if (ret) 760 if (ret)
741 return ret; 761 return ret;
742 } 762 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda635456d..06bc2d2e7a73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ 165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
168 169
169 /* T6 adapters: 170 /* T6 adapters:
170 */ 171 */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3332c2..7ba6d530b0c0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
36#define DRV_VERSION "2.3.0.12" 36#define DRV_VERSION "2.3.0.20"
37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
38 38
39#define ENIC_BARS_MAX 6 39#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd1050860b..1fdf5fe12a95 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
298 int wait) 298 int wait)
299{ 299{
300 struct devcmd2_controller *dc2c = vdev->devcmd2; 300 struct devcmd2_controller *dc2c = vdev->devcmd2;
301 struct devcmd2_result *result = dc2c->result + dc2c->next_result; 301 struct devcmd2_result *result;
302 u8 color;
302 unsigned int i; 303 unsigned int i;
303 int delay, err; 304 int delay, err;
304 u32 fetch_index, new_posted; 305 u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
336 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) 337 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
337 return 0; 338 return 0;
338 339
340 result = dc2c->result + dc2c->next_result;
341 color = dc2c->color;
342
343 dc2c->next_result++;
344 if (dc2c->next_result == dc2c->result_size) {
345 dc2c->next_result = 0;
346 dc2c->color = dc2c->color ? 0 : 1;
347 }
348
339 for (delay = 0; delay < wait; delay++) { 349 for (delay = 0; delay < wait; delay++) {
340 if (result->color == dc2c->color) { 350 if (result->color == color) {
341 dc2c->next_result++;
342 if (dc2c->next_result == dc2c->result_size) {
343 dc2c->next_result = 0;
344 dc2c->color = dc2c->color ? 0 : 1;
345 }
346 if (result->error) { 351 if (result->error) {
347 err = result->error; 352 err = result->error;
348 if (err != ERR_ECMDUNKNOWN || 353 if (err != ERR_ECMDUNKNOWN ||
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72dbacd..48d91941408d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@ struct board_info {
128 struct resource *data_res; 128 struct resource *data_res;
129 struct resource *addr_req; /* resources requested */ 129 struct resource *addr_req; /* resources requested */
130 struct resource *data_req; 130 struct resource *data_req;
131 struct resource *irq_res;
132 131
133 int irq_wake; 132 int irq_wake;
134 133
@@ -1300,22 +1299,16 @@ static int
1300dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1301{ 1300{
1302 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1303 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 1302
1305 if (netif_msg_ifup(db)) 1303 if (netif_msg_ifup(db))
1306 dev_dbg(db->dev, "enabling %s\n", dev->name); 1304 dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 1305
1308 /* If there is no IRQ type specified, default to something that 1306 /* If there is no IRQ type specified, tell the user that this is a
1309 * may work, and tell the user that this is a problem */ 1307 * problem
1310 1308 */
1311 if (irqflags == IRQF_TRIGGER_NONE) 1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
1312 irqflags = irq_get_trigger_type(dev->irq);
1313
1314 if (irqflags == IRQF_TRIGGER_NONE)
1315 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 1311
1317 irqflags |= IRQF_SHARED;
1318
1319 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1321 mdelay(1); /* delay needs by DM9000B */ 1314 mdelay(1); /* delay needs by DM9000B */
@@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev)
1323 /* Initialize DM9000 board */ 1316 /* Initialize DM9000 board */
1324 dm9000_init_dm9000(dev); 1317 dm9000_init_dm9000(dev);
1325 1318
1326 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
1320 dev->name, dev))
1327 return -EAGAIN; 1321 return -EAGAIN;
1328 /* Now that we have an interrupt handler hooked up we can unmask 1322 /* Now that we have an interrupt handler hooked up we can unmask
1329 * our interrupts 1323 * our interrupts
@@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev)
1500 1494
1501 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1495 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1496 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 1497
1505 if (db->addr_res == NULL || db->data_res == NULL || 1498 if (!db->addr_res || !db->data_res) {
1506 db->irq_res == NULL) { 1499 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1507 dev_err(db->dev, "insufficient resources\n"); 1500 db->addr_res, db->data_res);
1508 ret = -ENOENT; 1501 ret = -ENOENT;
1509 goto out; 1502 goto out;
1510 } 1503 }
1511 1504
1505 ndev->irq = platform_get_irq(pdev, 0);
1506 if (ndev->irq < 0) {
1507 dev_err(db->dev, "interrupt resource unavailable: %d\n",
1508 ndev->irq);
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1512 db->irq_wake = platform_get_irq(pdev, 1); 1513 db->irq_wake = platform_get_irq(pdev, 1);
1513 if (db->irq_wake >= 0) { 1514 if (db->irq_wake >= 0) {
1514 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev)
1570 1571
1571 /* fill in parameters for net-dev structure */ 1572 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr; 1573 ndev->base_addr = (unsigned long)db->io_addr;
1573 ndev->irq = db->irq_res->start;
1574 1574
1575 /* ensure at least we have a default set of IO routines */ 1575 /* ensure at least we have a default set of IO routines */
1576 dm9000_set_io(db, iosize); 1576 dm9000_set_io(db, iosize);
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f588ad2..678f5018d0be 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 goto failed; 469 goto failed;
470 } 470 }
471 /* Read MACID from CIS */ 471 /* Read MACID from CIS */
472 for (i = 5; i < 11; i++) 472 for (i = 0; i < 6; i++)
473 dev->dev_addr[i] = buf[i]; 473 dev->dev_addr[i] = buf[i + 5];
474 kfree(buf); 474 kfree(buf);
475 } else { 475 } else {
476 if (pcmcia_get_mac_from_cis(link, dev)) 476 if (pcmcia_get_mac_from_cis(link, dev))
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 662c2ee268c7..b0ae69f84493 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,11 @@ struct mvneta_port {
370 struct net_device *dev; 370 struct net_device *dev;
371 struct notifier_block cpu_notifier; 371 struct notifier_block cpu_notifier;
372 int rxq_def; 372 int rxq_def;
373 /* Protect the access to the percpu interrupt registers,
374 * ensuring that the configuration remains coherent.
375 */
376 spinlock_t lock;
377 bool is_stopped;
373 378
374 /* Core clock */ 379 /* Core clock */
375 struct clk *clk; 380 struct clk *clk;
@@ -1038,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1038 } 1043 }
1039} 1044}
1040 1045
1046static void mvneta_percpu_unmask_interrupt(void *arg)
1047{
1048 struct mvneta_port *pp = arg;
1049
1050 /* All the queue are unmasked, but actually only the ones
1051 * mapped to this CPU will be unmasked
1052 */
1053 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1054 MVNETA_RX_INTR_MASK_ALL |
1055 MVNETA_TX_INTR_MASK_ALL |
1056 MVNETA_MISCINTR_INTR_MASK);
1057}
1058
1059static void mvneta_percpu_mask_interrupt(void *arg)
1060{
1061 struct mvneta_port *pp = arg;
1062
1063 /* All the queue are masked, but actually only the ones
1064 * mapped to this CPU will be masked
1065 */
1066 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1067 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1068 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1069}
1070
1071static void mvneta_percpu_clear_intr_cause(void *arg)
1072{
1073 struct mvneta_port *pp = arg;
1074
1075 /* All the queue are cleared, but actually only the ones
1076 * mapped to this CPU will be cleared
1077 */
1078 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1079 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1080 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1081}
1082
1041/* This method sets defaults to the NETA port: 1083/* This method sets defaults to the NETA port:
1042 * Clears interrupt Cause and Mask registers. 1084 * Clears interrupt Cause and Mask registers.
1043 * Clears all MAC tables. 1085 * Clears all MAC tables.
@@ -1055,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
1055 int max_cpu = num_present_cpus(); 1097 int max_cpu = num_present_cpus();
1056 1098
1057 /* Clear all Cause registers */ 1099 /* Clear all Cause registers */
1058 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1100 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1059 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1060 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1061 1101
1062 /* Mask all interrupts */ 1102 /* Mask all interrupts */
1063 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1103 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1064 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1065 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1066 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1104 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1067 1105
1068 /* Enable MBUS Retry bit16 */ 1106 /* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
2528 return 0; 2566 return 0;
2529} 2567}
2530 2568
2531static void mvneta_percpu_unmask_interrupt(void *arg)
2532{
2533 struct mvneta_port *pp = arg;
2534
2535 /* All the queue are unmasked, but actually only the ones
2536 * maped to this CPU will be unmasked
2537 */
2538 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2539 MVNETA_RX_INTR_MASK_ALL |
2540 MVNETA_TX_INTR_MASK_ALL |
2541 MVNETA_MISCINTR_INTR_MASK);
2542}
2543
2544static void mvneta_percpu_mask_interrupt(void *arg)
2545{
2546 struct mvneta_port *pp = arg;
2547
2548 /* All the queue are masked, but actually only the ones
2549 * maped to this CPU will be masked
2550 */
2551 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2552 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2553 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2554}
2555
2556static void mvneta_start_dev(struct mvneta_port *pp) 2569static void mvneta_start_dev(struct mvneta_port *pp)
2557{ 2570{
2558 unsigned int cpu; 2571 int cpu;
2559 2572
2560 mvneta_max_rx_size_set(pp, pp->pkt_size); 2573 mvneta_max_rx_size_set(pp, pp->pkt_size);
2561 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2574 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2564 mvneta_port_enable(pp); 2577 mvneta_port_enable(pp);
2565 2578
2566 /* Enable polling on the port */ 2579 /* Enable polling on the port */
2567 for_each_present_cpu(cpu) { 2580 for_each_online_cpu(cpu) {
2568 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2581 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2569 2582
2570 napi_enable(&port->napi); 2583 napi_enable(&port->napi);
2571 } 2584 }
2572 2585
2573 /* Unmask interrupts. It has to be done from each CPU */ 2586 /* Unmask interrupts. It has to be done from each CPU */
2574 for_each_online_cpu(cpu) 2587 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2575 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2588
2576 pp, true);
2577 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2589 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2578 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2590 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2579 MVNETA_CAUSE_LINK_CHANGE | 2591 MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2589 2601
2590 phy_stop(pp->phy_dev); 2602 phy_stop(pp->phy_dev);
2591 2603
2592 for_each_present_cpu(cpu) { 2604 for_each_online_cpu(cpu) {
2593 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2605 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2594 2606
2595 napi_disable(&port->napi); 2607 napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2604 mvneta_port_disable(pp); 2616 mvneta_port_disable(pp);
2605 2617
2606 /* Clear all ethernet port interrupts */ 2618 /* Clear all ethernet port interrupts */
2607 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2619 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
2608 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2609 2620
2610 /* Mask all ethernet port interrupts */ 2621 /* Mask all ethernet port interrupts */
2611 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2622 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2612 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2613 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2614 2623
2615 mvneta_tx_reset(pp); 2624 mvneta_tx_reset(pp);
2616 mvneta_rx_reset(pp); 2625 mvneta_rx_reset(pp);
@@ -2847,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
2847 disable_percpu_irq(pp->dev->irq); 2856 disable_percpu_irq(pp->dev->irq);
2848} 2857}
2849 2858
2859/* Electing a CPU must be done in an atomic way: it should be done
2860 * after or before the removal/insertion of a CPU and this function is
2861 * not reentrant.
2862 */
2850static void mvneta_percpu_elect(struct mvneta_port *pp) 2863static void mvneta_percpu_elect(struct mvneta_port *pp)
2851{ 2864{
2852 int online_cpu_idx, max_cpu, cpu, i = 0; 2865 int elected_cpu = 0, max_cpu, cpu, i = 0;
2866
2867 /* Use the cpu associated to the rxq when it is online, in all
2868 * the other cases, use the cpu 0 which can't be offline.
2869 */
2870 if (cpu_online(pp->rxq_def))
2871 elected_cpu = pp->rxq_def;
2853 2872
2854 online_cpu_idx = pp->rxq_def % num_online_cpus();
2855 max_cpu = num_present_cpus(); 2873 max_cpu = num_present_cpus();
2856 2874
2857 for_each_online_cpu(cpu) { 2875 for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2862 if ((rxq % max_cpu) == cpu) 2880 if ((rxq % max_cpu) == cpu)
2863 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 2881 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2864 2882
2865 if (i == online_cpu_idx) 2883 if (cpu == elected_cpu)
2866 /* Map the default receive queue queue to the 2884 /* Map the default receive queue queue to the
2867 * elected CPU 2885 * elected CPU
2868 */ 2886 */
@@ -2873,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2873 * the CPU bound to the default RX queue 2891 * the CPU bound to the default RX queue
2874 */ 2892 */
2875 if (txq_number == 1) 2893 if (txq_number == 1)
2876 txq_map = (i == online_cpu_idx) ? 2894 txq_map = (cpu == elected_cpu) ?
2877 MVNETA_CPU_TXQ_ACCESS(1) : 0; 2895 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2878 else 2896 else
2879 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 2897 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2902,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2902 switch (action) { 2920 switch (action) {
2903 case CPU_ONLINE: 2921 case CPU_ONLINE:
2904 case CPU_ONLINE_FROZEN: 2922 case CPU_ONLINE_FROZEN:
2923 spin_lock(&pp->lock);
2924 /* Configuring the driver for a new CPU while the
2925 * driver is stopping is racy, so just avoid it.
2926 */
2927 if (pp->is_stopped) {
2928 spin_unlock(&pp->lock);
2929 break;
2930 }
2905 netif_tx_stop_all_queues(pp->dev); 2931 netif_tx_stop_all_queues(pp->dev);
2906 2932
2907 /* We have to synchronise on tha napi of each CPU 2933 /* We have to synchronise on tha napi of each CPU
@@ -2917,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2917 } 2943 }
2918 2944
2919 /* Mask all ethernet port interrupts */ 2945 /* Mask all ethernet port interrupts */
2920 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2946 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2921 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2922 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2923 napi_enable(&port->napi); 2947 napi_enable(&port->napi);
2924 2948
2925 2949
@@ -2934,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2934 */ 2958 */
2935 mvneta_percpu_elect(pp); 2959 mvneta_percpu_elect(pp);
2936 2960
2937 /* Unmask all ethernet port interrupts, as this 2961 /* Unmask all ethernet port interrupts */
2938 * notifier is called for each CPU then the CPU to 2962 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2939 * Queue mapping is applied
2940 */
2941 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2942 MVNETA_RX_INTR_MASK(rxq_number) |
2943 MVNETA_TX_INTR_MASK(txq_number) |
2944 MVNETA_MISCINTR_INTR_MASK);
2945 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2963 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2946 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2964 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2947 MVNETA_CAUSE_LINK_CHANGE | 2965 MVNETA_CAUSE_LINK_CHANGE |
2948 MVNETA_CAUSE_PSC_SYNC_CHANGE); 2966 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2949 netif_tx_start_all_queues(pp->dev); 2967 netif_tx_start_all_queues(pp->dev);
2968 spin_unlock(&pp->lock);
2950 break; 2969 break;
2951 case CPU_DOWN_PREPARE: 2970 case CPU_DOWN_PREPARE:
2952 case CPU_DOWN_PREPARE_FROZEN: 2971 case CPU_DOWN_PREPARE_FROZEN:
2953 netif_tx_stop_all_queues(pp->dev); 2972 netif_tx_stop_all_queues(pp->dev);
2973 /* Thanks to this lock we are sure that any pending
2974 * cpu election is done
2975 */
2976 spin_lock(&pp->lock);
2954 /* Mask all ethernet port interrupts */ 2977 /* Mask all ethernet port interrupts */
2955 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2978 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2956 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2979 spin_unlock(&pp->lock);
2957 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2958 2980
2959 napi_synchronize(&port->napi); 2981 napi_synchronize(&port->napi);
2960 napi_disable(&port->napi); 2982 napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2968 case CPU_DEAD: 2990 case CPU_DEAD:
2969 case CPU_DEAD_FROZEN: 2991 case CPU_DEAD_FROZEN:
2970 /* Check if a new CPU must be elected now this on is down */ 2992 /* Check if a new CPU must be elected now this on is down */
2993 spin_lock(&pp->lock);
2971 mvneta_percpu_elect(pp); 2994 mvneta_percpu_elect(pp);
2995 spin_unlock(&pp->lock);
2972 /* Unmask all ethernet port interrupts */ 2996 /* Unmask all ethernet port interrupts */
2973 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2997 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2974 MVNETA_RX_INTR_MASK(rxq_number) |
2975 MVNETA_TX_INTR_MASK(txq_number) |
2976 MVNETA_MISCINTR_INTR_MASK);
2977 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2998 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2978 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2999 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2979 MVNETA_CAUSE_LINK_CHANGE | 3000 MVNETA_CAUSE_LINK_CHANGE |
@@ -2988,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2988static int mvneta_open(struct net_device *dev) 3009static int mvneta_open(struct net_device *dev)
2989{ 3010{
2990 struct mvneta_port *pp = netdev_priv(dev); 3011 struct mvneta_port *pp = netdev_priv(dev);
2991 int ret, cpu; 3012 int ret;
2992 3013
2993 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3014 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2994 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 3015 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
3010 goto err_cleanup_txqs; 3031 goto err_cleanup_txqs;
3011 } 3032 }
3012 3033
3013 /* Even though the documentation says that request_percpu_irq
3014 * doesn't enable the interrupts automatically, it actually
3015 * does so on the local CPU.
3016 *
3017 * Make sure it's disabled.
3018 */
3019 mvneta_percpu_disable(pp);
3020
3021 /* Enable per-CPU interrupt on all the CPU to handle our RX 3034 /* Enable per-CPU interrupt on all the CPU to handle our RX
3022 * queue interrupts 3035 * queue interrupts
3023 */ 3036 */
3024 for_each_online_cpu(cpu) 3037 on_each_cpu(mvneta_percpu_enable, pp, true);
3025 smp_call_function_single(cpu, mvneta_percpu_enable,
3026 pp, true);
3027
3028 3038
3039 pp->is_stopped = false;
3029 /* Register a CPU notifier to handle the case where our CPU 3040 /* Register a CPU notifier to handle the case where our CPU
3030 * might be taken offline. 3041 * might be taken offline.
3031 */ 3042 */
@@ -3057,13 +3068,20 @@ err_cleanup_rxqs:
3057static int mvneta_stop(struct net_device *dev) 3068static int mvneta_stop(struct net_device *dev)
3058{ 3069{
3059 struct mvneta_port *pp = netdev_priv(dev); 3070 struct mvneta_port *pp = netdev_priv(dev);
3060 int cpu;
3061 3071
3072 /* Inform that we are stopping so we don't want to setup the
3073 * driver for new CPUs in the notifiers
3074 */
3075 spin_lock(&pp->lock);
3076 pp->is_stopped = true;
3062 mvneta_stop_dev(pp); 3077 mvneta_stop_dev(pp);
3063 mvneta_mdio_remove(pp); 3078 mvneta_mdio_remove(pp);
3064 unregister_cpu_notifier(&pp->cpu_notifier); 3079 unregister_cpu_notifier(&pp->cpu_notifier);
3065 for_each_present_cpu(cpu) 3080 /* Now that the notifier are unregistered, we can release le
3066 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 3081 * lock
3082 */
3083 spin_unlock(&pp->lock);
3084 on_each_cpu(mvneta_percpu_disable, pp, true);
3067 free_percpu_irq(dev->irq, pp->ports); 3085 free_percpu_irq(dev->irq, pp->ports);
3068 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
3069 mvneta_cleanup_txqs(pp); 3087 mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3312 3330
3313 netif_tx_stop_all_queues(pp->dev); 3331 netif_tx_stop_all_queues(pp->dev);
3314 3332
3315 for_each_online_cpu(cpu) 3333 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3316 smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3317 pp, true);
3318 3334
3319 /* We have to synchronise on the napi of each CPU */ 3335 /* We have to synchronise on the napi of each CPU */
3320 for_each_online_cpu(cpu) { 3336 for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3335 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 3351 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3336 3352
3337 /* Update the elected CPU matching the new rxq_def */ 3353 /* Update the elected CPU matching the new rxq_def */
3354 spin_lock(&pp->lock);
3338 mvneta_percpu_elect(pp); 3355 mvneta_percpu_elect(pp);
3356 spin_unlock(&pp->lock);
3339 3357
3340 /* We have to synchronise on the napi of each CPU */ 3358 /* We have to synchronise on the napi of each CPU */
3341 for_each_online_cpu(cpu) { 3359 for_each_online_cpu(cpu) {
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1fd46..c797971aefab 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3061 3061
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 if (!pe) 3063 if (!pe)
3064 return -1; 3064 return -ENOMEM;
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 pe->index = tid; 3066 pe->index = tid;
3067 3067
@@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3077 if (pmap == 0) { 3077 if (pmap == 0) {
3078 if (add) { 3078 if (add) {
3079 kfree(pe); 3079 kfree(pe);
3080 return -1; 3080 return -EINVAL;
3081 } 3081 }
3082 mvpp2_prs_hw_inv(priv, pe->index); 3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false; 3083 priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8affcc9..c7e939945259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
182 err = mlx4_reset_slave(dev); 182 err = mlx4_reset_slave(dev);
183 else 183 else
184 err = mlx4_reset_master(dev); 184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186 185
186 if (!err) {
187 mlx4_err(dev, "device was reset successfully\n");
188 } else {
189 /* EEH could have disabled the PCI channel during reset. That's
190 * recoverable and the PCI error flow will handle it.
191 */
192 if (!pci_channel_offline(dev->persist->pdev))
193 BUG_ON(1);
194 }
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; 195 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex); 196 mutex_unlock(&persist->device_state_mutex);
190 197
191 /* At that step HW was already reset, now notify clients */ 198 /* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index d48d5793407d..e94ca1c3fc7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2429,7 +2429,7 @@ err_thread:
2429 flush_workqueue(priv->mfunc.master.comm_wq); 2429 flush_workqueue(priv->mfunc.master.comm_wq);
2430 destroy_workqueue(priv->mfunc.master.comm_wq); 2430 destroy_workqueue(priv->mfunc.master.comm_wq);
2431err_slaves: 2431err_slaves:
2432 while (--i) { 2432 while (i--) {
2433 for (port = 1; port <= MLX4_MAX_PORTS; port++) 2433 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2435 } 2435 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..a849da92f857 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
318 if (timestamp_en) 318 if (timestamp_en)
319 cq_context->flags |= cpu_to_be32(1 << 19); 319 cq_context->flags |= cpu_to_be32(1 << 19);
320 320
321 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 321 cq_context->logsize_usrpage =
322 cpu_to_be32((ilog2(nent) << 24) |
323 mlx4_to_hw_uar_index(dev, uar->index));
322 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; 324 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
323 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
324 326
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
236 .enable = mlx4_en_phc_enable, 236 .enable = mlx4_en_phc_enable,
237}; 237};
238 238
239#define MLX4_EN_WRAP_AROUND_SEC 10ULL
240
241/* This function calculates the max shift that enables the user range
242 * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
243 */
244static u32 freq_to_shift(u16 freq)
245{
246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
250 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252
253 /* This comes from the reverse of clocksource_khz2mult */
254 return ilog2(div_u64(max_mul * freq_khz, 1000000));
255}
256
239void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) 257void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
240{ 258{
241 struct mlx4_dev *dev = mdev->dev; 259 struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
254 memset(&mdev->cycles, 0, sizeof(mdev->cycles)); 272 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
255 mdev->cycles.read = mlx4_en_read_clock; 273 mdev->cycles.read = mlx4_en_read_clock;
256 mdev->cycles.mask = CLOCKSOURCE_MASK(48); 274 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
257 /* Using shift to make calculation more accurate. Since current HW 275 mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
258 * clock frequency is 427 MHz, and cycles are given using a 48 bits
259 * register, the biggest shift when calculating using u64, is 14
260 * (max_cycles * multiplier < 2^64)
261 */
262 mdev->cycles.shift = 14;
263 mdev->cycles.mult = 276 mdev->cycles.mult =
264 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); 277 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
265 mdev->nominal_c_mult = mdev->cycles.mult; 278 mdev->nominal_c_mult = mdev->cycles.mult;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f69a73b..f191a1612589 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2344,8 +2344,6 @@ out:
2344 /* set offloads */ 2344 /* set offloads */
2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; 2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2347 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2348 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2349} 2347}
2350 2348
2351static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2349static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2356 /* unset offloads */ 2354 /* unset offloads */
2357 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2355 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2358 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); 2356 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2359 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2360 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2361 2357
2362 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2358 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2363 VXLAN_STEER_BY_OUTER_MAC, 0); 2359 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2980 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 2976 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2981 } 2977 }
2982 2978
2979 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2980 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2981 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2982 }
2983
2983 mdev->pndev[port] = dev; 2984 mdev->pndev[port] = dev;
2984 mdev->upper[port] = NULL; 2985 mdev->upper[port] = NULL;
2985 2986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
238 stats->collisions = 0; 238 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 241 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
243 stats->rx_frame_errors = 0; 243 stats->rx_frame_errors = 0;
244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
245 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 245 stats->rx_missed_errors = 0;
246 stats->tx_aborted_errors = 0; 246 stats->tx_aborted_errors = 0;
247 stats->tx_carrier_errors = 0; 247 stats->tx_carrier_errors = 0;
248 stats->tx_fifo_errors = 0; 248 stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a659d3..02e925d6f734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
58 } else { 58 } else {
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 } 60 }
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
62 mdev->priv_uar.index));
62 context->local_qpn = cpu_to_be32(qpn); 63 context->local_qpn = cpu_to_be32(qpn);
63 context->pri_path.ackto = 1 & 0x07; 64 context->pri_path.ackto = 1 & 0x07;
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 65 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e0946ab22010 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
214 ring->cqn, user_prio, &ring->context); 214 ring->cqn, user_prio, &ring->context);
215 if (ring->bf_alloced) 215 if (ring->bf_alloced)
216 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 216 ring->context.usr_page =
217 cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
218 ring->bf.uar->index));
217 219
218 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 220 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
219 &ring->qp, &ring->qp_state); 221 &ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053165f8..f613977455e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
940 940
941 if (!priv->eq_table.uar_map[index]) { 941 if (!priv->eq_table.uar_map[index]) {
942 priv->eq_table.uar_map[index] = 942 priv->eq_table.uar_map[index] =
943 ioremap(pci_resource_start(dev->persist->pdev, 2) + 943 ioremap(
944 ((eq->eqn / 4) << PAGE_SHIFT), 944 pci_resource_start(dev->persist->pdev, 2) +
945 PAGE_SIZE); 945 ((eq->eqn / 4) << (dev->uar_page_shift)),
946 (1 << (dev->uar_page_shift)));
946 if (!priv->eq_table.uar_map[index]) { 947 if (!priv->eq_table.uar_map[index]) {
947 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 948 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
948 eq->eqn); 949 eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d219e445..2cc3c626c3fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@ struct mlx4_port_config {
168 168
169static atomic_t pf_loading = ATOMIC_INIT(0); 169static atomic_t pf_loading = ATOMIC_INIT(0);
170 170
171static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
172 struct mlx4_dev_cap *dev_cap)
173{
174 /* The reserved_uars is calculated by system page size unit.
175 * Therefore, adjustment is added when the uar page size is less
176 * than the system page size
177 */
178 dev->caps.reserved_uars =
179 max_t(int,
180 mlx4_get_num_reserved_uar(dev),
181 dev_cap->reserved_uars /
182 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
183}
184
171int mlx4_check_port_params(struct mlx4_dev *dev, 185int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type) 186 enum mlx4_port_type *port_type)
173{ 187{
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 400 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 401 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
388 402
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds; 403 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 404 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0; 405 dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 417 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 418 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
407 419
420 /* Save uar page shift */
421 if (!mlx4_is_slave(dev)) {
422 /* Virtual PCI function needs to determine UAR page size from
423 * firmware. Only master PCI function can set the uar page size
424 */
425 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
426 mlx4_set_num_reserved_uars(dev, dev_cap);
427 }
428
408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 429 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 struct mlx4_init_hca_param hca_param; 430 struct mlx4_init_hca_param hca_param;
410 431
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
815 return -ENODEV; 836 return -ENODEV;
816 } 837 }
817 838
818 /* slave gets uar page size from QUERY_HCA fw command */ 839 /* Set uar_page_shift for VF */
819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 840 dev->uar_page_shift = hca_param.uar_page_sz + 12;
820 841
821 /* TODO: relax this assumption */ 842 /* Make sure the master uar page size is valid */
822 if (dev->caps.uar_page_size != PAGE_SIZE) { 843 if (dev->uar_page_shift > PAGE_SHIFT) {
823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 844 mlx4_err(dev,
824 dev->caps.uar_page_size, PAGE_SIZE); 845 "Invalid configuration: uar page size is larger than system page size\n");
825 return -ENODEV; 846 return -ENODEV;
826 } 847 }
827 848
849 /* Set reserved_uars based on the uar_page_shift */
850 mlx4_set_num_reserved_uars(dev, &dev_cap);
851
852 /* Although uar page size in FW differs from system page size,
853 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
854 * still works with assumption that uar page size == system page size
855 */
856 dev->caps.uar_page_size = PAGE_SIZE;
857
828 memset(&func_cap, 0, sizeof(func_cap)); 858 memset(&func_cap, 0, sizeof(func_cap));
829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 859 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 if (err) { 860 if (err) {
@@ -2179,8 +2209,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2179 2209
2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2210 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2181 2211
2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2212 /* Always set UAR page size 4KB, set log_uar_sz accordingly */
2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2213 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2214 PAGE_SHIFT -
2215 DEFAULT_UAR_PAGE_SHIFT;
2216 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2217
2184 init_hca.mw_enabled = 0; 2218 init_hca.mw_enabled = 0;
2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2219 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2220 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59dc854e..b3cc3ab63799 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
269 269
270int mlx4_init_uar_table(struct mlx4_dev *dev) 270int mlx4_init_uar_table(struct mlx4_dev *dev)
271{ 271{
272 if (dev->caps.num_uars <= 128) { 272 int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
273 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", 273
274 dev->caps.num_uars); 274 mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
275 mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
276
277 if (dev->caps.num_uars <= num_reserved_uar) {
278 mlx4_err(
279 dev, "Only %d UAR pages (need more than %d)\n",
280 dev->caps.num_uars, num_reserved_uar);
275 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); 281 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
276 return -ENODEV; 282 return -ENODEV;
277 } 283 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe29ef6c..25ce1b030a00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
915 915
916 spin_lock_irq(mlx4_tlock(dev)); 916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER); 917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) 918 if (!r || r->owner != slave) {
919 ret = -EINVAL; 919 ret = -EINVAL;
920 counter = container_of(r, struct res_counter, com); 920 } else {
921 if (!counter->port) 921 counter = container_of(r, struct res_counter, com);
922 counter->port = port; 922 if (!counter->port)
923 counter->port = port;
924 }
923 925
924 spin_unlock_irq(mlx4_tlock(dev)); 926 spin_unlock_irq(mlx4_tlock(dev));
925 return ret; 927 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430f1062..d4e1c3045200 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2024,18 +2024,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2024 vf_stats); 2024 vf_stats);
2025} 2025}
2026 2026
2027static struct net_device_ops mlx5e_netdev_ops = { 2027static const struct net_device_ops mlx5e_netdev_ops_basic = {
2028 .ndo_open = mlx5e_open, 2028 .ndo_open = mlx5e_open,
2029 .ndo_stop = mlx5e_close, 2029 .ndo_stop = mlx5e_close,
2030 .ndo_start_xmit = mlx5e_xmit, 2030 .ndo_start_xmit = mlx5e_xmit,
2031 .ndo_get_stats64 = mlx5e_get_stats, 2031 .ndo_get_stats64 = mlx5e_get_stats,
2032 .ndo_set_rx_mode = mlx5e_set_rx_mode, 2032 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2033 .ndo_set_mac_address = mlx5e_set_mac, 2033 .ndo_set_mac_address = mlx5e_set_mac,
2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2036 .ndo_set_features = mlx5e_set_features, 2036 .ndo_set_features = mlx5e_set_features,
2037 .ndo_change_mtu = mlx5e_change_mtu, 2037 .ndo_change_mtu = mlx5e_change_mtu,
2038 .ndo_do_ioctl = mlx5e_ioctl, 2038 .ndo_do_ioctl = mlx5e_ioctl,
2039};
2040
2041static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2042 .ndo_open = mlx5e_open,
2043 .ndo_stop = mlx5e_close,
2044 .ndo_start_xmit = mlx5e_xmit,
2045 .ndo_get_stats64 = mlx5e_get_stats,
2046 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2047 .ndo_set_mac_address = mlx5e_set_mac,
2048 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2049 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2050 .ndo_set_features = mlx5e_set_features,
2051 .ndo_change_mtu = mlx5e_change_mtu,
2052 .ndo_do_ioctl = mlx5e_ioctl,
2053 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2054 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
2055 .ndo_get_vf_config = mlx5e_get_vf_config,
2056 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2057 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2039}; 2058};
2040 2059
2041static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2060static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2137,18 +2156,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
2137 2156
2138 SET_NETDEV_DEV(netdev, &mdev->pdev->dev); 2157 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
2139 2158
2140 if (priv->params.num_tc > 1) 2159 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2141 mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; 2160 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
2142 2161 else
2143 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 2162 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
2144 mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
2145 mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
2146 mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
2147 mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
2148 mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
2149 }
2150 2163
2151 netdev->netdev_ops = &mlx5e_netdev_ops;
2152 netdev->watchdog_timeo = 15 * HZ; 2164 netdev->watchdog_timeo = 15 * HZ;
2153 2165
2154 netdev->ethtool_ops = &mlx5e_ethtool_ops; 2166 netdev->ethtool_ops = &mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f5435b32f..ae65b9940aed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
49#define MLXSW_PORT_MID 0xd000 49#define MLXSW_PORT_MID 0xd000
50 50
51#define MLXSW_PORT_MAX_PHY_PORTS 0x40 51#define MLXSW_PORT_MAX_PHY_PORTS 0x40
52#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS 52#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
53 53
54#define MLXSW_PORT_DEVID_BITS_OFFSET 10 54#define MLXSW_PORT_DEVID_BITS_OFFSET 10
55#define MLXSW_PORT_PHY_BITS_OFFSET 4 55#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bb77e2207804..ffe4c0305733 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
873 } 873 }
874} 874}
875 875
876/* SPAFT - Switch Port Acceptable Frame Types
877 * ------------------------------------------
878 * The Switch Port Acceptable Frame Types register configures the frame
879 * admittance of the port.
880 */
881#define MLXSW_REG_SPAFT_ID 0x2010
882#define MLXSW_REG_SPAFT_LEN 0x08
883
884static const struct mlxsw_reg_info mlxsw_reg_spaft = {
885 .id = MLXSW_REG_SPAFT_ID,
886 .len = MLXSW_REG_SPAFT_LEN,
887};
888
889/* reg_spaft_local_port
890 * Local port number.
891 * Access: Index
892 *
893 * Note: CPU port is not supported (all tag types are allowed).
894 */
895MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
896
897/* reg_spaft_sub_port
898 * Virtual port within the physical port.
899 * Should be set to 0 when virtual ports are not enabled on the port.
900 * Access: RW
901 */
902MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
903
904/* reg_spaft_allow_untagged
905 * When set, untagged frames on the ingress are allowed (default).
906 * Access: RW
907 */
908MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
909
910/* reg_spaft_allow_prio_tagged
911 * When set, priority tagged frames on the ingress are allowed (default).
912 * Access: RW
913 */
914MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
915
916/* reg_spaft_allow_tagged
917 * When set, tagged frames on the ingress are allowed (default).
918 * Access: RW
919 */
920MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
921
922static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
923 bool allow_untagged)
924{
925 MLXSW_REG_ZERO(spaft, payload);
926 mlxsw_reg_spaft_local_port_set(payload, local_port);
927 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
928 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
929 mlxsw_reg_spaft_allow_tagged_set(payload, true);
930}
931
876/* SFGC - Switch Flooding Group Configuration 932/* SFGC - Switch Flooding Group Configuration
877 * ------------------------------------------ 933 * ------------------------------------------
878 * The following register controls the association of flooding tables and MIDs 934 * The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3203 return "SPVID"; 3259 return "SPVID";
3204 case MLXSW_REG_SPVM_ID: 3260 case MLXSW_REG_SPVM_ID:
3205 return "SPVM"; 3261 return "SPVM";
3262 case MLXSW_REG_SPAFT_ID:
3263 return "SPAFT";
3206 case MLXSW_REG_SFGC_ID: 3264 case MLXSW_REG_SFGC_ID:
3207 return "SFGC"; 3265 return "SFGC";
3208 case MLXSW_REG_SFTR_ID: 3266 case MLXSW_REG_SFTR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 217856bdd400..09ce451c283b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2123,6 +2123,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2125 2125
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2127
2126 mlxsw_sp_port->learning = 0; 2128 mlxsw_sp_port->learning = 0;
2127 mlxsw_sp_port->learning_sync = 0; 2129 mlxsw_sp_port->learning_sync = 0;
2128 mlxsw_sp_port->uc_flood = 0; 2130 mlxsw_sp_port->uc_flood = 0;
@@ -2746,6 +2748,13 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2746 goto err_vport_flood_set; 2748 goto err_vport_flood_set;
2747 } 2749 }
2748 2750
2751 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2752 MLXSW_REG_SPMS_STATE_FORWARDING);
2753 if (err) {
2754 netdev_err(dev, "Failed to set STP state\n");
2755 goto err_port_stp_state_set;
2756 }
2757
2749 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 2758 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2750 netdev_err(dev, "Failed to flush FDB\n"); 2759 netdev_err(dev, "Failed to flush FDB\n");
2751 2760
@@ -2763,6 +2772,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2763 2772
2764 return 0; 2773 return 0;
2765 2774
2775err_port_stp_state_set:
2766err_vport_flood_set: 2776err_vport_flood_set:
2767err_port_vid_learning_set: 2777err_port_vid_learning_set:
2768err_port_vid_to_fid_validate: 2778err_port_vid_to_fid_validate:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 7f42eb1c320e..3b89ed2f3c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, 254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
255 bool set, bool only_uc); 255 bool set, bool only_uc);
256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
257int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
257 258
258#endif 259#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e492ca2cdecd..7b56098acc58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -370,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
370 return err; 370 return err;
371} 371}
372 372
373static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 373static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
374 u16 vid)
374{ 375{
375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 char spvid_pl[MLXSW_REG_SPVID_LEN]; 377 char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
379 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
380} 381}
381 382
383static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
384 bool allow)
385{
386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387 char spaft_pl[MLXSW_REG_SPAFT_LEN];
388
389 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
391}
392
393int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
394{
395 struct net_device *dev = mlxsw_sp_port->dev;
396 int err;
397
398 if (!vid) {
399 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
400 if (err) {
401 netdev_err(dev, "Failed to disallow untagged traffic\n");
402 return err;
403 }
404 } else {
405 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
406 if (err) {
407 netdev_err(dev, "Failed to set PVID\n");
408 return err;
409 }
410
411 /* Only allow if not already allowed. */
412 if (!mlxsw_sp_port->pvid) {
413 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
414 true);
415 if (err) {
416 netdev_err(dev, "Failed to allow untagged traffic\n");
417 goto err_port_allow_untagged_set;
418 }
419 }
420 }
421
422 mlxsw_sp_port->pvid = vid;
423 return 0;
424
425err_port_allow_untagged_set:
426 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
427 return err;
428}
429
382static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 430static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
383{ 431{
384 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 432 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
540 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 588 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
541 goto err_port_pvid_set; 589 goto err_port_pvid_set;
542 } 590 }
543 mlxsw_sp_port->pvid = vid_begin; 591 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
592 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
593 if (err) {
594 netdev_err(dev, "Unable to del PVID\n");
595 goto err_port_pvid_set;
596 }
544 } 597 }
545 598
546 /* Changing activity bits only if HW operation succeded */ 599 /* Changing activity bits only if HW operation succeded */
@@ -892,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
892 return err; 945 return err;
893 } 946 }
894 947
948 if (init)
949 goto out;
950
895 pvid = mlxsw_sp_port->pvid; 951 pvid = mlxsw_sp_port->pvid;
896 if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { 952 if (pvid >= vid_begin && pvid <= vid_end) {
897 /* Default VLAN is always 1 */ 953 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
898 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
899 if (err) { 954 if (err) {
900 netdev_err(dev, "Unable to del PVID %d\n", pvid); 955 netdev_err(dev, "Unable to del PVID %d\n", pvid);
901 return err; 956 return err;
902 } 957 }
903 mlxsw_sp_port->pvid = 1;
904 } 958 }
905 959
906 if (init)
907 goto out;
908
909 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 960 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
910 false, false); 961 false, false);
911 if (err) { 962 if (err) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571d0432..537974cfd427 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6137,28 +6137,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt; 6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
6138 sw_cnt_1ms_ini &= 0x0fff; 6138 sw_cnt_1ms_ini &= 0x0fff;
6139 data = r8168_mac_ocp_read(tp, 0xd412); 6139 data = r8168_mac_ocp_read(tp, 0xd412);
6140 data &= 0x0fff; 6140 data &= ~0x0fff;
6141 data |= sw_cnt_1ms_ini; 6141 data |= sw_cnt_1ms_ini;
6142 r8168_mac_ocp_write(tp, 0xd412, data); 6142 r8168_mac_ocp_write(tp, 0xd412, data);
6143 } 6143 }
6144 6144
6145 data = r8168_mac_ocp_read(tp, 0xe056); 6145 data = r8168_mac_ocp_read(tp, 0xe056);
6146 data &= 0xf0; 6146 data &= ~0xf0;
6147 data |= 0x07; 6147 data |= 0x70;
6148 r8168_mac_ocp_write(tp, 0xe056, data); 6148 r8168_mac_ocp_write(tp, 0xe056, data);
6149 6149
6150 data = r8168_mac_ocp_read(tp, 0xe052); 6150 data = r8168_mac_ocp_read(tp, 0xe052);
6151 data &= 0x8008; 6151 data &= ~0x6000;
6152 data |= 0x6000; 6152 data |= 0x8008;
6153 r8168_mac_ocp_write(tp, 0xe052, data); 6153 r8168_mac_ocp_write(tp, 0xe052, data);
6154 6154
6155 data = r8168_mac_ocp_read(tp, 0xe0d6); 6155 data = r8168_mac_ocp_read(tp, 0xe0d6);
6156 data &= 0x01ff; 6156 data &= ~0x01ff;
6157 data |= 0x017f; 6157 data |= 0x017f;
6158 r8168_mac_ocp_write(tp, 0xe0d6, data); 6158 r8168_mac_ocp_write(tp, 0xe0d6, data);
6159 6159
6160 data = r8168_mac_ocp_read(tp, 0xd420); 6160 data = r8168_mac_ocp_read(tp, 0xd420);
6161 data &= 0x0fff; 6161 data &= ~0x0fff;
6162 data |= 0x047f; 6162 data |= 0x047f;
6163 r8168_mac_ocp_write(tp, 0xd420, data); 6163 r8168_mac_ocp_write(tp, 0xd420, data);
6164 6164
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed914fcf..744d7806a9ee 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1139 if (netif_running(ndev)) { 1139 if (netif_running(ndev)) {
1140 netif_device_detach(ndev); 1140 netif_device_detach(ndev);
1141 /* Stop PTP Clock driver */ 1141 /* Stop PTP Clock driver */
1142 ravb_ptp_stop(ndev); 1142 if (priv->chip_id == RCAR_GEN2)
1143 ravb_ptp_stop(ndev);
1143 /* Wait for DMA stopping */ 1144 /* Wait for DMA stopping */
1144 error = ravb_stop_dma(ndev); 1145 error = ravb_stop_dma(ndev);
1145 if (error) { 1146 if (error) {
@@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1170 ravb_emac_init(ndev); 1171 ravb_emac_init(ndev);
1171 1172
1172 /* Initialise PTP Clock driver */ 1173 /* Initialise PTP Clock driver */
1173 ravb_ptp_init(ndev, priv->pdev); 1174 if (priv->chip_id == RCAR_GEN2)
1175 ravb_ptp_init(ndev, priv->pdev);
1174 1176
1175 netif_device_attach(ndev); 1177 netif_device_attach(ndev);
1176 } 1178 }
@@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1298 netif_tx_stop_all_queues(ndev); 1300 netif_tx_stop_all_queues(ndev);
1299 1301
1300 /* Stop PTP Clock driver */ 1302 /* Stop PTP Clock driver */
1301 ravb_ptp_stop(ndev); 1303 if (priv->chip_id == RCAR_GEN2)
1304 ravb_ptp_stop(ndev);
1302 1305
1303 /* Wait for DMA stopping */ 1306 /* Wait for DMA stopping */
1304 ravb_stop_dma(ndev); 1307 ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1311 ravb_emac_init(ndev); 1314 ravb_emac_init(ndev);
1312 1315
1313 /* Initialise PTP Clock driver */ 1316 /* Initialise PTP Clock driver */
1314 ravb_ptp_init(ndev, priv->pdev); 1317 if (priv->chip_id == RCAR_GEN2)
1318 ravb_ptp_init(ndev, priv->pdev);
1315 1319
1316 netif_tx_start_all_queues(ndev); 1320 netif_tx_start_all_queues(ndev);
1317} 1321}
@@ -1814,10 +1818,6 @@ static int ravb_probe(struct platform_device *pdev)
1814 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); 1818 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
1815 } 1819 }
1816 1820
1817 /* Set CSEL value */
1818 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1819 CCC);
1820
1821 /* Set GTI value */ 1821 /* Set GTI value */
1822 error = ravb_set_gti(ndev); 1822 error = ravb_set_gti(ndev);
1823 if (error) 1823 if (error)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a844ab..db7db8ac4ca3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev)
2342 } 2342 }
2343 2343
2344 ndev->irq = platform_get_irq(pdev, 0); 2344 ndev->irq = platform_get_irq(pdev, 0);
2345 if (ndev->irq <= 0) { 2345 if (ndev->irq < 0) {
2346 ret = -ENODEV; 2346 ret = ndev->irq;
2347 goto out_release_io; 2347 goto out_release_io;
2348 } 2348 }
2349 /* 2349 /*
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 70814b7386b3..fc8bbff2d7e3 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev)
1880 } 1880 }
1881 netdev_reset_queue(ndev); 1881 netdev_reset_queue(ndev);
1882 1882
1883 dwceqos_init_hw(lp);
1883 napi_enable(&lp->napi); 1884 napi_enable(&lp->napi);
1884 phy_start(lp->phy_dev); 1885 phy_start(lp->phy_dev);
1885 dwceqos_init_hw(lp);
1886 1886
1887 netif_start_queue(ndev); 1887 netif_start_queue(ndev);
1888 tasklet_enable(&lp->tx_bdreclaim_tasklet); 1888 tasklet_enable(&lp->tx_bdreclaim_tasklet);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e1ec74..c3e85acfdc70 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
63 mode = AM33XX_GMII_SEL_MODE_RGMII; 63 mode = AM33XX_GMII_SEL_MODE_RGMII;
64 break; 64 break;
65 65
66 case PHY_INTERFACE_MODE_MII:
67 default: 66 default:
67 dev_warn(priv->dev,
68 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
69 phy_modes(phy_mode));
70 /* fallthrough */
71 case PHY_INTERFACE_MODE_MII:
68 mode = AM33XX_GMII_SEL_MODE_MII; 72 mode = AM33XX_GMII_SEL_MODE_MII;
69 break; 73 break;
70 }; 74 };
@@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
106 mode = AM33XX_GMII_SEL_MODE_RGMII; 110 mode = AM33XX_GMII_SEL_MODE_RGMII;
107 break; 111 break;
108 112
109 case PHY_INTERFACE_MODE_MII:
110 default: 113 default:
114 dev_warn(priv->dev,
115 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
116 phy_modes(phy_mode));
117 /* fallthrough */
118 case PHY_INTERFACE_MODE_MII:
111 mode = AM33XX_GMII_SEL_MODE_MII; 119 mode = AM33XX_GMII_SEL_MODE_MII;
112 break; 120 break;
113 }; 121 };
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
117 *ndesc = le32_to_cpu(desc->next_desc); 117 *ndesc = le32_to_cpu(desc->next_desc);
118} 118}
119 119
120static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120static u32 get_sw_data(int index, struct knav_dma_desc *desc)
121{ 121{
122 *pad0 = le32_to_cpu(desc->pad[0]); 122 /* No Endian conversion needed as this data is untouched by hw */
123 *pad1 = le32_to_cpu(desc->pad[1]); 123 return desc->sw_data[index];
124 *pad2 = le32_to_cpu(desc->pad[2]);
125} 124}
126 125
127static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 126/* use these macros to get sw data */
128{ 127#define GET_SW_DATA0(desc) get_sw_data(0, desc)
129 u64 pad64; 128#define GET_SW_DATA1(desc) get_sw_data(1, desc)
130 129#define GET_SW_DATA2(desc) get_sw_data(2, desc)
131 pad64 = le32_to_cpu(desc->pad[0]) + 130#define GET_SW_DATA3(desc) get_sw_data(3, desc)
132 ((u64)le32_to_cpu(desc->pad[1]) << 32);
133 *padptr = (void *)(uintptr_t)pad64;
134}
135 131
136static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, 132static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
137 struct knav_dma_desc *desc) 133 struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
163 desc->packet_info = cpu_to_le32(pkt_info); 159 desc->packet_info = cpu_to_le32(pkt_info);
164} 160}
165 161
166static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 162static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
167{ 163{
168 desc->pad[0] = cpu_to_le32(pad0); 164 /* No Endian conversion needed as this data is untouched by hw */
169 desc->pad[1] = cpu_to_le32(pad1); 165 desc->sw_data[index] = data;
170 desc->pad[2] = cpu_to_le32(pad1);
171} 166}
172 167
168/* use these macros to set sw data */
169#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
170#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
171#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
172#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
173
173static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, 174static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
174 struct knav_dma_desc *desc) 175 struct knav_dma_desc *desc)
175{ 176{
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
581 dma_addr_t dma_desc, dma_buf; 582 dma_addr_t dma_desc, dma_buf;
582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 unsigned int buf_len, dma_sz = sizeof(*ndesc);
583 void *buf_ptr; 584 void *buf_ptr;
584 u32 pad[2];
585 u32 tmp; 585 u32 tmp;
586 586
587 get_words(&dma_desc, 1, &desc->next_desc); 587 get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
593 break; 593 break;
594 } 594 }
595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
596 get_pad_ptr(&buf_ptr, ndesc); 596 /* warning!!!! We are retrieving the virtual ptr in the sw_data
597 * field as a 32bit value. Will not work on 64bit machines
598 */
599 buf_ptr = (void *)GET_SW_DATA0(ndesc);
600 buf_len = (int)GET_SW_DATA1(desc);
597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 601 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
598 __free_page(buf_ptr); 602 __free_page(buf_ptr);
599 knav_pool_desc_put(netcp->rx_pool, desc); 603 knav_pool_desc_put(netcp->rx_pool, desc);
600 } 604 }
601 605 /* warning!!!! We are retrieving the virtual ptr in the sw_data
602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 606 * field as a 32bit value. Will not work on 64bit machines
603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 607 */
608 buf_ptr = (void *)GET_SW_DATA0(desc);
609 buf_len = (int)GET_SW_DATA1(desc);
604 610
605 if (buf_ptr) 611 if (buf_ptr)
606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 612 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
639 dma_addr_t dma_desc, dma_buff; 645 dma_addr_t dma_desc, dma_buff;
640 struct netcp_packet p_info; 646 struct netcp_packet p_info;
641 struct sk_buff *skb; 647 struct sk_buff *skb;
642 u32 pad[2];
643 void *org_buf_ptr; 648 void *org_buf_ptr;
644 649
645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 650 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
653 } 658 }
654 659
655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 660 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 661 /* warning!!!! We are retrieving the virtual ptr in the sw_data
657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 662 * field as a 32bit value. Will not work on 64bit machines
663 */
664 org_buf_ptr = (void *)GET_SW_DATA0(desc);
665 org_buf_len = (int)GET_SW_DATA1(desc);
658 666
659 if (unlikely(!org_buf_ptr)) { 667 if (unlikely(!org_buf_ptr)) {
660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 668 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
679 /* Fill in the page fragment list */ 687 /* Fill in the page fragment list */
680 while (dma_desc) { 688 while (dma_desc) {
681 struct page *page; 689 struct page *page;
682 void *ptr;
683 690
684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 691 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
685 if (unlikely(!ndesc)) { 692 if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
688 } 695 }
689 696
690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 697 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
691 get_pad_ptr(&ptr, ndesc); 698 /* warning!!!! We are retrieving the virtual ptr in the sw_data
692 page = ptr; 699 * field as a 32bit value. Will not work on 64bit machines
700 */
701 page = (struct page *)GET_SW_DATA0(desc);
693 702
694 if (likely(dma_buff && buf_len && page)) { 703 if (likely(dma_buff && buf_len && page)) {
695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 704 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
777 } 786 }
778 787
779 get_org_pkt_info(&dma, &buf_len, desc); 788 get_org_pkt_info(&dma, &buf_len, desc);
780 get_pad_ptr(&buf_ptr, desc); 789 /* warning!!!! We are retrieving the virtual ptr in the sw_data
790 * field as a 32bit value. Will not work on 64bit machines
791 */
792 buf_ptr = (void *)GET_SW_DATA0(desc);
781 793
782 if (unlikely(!dma)) { 794 if (unlikely(!dma)) {
783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 795 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
829 struct page *page; 841 struct page *page;
830 dma_addr_t dma; 842 dma_addr_t dma;
831 void *bufptr; 843 void *bufptr;
832 u32 pad[3]; 844 u32 sw_data[2];
833 845
834 /* Allocate descriptor */ 846 /* Allocate descriptor */
835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 847 hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
847 859
848 bufptr = netdev_alloc_frag(primary_buf_len); 860 bufptr = netdev_alloc_frag(primary_buf_len);
849 pad[2] = primary_buf_len; 861 sw_data[1] = primary_buf_len;
850 862
851 if (unlikely(!bufptr)) { 863 if (unlikely(!bufptr)) {
852 dev_warn_ratelimited(netcp->ndev_dev, 864 dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 870 if (unlikely(dma_mapping_error(netcp->dev, dma)))
859 goto fail; 871 goto fail;
860 872
861 pad[0] = lower_32_bits((uintptr_t)bufptr); 873 /* warning!!!! We are saving the virtual ptr in the sw_data
862 pad[1] = upper_32_bits((uintptr_t)bufptr); 874 * field as a 32bit value. Will not work on 64bit machines
863 875 */
876 sw_data[0] = (u32)bufptr;
864 } else { 877 } else {
865 /* Allocate a secondary receive queue entry */ 878 /* Allocate a secondary receive queue entry */
866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 879 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
870 } 883 }
871 buf_len = PAGE_SIZE; 884 buf_len = PAGE_SIZE;
872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 885 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
873 pad[0] = lower_32_bits(dma); 886 /* warning!!!! We are saving the virtual ptr in the sw_data
874 pad[1] = upper_32_bits(dma); 887 * field as a 32bit value. Will not work on 64bit machines
875 pad[2] = 0; 888 */
889 sw_data[0] = (u32)page;
890 sw_data[1] = 0;
876 } 891 }
877 892
878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 893 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 897 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
883 KNAV_DMA_DESC_RETQ_SHIFT; 898 KNAV_DMA_DESC_RETQ_SHIFT;
884 set_org_pkt_info(dma, buf_len, hwdesc); 899 set_org_pkt_info(dma, buf_len, hwdesc);
885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 900 SET_SW_DATA0(sw_data[0], hwdesc);
901 SET_SW_DATA1(sw_data[1], hwdesc);
886 set_desc_info(desc_info, pkt_info, hwdesc); 902 set_desc_info(desc_info, pkt_info, hwdesc);
887 903
888 /* Push to FDQs */ 904 /* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
971 unsigned int budget) 987 unsigned int budget)
972{ 988{
973 struct knav_dma_desc *desc; 989 struct knav_dma_desc *desc;
974 void *ptr;
975 struct sk_buff *skb; 990 struct sk_buff *skb;
976 unsigned int dma_sz; 991 unsigned int dma_sz;
977 dma_addr_t dma; 992 dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
988 continue; 1003 continue;
989 } 1004 }
990 1005
991 get_pad_ptr(&ptr, desc); 1006 /* warning!!!! We are retrieving the virtual ptr in the sw_data
992 skb = ptr; 1007 * field as a 32bit value. Will not work on 64bit machines
1008 */
1009 skb = (struct sk_buff *)GET_SW_DATA0(desc);
993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 1010 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
994 if (!skb) { 1011 if (!skb) {
995 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); 1012 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1194 } 1211 }
1195 1212
1196 set_words(&tmp, 1, &desc->packet_info); 1213 set_words(&tmp, 1, &desc->packet_info);
1197 tmp = lower_32_bits((uintptr_t)&skb); 1214 /* warning!!!! We are saving the virtual ptr in the sw_data
1198 set_words(&tmp, 1, &desc->pad[0]); 1215 * field as a 32bit value. Will not work on 64bit machines
1199 tmp = upper_32_bits((uintptr_t)&skb); 1216 */
1200 set_words(&tmp, 1, &desc->pad[1]); 1217 SET_SW_DATA0((u32)skb, desc);
1201 1218
1202 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1219 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
1203 tmp = tx_pipe->switch_to_port; 1220 tmp = tx_pipe->switch_to_port;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0b14ac3b8d11..0bf7edd99573 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,6 +1039,34 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1039 return geneve_xmit_skb(skb, dev, info); 1039 return geneve_xmit_skb(skb, dev, info);
1040} 1040}
1041 1041
1042static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1043{
1044 /* The max_mtu calculation does not take account of GENEVE
1045 * options, to avoid excluding potentially valid
1046 * configurations.
1047 */
1048 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
1049 - dev->hard_header_len;
1050
1051 if (new_mtu < 68)
1052 return -EINVAL;
1053
1054 if (new_mtu > max_mtu) {
1055 if (strict)
1056 return -EINVAL;
1057
1058 new_mtu = max_mtu;
1059 }
1060
1061 dev->mtu = new_mtu;
1062 return 0;
1063}
1064
1065static int geneve_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 return __geneve_change_mtu(dev, new_mtu, true);
1068}
1069
1042static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1070static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1043{ 1071{
1044 struct ip_tunnel_info *info = skb_tunnel_info(skb); 1072 struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1111,7 @@ static const struct net_device_ops geneve_netdev_ops = {
1083 .ndo_stop = geneve_stop, 1111 .ndo_stop = geneve_stop,
1084 .ndo_start_xmit = geneve_xmit, 1112 .ndo_start_xmit = geneve_xmit,
1085 .ndo_get_stats64 = ip_tunnel_get_stats64, 1113 .ndo_get_stats64 = ip_tunnel_get_stats64,
1086 .ndo_change_mtu = eth_change_mtu, 1114 .ndo_change_mtu = geneve_change_mtu,
1087 .ndo_validate_addr = eth_validate_addr, 1115 .ndo_validate_addr = eth_validate_addr,
1088 .ndo_set_mac_address = eth_mac_addr, 1116 .ndo_set_mac_address = eth_mac_addr,
1089 .ndo_fill_metadata_dst = geneve_fill_metadata_dst, 1117 .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
@@ -1150,6 +1178,7 @@ static void geneve_setup(struct net_device *dev)
1150 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1178 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1151 1179
1152 netif_keep_dst(dev); 1180 netif_keep_dst(dev);
1181 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1153 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 1182 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
1154 eth_hw_addr_random(dev); 1183 eth_hw_addr_random(dev);
1155} 1184}
@@ -1441,12 +1470,23 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1441 return dev; 1470 return dev;
1442 1471
1443 err = geneve_configure(net, dev, &geneve_remote_unspec, 1472 err = geneve_configure(net, dev, &geneve_remote_unspec,
1444 0, 0, 0, htons(dst_port), true, 0); 1473 0, 0, 0, htons(dst_port), true,
1445 if (err) { 1474 GENEVE_F_UDP_ZERO_CSUM6_RX);
1446 free_netdev(dev); 1475 if (err)
1447 return ERR_PTR(err); 1476 goto err;
1448 } 1477
1478 /* openvswitch users expect packet sizes to be unrestricted,
1479 * so set the largest MTU we can.
1480 */
1481 err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
1482 if (err)
1483 goto err;
1484
1449 return dev; 1485 return dev;
1486
1487 err:
1488 free_netdev(dev);
1489 return ERR_PTR(err);
1450} 1490}
1451EXPORT_SYMBOL_GPL(geneve_dev_create_fb); 1491EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
1452 1492
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1d3a66563bac..98e34fee45c7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1089,6 +1089,9 @@ static int netvsc_probe(struct hv_device *dev,
1089 net->ethtool_ops = &ethtool_ops; 1089 net->ethtool_ops = &ethtool_ops;
1090 SET_NETDEV_DEV(net, &dev->device); 1090 SET_NETDEV_DEV(net, &dev->device);
1091 1091
1092 /* We always need headroom for rndis header */
1093 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1094
1092 /* Notify the netvsc driver of the new device */ 1095 /* Notify the netvsc driver of the new device */
1093 memset(&device_info, 0, sizeof(device_info)); 1096 memset(&device_info, 0, sizeof(device_info));
1094 device_info.ring_size = ring_size; 1097 device_info.ring_size = ring_size;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3ec5e5..db507e3bcab9 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); 250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
251 phy_read(phydev, MII_BCM7XXX_AUX_MODE); 251 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
252 252
253 /* Workaround only required for 100Mbits/sec capable PHYs */
254 if (phydev->supported & PHY_GBIT_FEATURES)
255 return 0;
256
257 /* set shadow mode 2 */ 253 /* set shadow mode 2 */
258 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 254 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
259 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2); 255 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
270 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); 266 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
271 267
272 /* reset shadow mode 2 */ 268 /* reset shadow mode 2 */
273 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); 269 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
274 if (ret < 0) 270 if (ret < 0)
275 return ret; 271 return ret;
276 272
@@ -307,11 +303,6 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
307 return 0; 303 return 0;
308} 304}
309 305
310static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
311{
312 return 0;
313}
314
315#define BCM7XXX_28NM_GPHY(_oui, _name) \ 306#define BCM7XXX_28NM_GPHY(_oui, _name) \
316{ \ 307{ \
317 .phy_id = (_oui), \ 308 .phy_id = (_oui), \
@@ -337,7 +328,7 @@ static struct phy_driver bcm7xxx_driver[] = {
337 .phy_id = PHY_ID_BCM7425, 328 .phy_id = PHY_ID_BCM7425,
338 .phy_id_mask = 0xfffffff0, 329 .phy_id_mask = 0xfffffff0,
339 .name = "Broadcom BCM7425", 330 .name = "Broadcom BCM7425",
340 .features = PHY_GBIT_FEATURES | 331 .features = PHY_BASIC_FEATURES |
341 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 332 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
342 .flags = PHY_IS_INTERNAL, 333 .flags = PHY_IS_INTERNAL,
343 .config_init = bcm7xxx_config_init, 334 .config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@ static struct phy_driver bcm7xxx_driver[] = {
349 .phy_id = PHY_ID_BCM7429, 340 .phy_id = PHY_ID_BCM7429,
350 .phy_id_mask = 0xfffffff0, 341 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM7429", 342 .name = "Broadcom BCM7429",
352 .features = PHY_GBIT_FEATURES | 343 .features = PHY_BASIC_FEATURES |
353 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 344 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
354 .flags = PHY_IS_INTERNAL, 345 .flags = PHY_IS_INTERNAL,
355 .config_init = bcm7xxx_config_init, 346 .config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@ static struct phy_driver bcm7xxx_driver[] = {
361 .phy_id = PHY_ID_BCM7435, 352 .phy_id = PHY_ID_BCM7435,
362 .phy_id_mask = 0xfffffff0, 353 .phy_id_mask = 0xfffffff0,
363 .name = "Broadcom BCM7435", 354 .name = "Broadcom BCM7435",
364 .features = PHY_GBIT_FEATURES | 355 .features = PHY_BASIC_FEATURES |
365 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 356 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
366 .flags = PHY_IS_INTERNAL, 357 .flags = PHY_IS_INTERNAL,
367 .config_init = bcm7xxx_config_init, 358 .config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@ static struct phy_driver bcm7xxx_driver[] = {
369 .read_status = genphy_read_status, 360 .read_status = genphy_read_status,
370 .suspend = bcm7xxx_suspend, 361 .suspend = bcm7xxx_suspend,
371 .resume = bcm7xxx_config_init, 362 .resume = bcm7xxx_config_init,
372}, {
373 .phy_id = PHY_BCM_OUI_4,
374 .phy_id_mask = 0xffff0000,
375 .name = "Broadcom BCM7XXX 40nm",
376 .features = PHY_GBIT_FEATURES |
377 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
378 .flags = PHY_IS_INTERNAL,
379 .config_init = bcm7xxx_config_init,
380 .config_aneg = genphy_config_aneg,
381 .read_status = genphy_read_status,
382 .suspend = bcm7xxx_suspend,
383 .resume = bcm7xxx_config_init,
384}, {
385 .phy_id = PHY_BCM_OUI_5,
386 .phy_id_mask = 0xffffff00,
387 .name = "Broadcom BCM7XXX 65nm",
388 .features = PHY_BASIC_FEATURES |
389 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
390 .flags = PHY_IS_INTERNAL,
391 .config_init = bcm7xxx_dummy_config_init,
392 .config_aneg = genphy_config_aneg,
393 .read_status = genphy_read_status,
394 .suspend = bcm7xxx_suspend,
395 .resume = bcm7xxx_config_init,
396} }; 363} };
397 364
398static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 365static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
404 { PHY_ID_BCM7439, 0xfffffff0, }, 371 { PHY_ID_BCM7439, 0xfffffff0, },
405 { PHY_ID_BCM7435, 0xfffffff0, }, 372 { PHY_ID_BCM7435, 0xfffffff0, },
406 { PHY_ID_BCM7445, 0xfffffff0, }, 373 { PHY_ID_BCM7445, 0xfffffff0, },
407 { PHY_BCM_OUI_4, 0xffff0000 },
408 { PHY_BCM_OUI_5, 0xffffff00 },
409 { } 374 { }
410}; 375};
411 376
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb96443c97..ab1d0fcaf1d9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
446 if (err < 0) 446 if (err < 0)
447 return err; 447 return err;
448 448
449 return 0;
450}
451
452static int marvell_config_init(struct phy_device *phydev)
453{
454 /* Set registers from marvell,reg-init DT property */
449 return marvell_of_reg_init(phydev); 455 return marvell_of_reg_init(phydev);
450} 456}
451 457
@@ -495,7 +501,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
495 501
496 mdelay(500); 502 mdelay(500);
497 503
498 return 0; 504 return marvell_config_init(phydev);
499} 505}
500 506
501static int m88e3016_config_init(struct phy_device *phydev) 507static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
514 if (reg < 0) 520 if (reg < 0)
515 return reg; 521 return reg;
516 522
517 return 0; 523 return marvell_config_init(phydev);
518} 524}
519 525
520static int m88e1111_config_init(struct phy_device *phydev) 526static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@ static struct phy_driver marvell_drivers[] = {
1078 .features = PHY_GBIT_FEATURES, 1084 .features = PHY_GBIT_FEATURES,
1079 .probe = marvell_probe, 1085 .probe = marvell_probe,
1080 .flags = PHY_HAS_INTERRUPT, 1086 .flags = PHY_HAS_INTERRUPT,
1087 .config_init = &marvell_config_init,
1081 .config_aneg = &marvell_config_aneg, 1088 .config_aneg = &marvell_config_aneg,
1082 .read_status = &genphy_read_status, 1089 .read_status = &genphy_read_status,
1083 .ack_interrupt = &marvell_ack_interrupt, 1090 .ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@ static struct phy_driver marvell_drivers[] = {
1149 .features = PHY_GBIT_FEATURES, 1156 .features = PHY_GBIT_FEATURES,
1150 .flags = PHY_HAS_INTERRUPT, 1157 .flags = PHY_HAS_INTERRUPT,
1151 .probe = marvell_probe, 1158 .probe = marvell_probe,
1159 .config_init = &marvell_config_init,
1152 .config_aneg = &m88e1121_config_aneg, 1160 .config_aneg = &m88e1121_config_aneg,
1153 .read_status = &marvell_read_status, 1161 .read_status = &marvell_read_status,
1154 .ack_interrupt = &marvell_ack_interrupt, 1162 .ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@ static struct phy_driver marvell_drivers[] = {
1167 .features = PHY_GBIT_FEATURES, 1175 .features = PHY_GBIT_FEATURES,
1168 .flags = PHY_HAS_INTERRUPT, 1176 .flags = PHY_HAS_INTERRUPT,
1169 .probe = marvell_probe, 1177 .probe = marvell_probe,
1178 .config_init = &marvell_config_init,
1170 .config_aneg = &m88e1318_config_aneg, 1179 .config_aneg = &m88e1318_config_aneg,
1171 .read_status = &marvell_read_status, 1180 .read_status = &marvell_read_status,
1172 .ack_interrupt = &marvell_ack_interrupt, 1181 .ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@ static struct phy_driver marvell_drivers[] = {
1259 .features = PHY_GBIT_FEATURES, 1268 .features = PHY_GBIT_FEATURES,
1260 .flags = PHY_HAS_INTERRUPT, 1269 .flags = PHY_HAS_INTERRUPT,
1261 .probe = marvell_probe, 1270 .probe = marvell_probe,
1271 .config_init = &marvell_config_init,
1262 .config_aneg = &m88e1510_config_aneg, 1272 .config_aneg = &m88e1510_config_aneg,
1263 .read_status = &marvell_read_status, 1273 .read_status = &marvell_read_status,
1264 .ack_interrupt = &marvell_ack_interrupt, 1274 .ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@ static struct phy_driver marvell_drivers[] = {
1277 .features = PHY_GBIT_FEATURES, 1287 .features = PHY_GBIT_FEATURES,
1278 .flags = PHY_HAS_INTERRUPT, 1288 .flags = PHY_HAS_INTERRUPT,
1279 .probe = marvell_probe, 1289 .probe = marvell_probe,
1290 .config_init = &marvell_config_init,
1280 .config_aneg = &m88e1510_config_aneg, 1291 .config_aneg = &m88e1510_config_aneg,
1281 .read_status = &marvell_read_status, 1292 .read_status = &marvell_read_status,
1282 .ack_interrupt = &marvell_ack_interrupt, 1293 .ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f005faee..e551f3a89cfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@ int genphy_config_init(struct phy_device *phydev)
1410 1410
1411 features = (SUPPORTED_TP | SUPPORTED_MII 1411 features = (SUPPORTED_TP | SUPPORTED_MII
1412 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1412 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1413 SUPPORTED_BNC); 1413 SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1414 1414
1415 /* Do we support autonegotiation? */ 1415 /* Do we support autonegotiation? */
1416 val = phy_read(phydev, MII_BMSR); 1416 val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c63022eb3c..4ddae8118c85 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
395 395
396 if (!__pppoe_xmit(sk_pppox(relay_po), skb)) 396 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
397 goto abort_put; 397 goto abort_put;
398
399 sock_put(sk_pppox(relay_po));
398 } else { 400 } else {
399 if (sock_queue_rcv_skb(sk, skb)) 401 if (sock_queue_rcv_skb(sk, skb))
400 goto abort_kfree; 402 goto abort_kfree;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..cdde59089f72 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880791fc..570deef53f74 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@ static const struct usb_device_id products[] = {
637 637
638 /* 3. Combined interface devices matching on interface number */ 638 /* 3. Combined interface devices matching on interface number */
639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
640 {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
640 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, 641 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
641 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, 642 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
642 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, 643 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a53025fd0..72ba8ae7f09a 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@ union Vmxnet3_GenericDesc {
377#define VMXNET3_TX_RING_MAX_SIZE 4096 377#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 378#define VMXNET3_TC_RING_MAX_SIZE 4096
379#define VMXNET3_RX_RING_MAX_SIZE 4096 379#define VMXNET3_RX_RING_MAX_SIZE 4096
380#define VMXNET3_RX_RING2_MAX_SIZE 2048 380#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 381#define VMXNET3_RC_RING_MAX_SIZE 8192
382 382
383/* a list of reasons for queue stop */ 383/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c0f8aa..729c344e6774 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 65439188c582..e6944b29588e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2171,9 +2171,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2171#endif 2171#endif
2172 } 2172 }
2173 2173
2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA && 2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2175 info && info->mode & IP_TUNNEL_INFO_TX) { 2175 if (info && info->mode & IP_TUNNEL_INFO_TX)
2176 vxlan_xmit_one(skb, dev, NULL, false); 2176 vxlan_xmit_one(skb, dev, NULL, false);
2177 else
2178 kfree_skb(skb);
2177 return NETDEV_TX_OK; 2179 return NETDEV_TX_OK;
2178 } 2180 }
2179 2181
@@ -2367,29 +2369,43 @@ static void vxlan_set_multicast_list(struct net_device *dev)
2367{ 2369{
2368} 2370}
2369 2371
2370static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2372static int __vxlan_change_mtu(struct net_device *dev,
2373 struct net_device *lowerdev,
2374 struct vxlan_rdst *dst, int new_mtu, bool strict)
2371{ 2375{
2372 struct vxlan_dev *vxlan = netdev_priv(dev); 2376 int max_mtu = IP_MAX_MTU;
2373 struct vxlan_rdst *dst = &vxlan->default_dst;
2374 struct net_device *lowerdev;
2375 int max_mtu;
2376 2377
2377 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); 2378 if (lowerdev)
2378 if (lowerdev == NULL) 2379 max_mtu = lowerdev->mtu;
2379 return eth_change_mtu(dev, new_mtu);
2380 2380
2381 if (dst->remote_ip.sa.sa_family == AF_INET6) 2381 if (dst->remote_ip.sa.sa_family == AF_INET6)
2382 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; 2382 max_mtu -= VXLAN6_HEADROOM;
2383 else 2383 else
2384 max_mtu = lowerdev->mtu - VXLAN_HEADROOM; 2384 max_mtu -= VXLAN_HEADROOM;
2385 2385
2386 if (new_mtu < 68 || new_mtu > max_mtu) 2386 if (new_mtu < 68)
2387 return -EINVAL; 2387 return -EINVAL;
2388 2388
2389 if (new_mtu > max_mtu) {
2390 if (strict)
2391 return -EINVAL;
2392
2393 new_mtu = max_mtu;
2394 }
2395
2389 dev->mtu = new_mtu; 2396 dev->mtu = new_mtu;
2390 return 0; 2397 return 0;
2391} 2398}
2392 2399
2400static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2401{
2402 struct vxlan_dev *vxlan = netdev_priv(dev);
2403 struct vxlan_rdst *dst = &vxlan->default_dst;
2404 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2405 dst->remote_ifindex);
2406 return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
2407}
2408
2393static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, 2409static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
2394 struct ip_tunnel_info *info, 2410 struct ip_tunnel_info *info,
2395 __be16 sport, __be16 dport) 2411 __be16 sport, __be16 dport)
@@ -2523,6 +2539,7 @@ static void vxlan_setup(struct net_device *dev)
2523 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2539 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2524 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2540 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2525 netif_keep_dst(dev); 2541 netif_keep_dst(dev);
2542 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2526 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 2543 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2527 2544
2528 INIT_LIST_HEAD(&vxlan->next); 2545 INIT_LIST_HEAD(&vxlan->next);
@@ -2765,6 +2782,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2765 int err; 2782 int err;
2766 bool use_ipv6 = false; 2783 bool use_ipv6 = false;
2767 __be16 default_port = vxlan->cfg.dst_port; 2784 __be16 default_port = vxlan->cfg.dst_port;
2785 struct net_device *lowerdev = NULL;
2768 2786
2769 vxlan->net = src_net; 2787 vxlan->net = src_net;
2770 2788
@@ -2785,9 +2803,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2785 } 2803 }
2786 2804
2787 if (conf->remote_ifindex) { 2805 if (conf->remote_ifindex) {
2788 struct net_device *lowerdev 2806 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
2789 = __dev_get_by_index(src_net, conf->remote_ifindex);
2790
2791 dst->remote_ifindex = conf->remote_ifindex; 2807 dst->remote_ifindex = conf->remote_ifindex;
2792 2808
2793 if (!lowerdev) { 2809 if (!lowerdev) {
@@ -2811,6 +2827,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2811 needed_headroom = lowerdev->hard_header_len; 2827 needed_headroom = lowerdev->hard_header_len;
2812 } 2828 }
2813 2829
2830 if (conf->mtu) {
2831 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
2832 if (err)
2833 return err;
2834 }
2835
2814 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2836 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
2815 needed_headroom += VXLAN6_HEADROOM; 2837 needed_headroom += VXLAN6_HEADROOM;
2816 else 2838 else
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407208b1..629225980463 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@ try:
1626 if (state & Xpr) { 1626 if (state & Xpr) {
1627 void __iomem *scc_addr; 1627 void __iomem *scc_addr;
1628 unsigned long ring; 1628 unsigned long ring;
1629 int i; 1629 unsigned int i;
1630 1630
1631 /* 1631 /*
1632 * - the busy condition happens (sometimes); 1632 * - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 866067789330..7438fbeef744 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@ config IWLWIFI_LEDS
53 53
54config IWLDVM 54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support" 55 tristate "Intel Wireless WiFi DVM Firmware support"
56 depends on m
57 help 56 help
58 This is the driver that supports the DVM firmware. The list 57 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here: 58 of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a0299d43e..bce9b3420a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
70 71
71/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 20
74#define IWL8265_UCODE_API_MAX 20
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
76 79
77/* Lowest firmware API version supported */ 80/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 13 81#define IWL8000_UCODE_API_MIN 13
82#define IWL8265_UCODE_API_MIN 20
79 83
80/* NVM versions */ 84/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 85#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
93#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 99
100#define IWL8265_FW_PRE "iwlwifi-8265-"
101#define IWL8265_MODULE_FIRMWARE(api) \
102 IWL8265_FW_PRE __stringify(api) ".ucode"
103
96#define NVM_HW_SECTION_NUM_FAMILY_8000 10 104#define NVM_HW_SECTION_NUM_FAMILY_8000 10
97#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" 105#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
98#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" 106#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
144 .support_tx_backoff = true, 152 .support_tx_backoff = true,
145}; 153};
146 154
147#define IWL_DEVICE_8000 \ 155#define IWL_DEVICE_8000_COMMON \
148 .ucode_api_max = IWL8000_UCODE_API_MAX, \
149 .ucode_api_ok = IWL8000_UCODE_API_OK, \
150 .ucode_api_min = IWL8000_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_8000, \ 156 .device_family = IWL_DEVICE_FAMILY_8000, \
152 .max_inst_size = IWL60_RTC_INST_SIZE, \ 157 .max_inst_size = IWL60_RTC_INST_SIZE, \
153 .max_data_size = IWL60_RTC_DATA_SIZE, \ 158 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@ static const struct iwl_tt_params iwl8000_tt_params = {
167 .thermal_params = &iwl8000_tt_params, \ 172 .thermal_params = &iwl8000_tt_params, \
168 .apmg_not_supported = true 173 .apmg_not_supported = true
169 174
175#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \
180
181#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \
186
187#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \
192
170const struct iwl_cfg iwl8260_2n_cfg = { 193const struct iwl_cfg iwl8260_2n_cfg = {
171 .name = "Intel(R) Dual Band Wireless N 8260", 194 .name = "Intel(R) Dual Band Wireless N 8260",
172 .fw_name_pre = IWL8000_FW_PRE, 195 .fw_name_pre = IWL8000_FW_PRE,
173 IWL_DEVICE_8000, 196 IWL_DEVICE_8260,
174 .ht_params = &iwl8000_ht_params, 197 .ht_params = &iwl8000_ht_params,
175 .nvm_ver = IWL8000_NVM_VERSION, 198 .nvm_ver = IWL8000_NVM_VERSION,
176 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 199 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@ const struct iwl_cfg iwl8260_2n_cfg = {
179const struct iwl_cfg iwl8260_2ac_cfg = { 202const struct iwl_cfg iwl8260_2ac_cfg = {
180 .name = "Intel(R) Dual Band Wireless AC 8260", 203 .name = "Intel(R) Dual Band Wireless AC 8260",
181 .fw_name_pre = IWL8000_FW_PRE, 204 .fw_name_pre = IWL8000_FW_PRE,
182 IWL_DEVICE_8000, 205 IWL_DEVICE_8260,
183 .ht_params = &iwl8000_ht_params, 206 .ht_params = &iwl8000_ht_params,
184 .nvm_ver = IWL8000_NVM_VERSION, 207 .nvm_ver = IWL8000_NVM_VERSION,
185 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 208 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
188 211
189const struct iwl_cfg iwl8265_2ac_cfg = { 212const struct iwl_cfg iwl8265_2ac_cfg = {
190 .name = "Intel(R) Dual Band Wireless AC 8265", 213 .name = "Intel(R) Dual Band Wireless AC 8265",
191 .fw_name_pre = IWL8000_FW_PRE, 214 .fw_name_pre = IWL8265_FW_PRE,
192 IWL_DEVICE_8000, 215 IWL_DEVICE_8265,
193 .ht_params = &iwl8000_ht_params, 216 .ht_params = &iwl8000_ht_params,
194 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
195 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
209const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 232const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
210 .name = "Intel(R) Dual Band Wireless-AC 8260", 233 .name = "Intel(R) Dual Band Wireless-AC 8260",
211 .fw_name_pre = IWL8000_FW_PRE, 234 .fw_name_pre = IWL8000_FW_PRE,
212 IWL_DEVICE_8000, 235 IWL_DEVICE_8260,
213 .ht_params = &iwl8000_ht_params, 236 .ht_params = &iwl8000_ht_params,
214 .nvm_ver = IWL8000_NVM_VERSION, 237 .nvm_ver = IWL8000_NVM_VERSION,
215 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 238 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
236}; 259};
237 260
238MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 261MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
262MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb49075683..ab4c2a0470b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); 244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245 245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 246 if (rev_step != 'A')
247 "%s%c-%s.ucode", name_pre, rev_step, tag); 247 snprintf(drv->firmware_name,
248 sizeof(drv->firmware_name), "%s%c-%s.ucode",
249 name_pre, rev_step, tag);
248 } 250 }
249 251
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 252 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642f80dd..ea1e177c2ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1298 return -EBUSY; 1298 return -EBUSY;
1299 } 1299 }
1300 1300
1301 /* we don't support "match all" in the firmware */
1302 if (!req->n_match_sets)
1303 return -EOPNOTSUPP;
1304
1301 ret = iwl_mvm_check_running_scans(mvm, type); 1305 ret = iwl_mvm_check_running_scans(mvm, type);
1302 if (ret) 1306 if (ret)
1303 return ret; 1307 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e2700d..73c95594eabe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
491} 491}
492 492
493static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
494{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
496
497 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
498 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
499 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
500}
501
493static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 502static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
494{ 503{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 504 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8cf4b3..152cf9ad9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1438 inta & ~trans_pcie->inta_mask); 1438 inta & ~trans_pcie->inta_mask);
1439 } 1439 }
1440 1440
1441 /* Re-enable all interrupts */ 1441 /* we are loading the firmware, enable FH_TX interrupt only */
1442 /* only Re-enable if disabled by irq */ 1442 if (handled & CSR_INT_BIT_FH_TX)
1443 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1443 iwl_enable_fw_load_int(trans);
1444 /* only Re-enable all interrupt if disabled by irq */
1445 else if (test_bit(STATUS_INT_ENABLED, &trans->status))
1444 iwl_enable_interrupts(trans); 1446 iwl_enable_interrupts(trans);
1445 /* Re-enable RF_KILL if it occurred */ 1447 /* Re-enable RF_KILL if it occurred */
1446 else if (handled & CSR_INT_BIT_RF_KILL) 1448 else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467a983c..5a854c609477 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1021 &first_ucode_section); 1021 &first_ucode_section);
1022} 1022}
1023 1023
1024static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1025 const struct fw_img *fw, bool run_in_rfkill)
1026{
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill;
1029 int ret;
1030
1031 mutex_lock(&trans_pcie->mutex);
1032
1033 /* Someone called stop_device, don't try to start_fw */
1034 if (trans_pcie->is_down) {
1035 IWL_WARN(trans,
1036 "Can't start_fw since the HW hasn't been started\n");
1037 ret = EIO;
1038 goto out;
1039 }
1040
1041 /* This may fail if AMT took ownership of the device */
1042 if (iwl_pcie_prepare_card_hw(trans)) {
1043 IWL_WARN(trans, "Exit HW not ready\n");
1044 ret = -EIO;
1045 goto out;
1046 }
1047
1048 iwl_enable_rfkill_int(trans);
1049
1050 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = iwl_is_rfkill_set(trans);
1052 if (hw_rfkill)
1053 set_bit(STATUS_RFKILL, &trans->status);
1054 else
1055 clear_bit(STATUS_RFKILL, &trans->status);
1056 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1057 if (hw_rfkill && !run_in_rfkill) {
1058 ret = -ERFKILL;
1059 goto out;
1060 }
1061
1062 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1063
1064 ret = iwl_pcie_nic_init(trans);
1065 if (ret) {
1066 IWL_ERR(trans, "Unable to init nic\n");
1067 goto out;
1068 }
1069
1070 /* make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1073 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1074
1075 /* clear (again), then enable host interrupts */
1076 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1077 iwl_enable_interrupts(trans);
1078
1079 /* really make sure rfkill handshake bits are cleared */
1080 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1081 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1082
1083 /* Load the given image to the HW */
1084 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1085 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1086 else
1087 ret = iwl_pcie_load_given_ucode(trans, fw);
1088
1089out:
1090 mutex_unlock(&trans_pcie->mutex);
1091 return ret;
1092}
1093
1094static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1095{
1096 iwl_pcie_reset_ict(trans);
1097 iwl_pcie_tx_start(trans, scd_addr);
1098}
1099
1100static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1024static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1101{ 1025{
1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1127 * already dead. 1051 * already dead.
1128 */ 1052 */
1129 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1053 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1130 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); 1054 IWL_DEBUG_INFO(trans,
1055 "DEVICE_ENABLED bit was set and is now cleared\n");
1131 iwl_pcie_tx_stop(trans); 1056 iwl_pcie_tx_stop(trans);
1132 iwl_pcie_rx_stop(trans); 1057 iwl_pcie_rx_stop(trans);
1133 1058
@@ -1161,7 +1086,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1161 iwl_disable_interrupts(trans); 1086 iwl_disable_interrupts(trans);
1162 spin_unlock(&trans_pcie->irq_lock); 1087 spin_unlock(&trans_pcie->irq_lock);
1163 1088
1164
1165 /* clear all status bits */ 1089 /* clear all status bits */
1166 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1090 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1167 clear_bit(STATUS_INT_ENABLED, &trans->status); 1091 clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1194 if (hw_rfkill != was_hw_rfkill) 1118 if (hw_rfkill != was_hw_rfkill)
1195 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1119 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1196 1120
1197 /* re-take ownership to prevent other users from stealing the deivce */ 1121 /* re-take ownership to prevent other users from stealing the device */
1198 iwl_pcie_prepare_card_hw(trans); 1122 iwl_pcie_prepare_card_hw(trans);
1199} 1123}
1200 1124
1125static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1126 const struct fw_img *fw, bool run_in_rfkill)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129 bool hw_rfkill;
1130 int ret;
1131
1132 /* This may fail if AMT took ownership of the device */
1133 if (iwl_pcie_prepare_card_hw(trans)) {
1134 IWL_WARN(trans, "Exit HW not ready\n");
1135 ret = -EIO;
1136 goto out;
1137 }
1138
1139 iwl_enable_rfkill_int(trans);
1140
1141 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1142
1143 /*
1144 * We enabled the RF-Kill interrupt and the handler may very
1145 * well be running. Disable the interrupts to make sure no other
1146 * interrupt can be fired.
1147 */
1148 iwl_disable_interrupts(trans);
1149
1150 /* Make sure it finished running */
1151 synchronize_irq(trans_pcie->pci_dev->irq);
1152
1153 mutex_lock(&trans_pcie->mutex);
1154
1155 /* If platform's RF_KILL switch is NOT set to KILL */
1156 hw_rfkill = iwl_is_rfkill_set(trans);
1157 if (hw_rfkill)
1158 set_bit(STATUS_RFKILL, &trans->status);
1159 else
1160 clear_bit(STATUS_RFKILL, &trans->status);
1161 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1162 if (hw_rfkill && !run_in_rfkill) {
1163 ret = -ERFKILL;
1164 goto out;
1165 }
1166
1167 /* Someone called stop_device, don't try to start_fw */
1168 if (trans_pcie->is_down) {
1169 IWL_WARN(trans,
1170 "Can't start_fw since the HW hasn't been started\n");
1171 ret = -EIO;
1172 goto out;
1173 }
1174
1175 /* make sure rfkill handshake bits are cleared */
1176 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1177 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1178 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1179
1180 /* clear (again), then enable host interrupts */
1181 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1182
1183 ret = iwl_pcie_nic_init(trans);
1184 if (ret) {
1185 IWL_ERR(trans, "Unable to init nic\n");
1186 goto out;
1187 }
1188
1189 /*
1190 * Now, we load the firmware and don't want to be interrupted, even
1191 * by the RF-Kill interrupt (hence mask all the interrupt besides the
1192 * FH_TX interrupt which is needed to load the firmware). If the
1193 * RF-Kill switch is toggled, we will find out after having loaded
1194 * the firmware and return the proper value to the caller.
1195 */
1196 iwl_enable_fw_load_int(trans);
1197
1198 /* really make sure rfkill handshake bits are cleared */
1199 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1200 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1201
1202 /* Load the given image to the HW */
1203 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1204 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1205 else
1206 ret = iwl_pcie_load_given_ucode(trans, fw);
1207 iwl_enable_interrupts(trans);
1208
1209 /* re-check RF-Kill state since we may have missed the interrupt */
1210 hw_rfkill = iwl_is_rfkill_set(trans);
1211 if (hw_rfkill)
1212 set_bit(STATUS_RFKILL, &trans->status);
1213 else
1214 clear_bit(STATUS_RFKILL, &trans->status);
1215
1216 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1217 if (hw_rfkill && !run_in_rfkill)
1218 ret = -ERFKILL;
1219
1220out:
1221 mutex_unlock(&trans_pcie->mutex);
1222 return ret;
1223}
1224
1225static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1226{
1227 iwl_pcie_reset_ict(trans);
1228 iwl_pcie_tx_start(trans, scd_addr);
1229}
1230
1201static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1231static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1202{ 1232{
1203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce28238..28f7010e7108 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
138 ((wireless_mode == WIRELESS_MODE_N_5G) || 138 ((wireless_mode == WIRELESS_MODE_N_5G) ||
139 (wireless_mode == WIRELESS_MODE_N_24G))) 139 (wireless_mode == WIRELESS_MODE_N_24G)))
140 rate->flags |= IEEE80211_TX_RC_MCS; 140 rate->flags |= IEEE80211_TX_RC_MCS;
141 if (sta && sta->vht_cap.vht_supported &&
142 (wireless_mode == WIRELESS_MODE_AC_5G ||
143 wireless_mode == WIRELESS_MODE_AC_24G ||
144 wireless_mode == WIRELESS_MODE_AC_ONLY))
145 rate->flags |= IEEE80211_TX_RC_VHT_MCS;
141 } 146 }
142} 147}
143 148
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e727e9..564ca750c5ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@ int wlcore_set_partition(struct wl1271 *wl,
175 if (ret < 0) 175 if (ret < 0)
176 goto out; 176 goto out;
177 177
178 /* We don't need the size of the last partition, as it is
179 * automatically calculated based on the total memory size and
180 * the sizes of the previous partitions.
181 */
178 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 182 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
179 if (ret < 0) 183 if (ret < 0)
180 goto out; 184 goto out;
181 185
182 ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
183 if (ret < 0)
184 goto out;
185
186out: 186out:
187 return ret; 187 return ret;
188} 188}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54f415..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) 36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) 37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) 38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) 39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) 40
41#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
42 42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
382 [ND_CMD_ARS_CAP] = { 382 [ND_CMD_ARS_CAP] = {
383 .in_num = 2, 383 .in_num = 2,
384 .in_sizes = { 8, 8, }, 384 .in_sizes = { 8, 8, },
385 .out_num = 2, 385 .out_num = 4,
386 .out_sizes = { 4, 4, }, 386 .out_sizes = { 4, 4, 4, 4, },
387 }, 387 },
388 [ND_CMD_ARS_START] = { 388 [ND_CMD_ARS_START] = {
389 .in_num = 4, 389 .in_num = 5,
390 .in_sizes = { 8, 8, 2, 6, }, 390 .in_sizes = { 8, 8, 2, 1, 5, },
391 .out_num = 1, 391 .out_num = 2,
392 .out_sizes = { 4, }, 392 .out_sizes = { 4, 4, },
393 }, 393 },
394 [ND_CMD_ARS_STATUS] = { 394 [ND_CMD_ARS_STATUS] = {
395 .out_num = 2, 395 .out_num = 3,
396 .out_sizes = { 4, UINT_MAX, }, 396 .out_sizes = { 4, 4, UINT_MAX, },
397 }, 397 },
398}; 398};
399 399
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
442 return in_field[1]; 442 return in_field[1];
443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) 443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
444 return out_field[1]; 444 return out_field[1];
445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1) 445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
446 return ND_CMD_ARS_STATUS_MAX; 446 return out_field[1] - 8;
447 447
448 return UINT_MAX; 448 return UINT_MAX;
449} 449}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
41 phys_addr_t phys_addr; 41 phys_addr_t phys_addr;
42 /* when non-zero this device is hosting a 'pfn' instance */ 42 /* when non-zero this device is hosting a 'pfn' instance */
43 phys_addr_t data_offset; 43 phys_addr_t data_offset;
44 unsigned long pfn_flags; 44 u64 pfn_flags;
45 void __pmem *virt_addr; 45 void __pmem *virt_addr;
46 size_t size; 46 size_t size;
47 struct badblocks bb; 47 struct badblocks bb;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..b586d84f2518 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI
17 and block devices nodes, as well a a translation for a small 17 and block devices nodes, as well a a translation for a small
18 number of selected SCSI commands to NVMe commands to the NVMe 18 number of selected SCSI commands to NVMe commands to the NVMe
19 driver. If you don't know what this means you probably want 19 driver. If you don't know what this means you probably want
20 to say N here, and if you know what it means you probably 20 to say N here, unless you run a distro that abuses the SCSI
21 want to say N as well. 21 emulation to provide stable device names for mount by id, like
22 some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..3cd921e6121e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1121,7 +1121,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1121 ns->queue = blk_mq_init_queue(ctrl->tagset); 1121 ns->queue = blk_mq_init_queue(ctrl->tagset);
1122 if (IS_ERR(ns->queue)) 1122 if (IS_ERR(ns->queue))
1123 goto out_free_ns; 1123 goto out_free_ns;
1124 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1125 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1124 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1126 ns->queue->queuedata = ns; 1125 ns->queue->queuedata = ns;
1127 ns->ctrl = ctrl; 1126 ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..6bb15e4926dc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149#define NVME_NVM_LP_MLC_PAIRS 886
149struct nvme_nvm_lp_mlc { 150struct nvme_nvm_lp_mlc {
150 __u16 num_pairs; 151 __u16 num_pairs;
151 __u8 pairs[886]; 152 __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
152}; 153};
153 154
154struct nvme_nvm_lp_tbl { 155struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
282 memcpy(dst->lptbl.id, src->lptbl.id, 8); 283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs = 284 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs); 285 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */ 286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1); 293 dst->lptbl.mlc.num_pairs);
288 } 294 }
289 } 295 }
290 296
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..9664d07d807d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -139,9 +139,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
139 u32 val = 0; 139 u32 val = 0;
140 140
141 if (ctrl->ops->io_incapable(ctrl)) 141 if (ctrl->ops->io_incapable(ctrl))
142 return false; 142 return true;
143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) 143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
144 return false; 144 return true;
145 return val & NVME_CSTS_CFS; 145 return val & NVME_CSTS_CFS;
146} 146}
147 147
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..a128672472ec 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -678,6 +678,11 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
678 blk_mq_start_request(req); 678 blk_mq_start_request(req);
679 679
680 spin_lock_irq(&nvmeq->q_lock); 680 spin_lock_irq(&nvmeq->q_lock);
681 if (unlikely(nvmeq->cq_vector < 0)) {
682 ret = BLK_MQ_RQ_QUEUE_BUSY;
683 spin_unlock_irq(&nvmeq->q_lock);
684 goto out;
685 }
681 __nvme_submit_cmd(nvmeq, &cmnd); 686 __nvme_submit_cmd(nvmeq, &cmnd);
682 nvme_process_cq(nvmeq); 687 nvme_process_cq(nvmeq);
683 spin_unlock_irq(&nvmeq->q_lock); 688 spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1004,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
999 if (!blk_mq_request_started(req)) 1004 if (!blk_mq_request_started(req))
1000 return; 1005 return;
1001 1006
1002 dev_warn(nvmeq->q_dmadev, 1007 dev_dbg_ratelimited(nvmeq->q_dmadev,
1003 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); 1008 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
1004 1009
1005 status = NVME_SC_ABORT_REQ; 1010 status = NVME_SC_ABORT_REQ;
@@ -2111,16 +2116,12 @@ static void nvme_remove(struct pci_dev *pdev)
2111{ 2116{
2112 struct nvme_dev *dev = pci_get_drvdata(pdev); 2117 struct nvme_dev *dev = pci_get_drvdata(pdev);
2113 2118
2114 spin_lock(&dev_list_lock);
2115 list_del_init(&dev->node);
2116 spin_unlock(&dev_list_lock);
2117
2118 pci_set_drvdata(pdev, NULL); 2119 pci_set_drvdata(pdev, NULL);
2119 flush_work(&dev->reset_work);
2120 flush_work(&dev->scan_work); 2120 flush_work(&dev->scan_work);
2121 nvme_remove_namespaces(&dev->ctrl); 2121 nvme_remove_namespaces(&dev->ctrl);
2122 nvme_uninit_ctrl(&dev->ctrl); 2122 nvme_uninit_ctrl(&dev->ctrl);
2123 nvme_dev_disable(dev, true); 2123 nvme_dev_disable(dev, true);
2124 flush_work(&dev->reset_work);
2124 nvme_dev_remove_admin(dev); 2125 nvme_dev_remove_admin(dev);
2125 nvme_free_queues(dev, 0); 2126 nvme_free_queues(dev, 0);
2126 nvme_release_cmb(dev); 2127 nvme_release_cmb(dev);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6fd4e5a5ef4a..9d11d9837312 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
70 if (pos >= nvmem->size) 70 if (pos >= nvmem->size)
71 return 0; 71 return 0;
72 72
73 if (count < nvmem->word_size)
74 return -EINVAL;
75
73 if (pos + count > nvmem->size) 76 if (pos + count > nvmem->size)
74 count = nvmem->size - pos; 77 count = nvmem->size - pos;
75 78
@@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
95 if (pos >= nvmem->size) 98 if (pos >= nvmem->size)
96 return 0; 99 return 0;
97 100
101 if (count < nvmem->word_size)
102 return -EINVAL;
103
98 if (pos + count > nvmem->size) 104 if (pos + count > nvmem->size)
99 count = nvmem->size - pos; 105 count = nvmem->size - pos;
100 106
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index afb67e7eeee4..3829e5fbf8c3 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = {
21 .reg_bits = 32, 21 .reg_bits = 32,
22 .val_bits = 8, 22 .val_bits = 8,
23 .reg_stride = 1, 23 .reg_stride = 1,
24 .val_format_endian = REGMAP_ENDIAN_LITTLE,
24}; 25};
25 26
26static struct nvmem_config econfig = { 27static struct nvmem_config econfig = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae305ae..e7bfc175b8e1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
635 msi_base = be32_to_cpup(msi_map + 2); 635 msi_base = be32_to_cpup(msi_map + 2);
636 rid_len = be32_to_cpup(msi_map + 3); 636 rid_len = be32_to_cpup(msi_map + 3);
637 637
638 if (rid_base & ~map_mask) {
639 dev_err(parent_dev,
640 "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
641 map_mask, rid_base);
642 return rid_out;
643 }
644
638 msi_controller_node = of_find_node_by_phandle(phandle); 645 msi_controller_node = of_find_node_by_phandle(phandle);
639 646
640 matched = (masked_rid >= rid_base && 647 matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
654 if (!matched) 661 if (!matched)
655 return rid_out; 662 return rid_out;
656 663
657 rid_out = masked_rid + msi_base; 664 rid_out = masked_rid - rid_base + msi_base;
658 dev_dbg(dev, 665 dev_dbg(dev,
659 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", 666 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
660 dev_name(parent_dev), map_mask, rid_base, msi_base, 667 dev_name(parent_dev), map_mask, rid_base, msi_base,
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5648317d355f..39c4be41ef83 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -154,6 +154,7 @@ static const struct of_device_id whitelist_phys[] = {
154 { .compatible = "marvell,88E1111", }, 154 { .compatible = "marvell,88E1111", },
155 { .compatible = "marvell,88e1116", }, 155 { .compatible = "marvell,88e1116", },
156 { .compatible = "marvell,88e1118", }, 156 { .compatible = "marvell,88e1118", },
157 { .compatible = "marvell,88e1145", },
157 { .compatible = "marvell,88e1149r", }, 158 { .compatible = "marvell,88e1149r", },
158 { .compatible = "marvell,88e1310", }, 159 { .compatible = "marvell,88e1310", },
159 { .compatible = "marvell,88E1510", }, 160 { .compatible = "marvell,88E1510", },
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
14config PCI_MVEBU 14config PCI_MVEBU
15 bool "Marvell EBU PCIe controller" 15 bool "Marvell EBU PCIe controller"
16 depends on ARCH_MVEBU || ARCH_DOVE 16 depends on ARCH_MVEBU || ARCH_DOVE
17 depends on ARM
17 depends on OF 18 depends on OF
18 19
19config PCIE_DW 20config PCIE_DW
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 5816bceddb65..a576aeeb22da 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -64,7 +64,6 @@
64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) 64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
65 65
66#define MAX_NUM_OB_WINDOWS 2 66#define MAX_NUM_OB_WINDOWS 2
67#define MAX_NUM_PAXC_PF 4
68 67
69#define IPROC_PCIE_REG_INVALID 0xffff 68#define IPROC_PCIE_REG_INVALID 0xffff
70 69
@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
170 writel(val, pcie->base + offset + (window * 8)); 169 writel(val, pcie->base + offset + (window * 8));
171} 170}
172 171
173static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
174 unsigned int slot,
175 unsigned int fn)
176{
177 if (slot > 0)
178 return false;
179
180 /* PAXC can only support limited number of functions */
181 if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
182 return false;
183
184 return true;
185}
186
187/** 172/**
188 * Note access to the configuration registers are protected at the higher layer 173 * Note access to the configuration registers are protected at the higher layer
189 * by 'pci_lock' in drivers/pci/access.c 174 * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
199 u32 val; 184 u32 val;
200 u16 offset; 185 u16 offset;
201 186
202 if (!iproc_pcie_device_is_valid(pcie, slot, fn))
203 return NULL;
204
205 /* root complex access */ 187 /* root complex access */
206 if (busno == 0) { 188 if (busno == 0) {
189 if (slot > 0 || fn > 0)
190 return NULL;
191
207 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, 192 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
208 where & CFG_IND_ADDR_MASK); 193 where & CFG_IND_ADDR_MASK);
209 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); 194 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
213 return (pcie->base + offset); 198 return (pcie->base + offset);
214 } 199 }
215 200
201 /*
202 * PAXC is connected to an internally emulated EP within the SoC. It
203 * allows only one device.
204 */
205 if (pcie->type == IPROC_PCIE_PAXC)
206 if (slot > 0)
207 return NULL;
208
216 /* EP device access */ 209 /* EP device access */
217 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | 210 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
218 (slot << CFG_ADDR_DEV_NUM_SHIFT) | 211 (slot << CFG_ADDR_DEV_NUM_SHIFT) |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a0fb..48d21e0edd56 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
262 rpc->rpd = dev; 262 rpc->rpd = dev;
263 INIT_WORK(&rpc->dpc_handler, aer_isr); 263 INIT_WORK(&rpc->dpc_handler, aer_isr);
264 mutex_init(&rpc->rpc_mutex); 264 mutex_init(&rpc->rpc_mutex);
265 init_waitqueue_head(&rpc->wait_release);
266 265
267 /* Use PCIe bus function to store rpc into PCIe device */ 266 /* Use PCIe bus function to store rpc into PCIe device */
268 set_service_data(dev, rpc); 267 set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
285 if (rpc->isr) 284 if (rpc->isr)
286 free_irq(dev->irq, dev); 285 free_irq(dev->irq, dev);
287 286
288 wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); 287 flush_work(&rpc->dpc_handler);
289
290 aer_disable_rootport(rpc); 288 aer_disable_rootport(rpc);
291 kfree(rpc); 289 kfree(rpc);
292 set_service_data(dev, NULL); 290 set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c9456..945c939a86c5 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
72 * recovery on the same 72 * recovery on the same
73 * root port hierarchy 73 * root port hierarchy
74 */ 74 */
75 wait_queue_head_t wait_release;
76}; 75};
77 76
78struct aer_broadcast_data { 77struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 712392504ed9..521e39c1b66d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
811 while (get_e_source(rpc, &e_src)) 811 while (get_e_source(rpc, &e_src))
812 aer_isr_one_error(p_device, &e_src); 812 aer_isr_one_error(p_device, &e_src);
813 mutex_unlock(&rpc->rpc_mutex); 813 mutex_unlock(&rpc->rpc_mutex);
814
815 wake_up(&rpc->wait_release);
816} 814}
817 815
818/** 816/**
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
53}; 53};
54 54
55struct pcifront_sd { 55struct pcifront_sd {
56 int domain; 56 struct pci_sysdata sd;
57 struct pcifront_device *pdev; 57 struct pcifront_device *pdev;
58}; 58};
59 59
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
67 unsigned int domain, unsigned int bus, 67 unsigned int domain, unsigned int bus,
68 struct pcifront_device *pdev) 68 struct pcifront_device *pdev)
69{ 69{
70 sd->domain = domain; 70 /* Because we do not expose that information via XenBus. */
71 sd->sd.node = first_online_node;
72 sd->sd.domain = domain;
71 sd->pdev = pdev; 73 sd->pdev = pdev;
72} 74}
73 75
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
468 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", 470 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
469 domain, bus); 471 domain, bus);
470 472
471 bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); 473 bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
472 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 474 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
473 if (!bus_entry || !sd) { 475 if (!bus_entry || !sd) {
474 err = -ENOMEM; 476 err = -ENOMEM;
475 goto err_out; 477 goto err_out;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e7e117d5dbbe..0124d17bd9fe 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -224,6 +224,7 @@ config PHY_MT65XX_USB3
224 224
225config PHY_HI6220_USB 225config PHY_HI6220_USB
226 tristate "hi6220 USB PHY support" 226 tristate "hi6220 USB PHY support"
227 depends on (ARCH_HISI && ARM64) || COMPILE_TEST
227 select GENERIC_PHY 228 select GENERIC_PHY
228 select MFD_SYSCON 229 select MFD_SYSCON
229 help 230 help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27db6ad3..e7e574dc667a 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
275 275
276int phy_power_on(struct phy *phy) 276int phy_power_on(struct phy *phy)
277{ 277{
278 int ret; 278 int ret = 0;
279 279
280 if (!phy) 280 if (!phy)
281 return 0; 281 goto out;
282 282
283 if (phy->pwr) { 283 if (phy->pwr) {
284 ret = regulator_enable(phy->pwr); 284 ret = regulator_enable(phy->pwr);
285 if (ret) 285 if (ret)
286 return ret; 286 goto out;
287 } 287 }
288 288
289 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
290 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
291 return ret; 291 goto err_pm_sync;
292
292 ret = 0; /* Override possible ret == -ENOTSUPP */ 293 ret = 0; /* Override possible ret == -ENOTSUPP */
293 294
294 mutex_lock(&phy->mutex); 295 mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
296 ret = phy->ops->power_on(phy); 297 ret = phy->ops->power_on(phy);
297 if (ret < 0) { 298 if (ret < 0) {
298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 299 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
299 goto out; 300 goto err_pwr_on;
300 } 301 }
301 } 302 }
302 ++phy->power_count; 303 ++phy->power_count;
303 mutex_unlock(&phy->mutex); 304 mutex_unlock(&phy->mutex);
304 return 0; 305 return 0;
305 306
306out: 307err_pwr_on:
307 mutex_unlock(&phy->mutex); 308 mutex_unlock(&phy->mutex);
308 phy_pm_runtime_put_sync(phy); 309 phy_pm_runtime_put_sync(phy);
310err_pm_sync:
309 if (phy->pwr) 311 if (phy->pwr)
310 regulator_disable(phy->pwr); 312 regulator_disable(phy->pwr);
311 313out:
312 return ret; 314 return ret;
313} 315}
314EXPORT_SYMBOL_GPL(phy_power_on); 316EXPORT_SYMBOL_GPL(phy_power_on);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 4a3fc6e59f8e..840f3eae428b 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
715 pm_runtime_use_autosuspend(&pdev->dev); 715 pm_runtime_use_autosuspend(&pdev->dev);
716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); 716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
717 pm_runtime_enable(&pdev->dev); 717 pm_runtime_enable(&pdev->dev);
718 pm_runtime_get_sync(&pdev->dev);
718 719
719 /* Our job is to use irqs and status from the power module 720 /* Our job is to use irqs and status from the power module
720 * to keep the transceiver disabled when nothing's connected. 721 * to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
750 struct twl4030_usb *twl = platform_get_drvdata(pdev); 751 struct twl4030_usb *twl = platform_get_drvdata(pdev);
751 int val; 752 int val;
752 753
754 usb_remove_phy(&twl->phy);
753 pm_runtime_get_sync(twl->dev); 755 pm_runtime_get_sync(twl->dev);
754 cancel_delayed_work(&twl->id_workaround_work); 756 cancel_delayed_work(&twl->id_workaround_work);
755 device_remove_file(twl->dev, &dev_attr_vbus); 757 device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
757 /* set transceiver mode to power on defaults */ 759 /* set transceiver mode to power on defaults */
758 twl4030_usb_set_mode(twl, -1); 760 twl4030_usb_set_mode(twl, -1);
759 761
762 /* idle ulpi before powering off */
763 if (cable_present(twl->linkstat))
764 pm_runtime_put_noidle(twl->dev);
765 pm_runtime_mark_last_busy(twl->dev);
766 pm_runtime_put_sync_suspend(twl->dev);
767 pm_runtime_disable(twl->dev);
768
760 /* autogate 60MHz ULPI clock, 769 /* autogate 60MHz ULPI clock,
761 * clear dpll clock request for i2c access, 770 * clear dpll clock request for i2c access,
762 * disable 32KHz 771 * disable 32KHz
@@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
771 /* disable complete OTG block */ 780 /* disable complete OTG block */
772 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); 781 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
773 782
774 if (cable_present(twl->linkstat))
775 pm_runtime_put_noidle(twl->dev);
776 pm_runtime_mark_last_busy(twl->dev);
777 pm_runtime_put(twl->dev);
778
779 return 0; 783 return 0;
780} 784}
781 785
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4ed225..e96e86d2e745 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg); 347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
348 break; 348 break;
349 case PIN_CONFIG_INPUT_ENABLE: 349 case PIN_CONFIG_INPUT_ENABLE:
350 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
350 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 351 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
351 break; 352 break;
352 case PIN_CONFIG_OUTPUT: 353 case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
354 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false); 355 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
355 break; 356 break;
356 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 357 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
358 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
357 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 359 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
358 break; 360 break;
359 case PIN_CONFIG_DRIVE_STRENGTH: 361 case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d473811bb3..3ef798fac81b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; 666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
667 struct mvebu_pinctrl_group *grp; 667 struct mvebu_pinctrl_group *grp;
668 unsigned num_settings; 668 unsigned num_settings;
669 unsigned supp_settings;
669 670
670 for (num_settings = 0; ; set++) { 671 for (num_settings = 0, supp_settings = 0; ; set++) {
671 if (!set->name) 672 if (!set->name)
672 break; 673 break;
673 674
675 num_settings++;
676
674 /* skip unsupported settings for this variant */ 677 /* skip unsupported settings for this variant */
675 if (pctl->variant && !(pctl->variant & set->variant)) 678 if (pctl->variant && !(pctl->variant & set->variant))
676 continue; 679 continue;
677 680
678 num_settings++; 681 supp_settings++;
679 682
680 /* find gpio/gpo/gpi settings */ 683 /* find gpio/gpo/gpi settings */
681 if (strcmp(set->name, "gpio") == 0) 684 if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
688 } 691 }
689 692
690 /* skip modes with no settings for this variant */ 693 /* skip modes with no settings for this variant */
691 if (!num_settings) 694 if (!supp_settings)
692 continue; 695 continue;
693 696
694 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); 697 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e60106ec2..1f7469c9857d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@ static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); 191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
192} 192}
193 193
194#ifdef CONFIG_DEBUG_FS
194static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset, 195static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
195 enum abx500_gpio_pull_updown *pull_updown) 196 enum abx500_gpio_pull_updown *pull_updown)
196{ 197{
@@ -226,6 +227,7 @@ out:
226 227
227 return ret; 228 return ret;
228} 229}
230#endif
229 231
230static int abx500_set_pull_updown(struct abx500_pinctrl *pct, 232static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
231 int offset, enum abx500_gpio_pull_updown val) 233 int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@ out:
468 return ret; 470 return ret;
469} 471}
470 472
473#ifdef CONFIG_DEBUG_FS
471static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, 474static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
472 unsigned gpio) 475 unsigned gpio)
473{ 476{
@@ -553,8 +556,6 @@ out:
553 return ret; 556 return ret;
554} 557}
555 558
556#ifdef CONFIG_DEBUG_FS
557
558#include <linux/seq_file.h> 559#include <linux/seq_file.h>
559 560
560static void abx500_gpio_dbg_show_one(struct seq_file *s, 561static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205cf809..216f227c6009 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
426 426
427 return 0; 427 return 0;
428} 428}
429EXPORT_SYMBOL(pxa2xx_pinctrl_init);
429 430
430int pxa2xx_pinctrl_exit(struct platform_device *pdev) 431int pxa2xx_pinctrl_exit(struct platform_device *pdev)
431{ 432{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e958589..5cc97f85db02 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@ static const struct pinconf_ops samsung_pinconf_ops = {
514 .pin_config_group_set = samsung_pinconf_group_set, 514 .pin_config_group_set = samsung_pinconf_group_set,
515}; 515};
516 516
517/* gpiolib gpio_set callback function */ 517/*
518static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 518 * The samsung_gpio_set_value() should be called with "bank->slock" held
519 * to avoid race condition.
520 */
521static void samsung_gpio_set_value(struct gpio_chip *gc,
522 unsigned offset, int value)
519{ 523{
520 struct samsung_pin_bank *bank = gpiochip_get_data(gc); 524 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
521 const struct samsung_pin_bank_type *type = bank->type; 525 const struct samsung_pin_bank_type *type = bank->type;
522 unsigned long flags;
523 void __iomem *reg; 526 void __iomem *reg;
524 u32 data; 527 u32 data;
525 528
526 reg = bank->drvdata->virt_base + bank->pctl_offset; 529 reg = bank->drvdata->virt_base + bank->pctl_offset;
527 530
528 spin_lock_irqsave(&bank->slock, flags);
529
530 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); 531 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
531 data &= ~(1 << offset); 532 data &= ~(1 << offset);
532 if (value) 533 if (value)
533 data |= 1 << offset; 534 data |= 1 << offset;
534 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]); 535 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
536}
537
538/* gpiolib gpio_set callback function */
539static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
540{
541 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
542 unsigned long flags;
535 543
544 spin_lock_irqsave(&bank->slock, flags);
545 samsung_gpio_set_value(gc, offset, value);
536 spin_unlock_irqrestore(&bank->slock, flags); 546 spin_unlock_irqrestore(&bank->slock, flags);
537} 547}
538 548
@@ -553,6 +563,8 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
553} 563}
554 564
555/* 565/*
566 * The samsung_gpio_set_direction() should be called with "bank->slock" held
567 * to avoid race condition.
556 * The calls to gpio_direction_output() and gpio_direction_input() 568 * The calls to gpio_direction_output() and gpio_direction_input()
557 * lead to this function call. 569 * lead to this function call.
558 */ 570 */
@@ -564,7 +576,6 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
564 struct samsung_pinctrl_drv_data *drvdata; 576 struct samsung_pinctrl_drv_data *drvdata;
565 void __iomem *reg; 577 void __iomem *reg;
566 u32 data, mask, shift; 578 u32 data, mask, shift;
567 unsigned long flags;
568 579
569 bank = gpiochip_get_data(gc); 580 bank = gpiochip_get_data(gc);
570 type = bank->type; 581 type = bank->type;
@@ -581,31 +592,42 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
581 reg += 4; 592 reg += 4;
582 } 593 }
583 594
584 spin_lock_irqsave(&bank->slock, flags);
585
586 data = readl(reg); 595 data = readl(reg);
587 data &= ~(mask << shift); 596 data &= ~(mask << shift);
588 if (!input) 597 if (!input)
589 data |= FUNC_OUTPUT << shift; 598 data |= FUNC_OUTPUT << shift;
590 writel(data, reg); 599 writel(data, reg);
591 600
592 spin_unlock_irqrestore(&bank->slock, flags);
593
594 return 0; 601 return 0;
595} 602}
596 603
597/* gpiolib gpio_direction_input callback function. */ 604/* gpiolib gpio_direction_input callback function. */
598static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 605static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
599{ 606{
600 return samsung_gpio_set_direction(gc, offset, true); 607 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 unsigned long flags;
609 int ret;
610
611 spin_lock_irqsave(&bank->slock, flags);
612 ret = samsung_gpio_set_direction(gc, offset, true);
613 spin_unlock_irqrestore(&bank->slock, flags);
614 return ret;
601} 615}
602 616
603/* gpiolib gpio_direction_output callback function. */ 617/* gpiolib gpio_direction_output callback function. */
604static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, 618static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
605 int value) 619 int value)
606{ 620{
607 samsung_gpio_set(gc, offset, value); 621 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 return samsung_gpio_set_direction(gc, offset, false); 622 unsigned long flags;
623 int ret;
624
625 spin_lock_irqsave(&bank->slock, flags);
626 samsung_gpio_set_value(gc, offset, value);
627 ret = samsung_gpio_set_direction(gc, offset, false);
628 spin_unlock_irqrestore(&bank->slock, flags);
629
630 return ret;
609} 631}
610 632
611/* 633/*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf047cee..11760bbe9d51 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
492 .pins = sun8i_h3_pins, 492 .pins = sun8i_h3_pins,
493 .npins = ARRAY_SIZE(sun8i_h3_pins), 493 .npins = ARRAY_SIZE(sun8i_h3_pins),
494 .irq_banks = 2, 494 .irq_banks = 2,
495 .irq_read_needs_mux = true
495}; 496};
496 497
497static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) 498static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 20f0ad9bb9f3..e20f23e04c24 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = {
41 { KE_KEY, 4, { KEY_HOME } }, 41 { KE_KEY, 4, { KEY_HOME } },
42 { KE_KEY, 5, { KEY_END } }, 42 { KE_KEY, 5, { KEY_END } },
43 { KE_KEY, 6, { KEY_PAGEUP } }, 43 { KE_KEY, 6, { KEY_PAGEUP } },
44 { KE_KEY, 4, { KEY_PAGEDOWN } }, 44 { KE_KEY, 7, { KEY_PAGEDOWN } },
45 { KE_KEY, 4, { KEY_HOME } },
46 { KE_KEY, 8, { KEY_RFKILL } }, 45 { KE_KEY, 8, { KEY_RFKILL } },
47 { KE_KEY, 9, { KEY_POWER } }, 46 { KE_KEY, 9, { KEY_POWER } },
48 { KE_KEY, 11, { KEY_SLEEP } }, 47 { KE_KEY, 11, { KEY_SLEEP } },
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6343c3..aa454241489c 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@ struct scu_ipc_data {
49 49
50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) 50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
51{ 51{
52 int count = data->count; 52 unsigned int count = data->count;
53 53
54 if (count == 0 || count == 3 || count > 4) 54 if (count == 0 || count == 3 || count > 4)
55 return -EINVAL; 55 return -EINVAL;
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
21 21
22#include <linux/power/bq27xxx_battery.h> 22#include <linux/power/bq27xxx_battery.h>
23 23
24static DEFINE_IDR(battery_id);
25static DEFINE_MUTEX(battery_mutex);
26
24static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data) 27static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
25{ 28{
26 struct bq27xxx_device_info *di = data; 29 struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
70{ 73{
71 struct bq27xxx_device_info *di; 74 struct bq27xxx_device_info *di;
72 int ret; 75 int ret;
76 char *name;
77 int num;
78
79 /* Get new ID for the new battery device */
80 mutex_lock(&battery_mutex);
81 num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
82 mutex_unlock(&battery_mutex);
83 if (num < 0)
84 return num;
85
86 name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
87 if (!name)
88 goto err_mem;
73 89
74 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL); 90 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
75 if (!di) 91 if (!di)
76 return -ENOMEM; 92 goto err_mem;
77 93
94 di->id = num;
78 di->dev = &client->dev; 95 di->dev = &client->dev;
79 di->chip = id->driver_data; 96 di->chip = id->driver_data;
80 di->name = id->name; 97 di->name = name;
81 di->bus.read = bq27xxx_battery_i2c_read; 98 di->bus.read = bq27xxx_battery_i2c_read;
82 99
83 ret = bq27xxx_battery_setup(di); 100 ret = bq27xxx_battery_setup(di);
84 if (ret) 101 if (ret)
85 return ret; 102 goto err_failed;
86 103
87 /* Schedule a polling after about 1 min */ 104 /* Schedule a polling after about 1 min */
88 schedule_delayed_work(&di->work, 60 * HZ); 105 schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
103 } 120 }
104 121
105 return 0; 122 return 0;
123
124err_mem:
125 ret = -ENOMEM;
126
127err_failed:
128 mutex_lock(&battery_mutex);
129 idr_remove(&battery_id, num);
130 mutex_unlock(&battery_mutex);
131
132 return ret;
106} 133}
107 134
108static int bq27xxx_battery_i2c_remove(struct i2c_client *client) 135static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
111 138
112 bq27xxx_battery_teardown(di); 139 bq27xxx_battery_teardown(di);
113 140
141 mutex_lock(&battery_mutex);
142 idr_remove(&battery_id, di->id);
143 mutex_unlock(&battery_mutex);
144
114 return 0; 145 return 0;
115} 146}
116 147
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605dac8309..c78db05e75b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3035 max = block->base->discipline->max_blocks << block->s2b_shift; 3035 max = block->base->discipline->max_blocks << block->s2b_shift;
3036 } 3036 }
3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3038 block->request_queue->limits.max_dev_sectors = max;
3038 blk_queue_logical_block_size(block->request_queue, 3039 blk_queue_logical_block_size(block->request_queue,
3039 block->bp_block); 3040 block->bp_block);
3040 blk_queue_max_hw_sectors(block->request_queue, max); 3041 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
723 ASCEBC((char *) &cqr->magic, 4); 731 ASCEBC((char *) &cqr->magic, 4);
724 ccw = cqr->cpaddr; 732 ccw = cqr->cpaddr;
725 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 733 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
726 ccw->flags = 0 ; 734 ccw->flags = CCW_FLAG_SLI;
727 ccw->count = 16; 735 ccw->count = 16;
728 ccw->cda = (__u32)(addr_t) cqr->data; 736 ccw->cda = (__u32)(addr_t) cqr->data;
729 ((char *)cqr->data)[0] = reason; 737 ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
930 /* 3. read new alias configuration */ 938 /* 3. read new alias configuration */
931 _schedule_lcu_update(lcu, device); 939 _schedule_lcu_update(lcu, device);
932 lcu->suc_data.device = NULL; 940 lcu->suc_data.device = NULL;
941 dasd_put_device(device);
933 spin_unlock_irqrestore(&lcu->lock, flags); 942 spin_unlock_irqrestore(&lcu->lock, flags);
934} 943}
935 944
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
989 } 998 }
990 lcu->suc_data.reason = reason; 999 lcu->suc_data.reason = reason;
991 lcu->suc_data.device = device; 1000 lcu->suc_data.device = device;
1001 dasd_get_device(device);
992 spin_unlock(&lcu->lock); 1002 spin_unlock(&lcu->lock);
993 schedule_work(&lcu->suc_data.worker); 1003 if (!schedule_work(&lcu->suc_data.worker))
1004 dasd_put_device(device);
994}; 1005};
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 361358134315..93880ed6291c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
562 /* 562 /*
563 * Command Lock contention 563 * Command Lock contention
564 */ 564 */
565 err = SCSI_DH_RETRY; 565 err = SCSI_DH_IMM_RETRY;
566 break; 566 break;
567 default: 567 default:
568 break; 568 break;
@@ -612,6 +612,8 @@ retry:
612 err = mode_select_handle_sense(sdev, h->sense); 612 err = mode_select_handle_sense(sdev, h->sense);
613 if (err == SCSI_DH_RETRY && retry_cnt--) 613 if (err == SCSI_DH_RETRY && retry_cnt--)
614 goto retry; 614 goto retry;
615 if (err == SCSI_DH_IMM_RETRY)
616 goto retry;
615 } 617 }
616 if (err == SCSI_DH_OK) { 618 if (err == SCSI_DH_OK) {
617 h->state = RDAC_STATE_ACTIVE; 619 h->state = RDAC_STATE_ACTIVE;
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index b67661836c9f..d1dd1616f983 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,6 +1,6 @@
1config SCSI_HISI_SAS 1config SCSI_HISI_SAS
2 tristate "HiSilicon SAS" 2 tristate "HiSilicon SAS"
3 depends on HAS_DMA 3 depends on HAS_DMA && HAS_IOMEM
4 depends on ARM64 || COMPILE_TEST 4 depends on ARM64 || COMPILE_TEST
5 select SCSI_SAS_LIBSAS 5 select SCSI_SAS_LIBSAS
6 select BLK_DEV_INTEGRITY 6 select BLK_DEV_INTEGRITY
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 057fdeb720ac..eea24d7531cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
1289 goto out; 1289 goto out;
1290 } 1290 }
1291 1291
1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) { 1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
1293 if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) || 1293 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
1294 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
1295 ts->stat = SAS_DATA_OVERRUN;
1296 else
1297 slot_err_v1_hw(hisi_hba, task, slot);
1298 1294
1295 slot_err_v1_hw(hisi_hba, task, slot);
1299 goto out; 1296 goto out;
1300 } 1297 }
1301 1298
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a87657c7dd..692a7570b5e1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2204 /* Clear outstanding commands array. */ 2204 /* Clear outstanding commands array. */
2205 for (que = 0; que < ha->max_req_queues; que++) { 2205 for (que = 0; que < ha->max_req_queues; que++) {
2206 req = ha->req_q_map[que]; 2206 req = ha->req_q_map[que];
2207 if (!req) 2207 if (!req || !test_bit(que, ha->req_qid_map))
2208 continue; 2208 continue;
2209 req->out_ptr = (void *)(req->ring + req->length); 2209 req->out_ptr = (void *)(req->ring + req->length);
2210 *req->out_ptr = 0; 2210 *req->out_ptr = 0;
@@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2221 2221
2222 for (que = 0; que < ha->max_rsp_queues; que++) { 2222 for (que = 0; que < ha->max_rsp_queues; que++) {
2223 rsp = ha->rsp_q_map[que]; 2223 rsp = ha->rsp_q_map[que];
2224 if (!rsp) 2224 if (!rsp || !test_bit(que, ha->rsp_qid_map))
2225 continue; 2225 continue;
2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length); 2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2227 *rsp->in_ptr = 0; 2227 *rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4981 4981
4982 for (i = 1; i < ha->max_rsp_queues; i++) { 4982 for (i = 1; i < ha->max_rsp_queues; i++) {
4983 rsp = ha->rsp_q_map[i]; 4983 rsp = ha->rsp_q_map[i];
4984 if (rsp) { 4984 if (rsp && test_bit(i, ha->rsp_qid_map)) {
4985 rsp->options &= ~BIT_0; 4985 rsp->options &= ~BIT_0;
4986 ret = qla25xx_init_rsp_que(base_vha, rsp); 4986 ret = qla25xx_init_rsp_que(base_vha, rsp);
4987 if (ret != QLA_SUCCESS) 4987 if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4996 } 4996 }
4997 for (i = 1; i < ha->max_req_queues; i++) { 4997 for (i = 1; i < ha->max_req_queues; i++) {
4998 req = ha->req_q_map[i]; 4998 req = ha->req_q_map[i];
4999 if (req) { 4999 if (req && test_bit(i, ha->req_qid_map)) {
5000 /* Clear outstanding commands array. */ 5000 /* Clear outstanding commands array. */
5001 req->options &= ~BIT_0; 5001 req->options &= ~BIT_0;
5002 ret = qla25xx_init_req_que(base_vha, req); 5002 ret = qla25xx_init_req_que(base_vha, req);
5003 if (ret != QLA_SUCCESS) 5003 if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb0e9b4..4af95479a9db 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3063 "MSI-X: Failed to enable support " 3063 "MSI-X: Failed to enable support "
3064 "-- %d/%d\n Retry with %d vectors.\n", 3064 "-- %d/%d\n Retry with %d vectors.\n",
3065 ha->msix_count, ret, ret); 3065 ha->msix_count, ret, ret);
3066 ha->msix_count = ret;
3067 ha->max_rsp_queues = ha->msix_count - 1;
3066 } 3068 }
3067 ha->msix_count = ret;
3068 ha->max_rsp_queues = ha->msix_count - 1;
3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3070 ha->msix_count, GFP_KERNEL); 3070 ha->msix_count, GFP_KERNEL);
3071 if (!ha->msix_entries) { 3071 if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594f6c31..cf7ba52bae66 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
600 /* Delete request queues */ 600 /* Delete request queues */
601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) { 601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
602 req = ha->req_q_map[cnt]; 602 req = ha->req_q_map[cnt];
603 if (req) { 603 if (req && test_bit(cnt, ha->req_qid_map)) {
604 ret = qla25xx_delete_req_que(vha, req); 604 ret = qla25xx_delete_req_que(vha, req);
605 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
606 ql_log(ql_log_warn, vha, 0x00ea, 606 ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
614 /* Delete response queues */ 614 /* Delete response queues */
615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { 615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
616 rsp = ha->rsp_q_map[cnt]; 616 rsp = ha->rsp_q_map[cnt];
617 if (rsp) { 617 if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
618 ret = qla25xx_delete_rsp_que(vha, rsp); 618 ret = qla25xx_delete_rsp_que(vha, rsp);
619 if (ret != QLA_SUCCESS) { 619 if (ret != QLA_SUCCESS) {
620 ql_log(ql_log_warn, vha, 0x00eb, 620 ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db43195..f6c7ce35b542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
409 int cnt; 409 int cnt;
410 410
411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
412 if (!test_bit(cnt, ha->req_qid_map))
413 continue;
414
412 req = ha->req_q_map[cnt]; 415 req = ha->req_q_map[cnt];
413 qla2x00_free_req_que(ha, req); 416 qla2x00_free_req_que(ha, req);
414 } 417 }
@@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
416 ha->req_q_map = NULL; 419 ha->req_q_map = NULL;
417 420
418 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 421 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
422 if (!test_bit(cnt, ha->rsp_qid_map))
423 continue;
424
419 rsp = ha->rsp_q_map[cnt]; 425 rsp = ha->rsp_q_map[cnt];
420 qla2x00_free_rsp_que(ha, rsp); 426 qla2x00_free_rsp_que(ha, rsp);
421 } 427 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4cdb45c..ee967becd257 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags); 106 int fn, void *iocb, int flags);
107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd 107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked); 108 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, 109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 struct qla_tgt_srr_imm *imm, int ha_lock); 110 struct qla_tgt_srr_imm *imm, int ha_lock);
111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, 111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1757 0, 0, 0, 0, 0, 0); 1757 0, 0, 0, 0, 0, 0);
1758 else { 1758 else {
1759 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) 1759 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, 1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1761 mcmd->fc_tm_rsp, false); 1761 mcmd->fc_tm_rsp, false);
1762 else 1762 else
@@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2665 /* no need to terminate. FW already freed exchange. */ 2665 /* no need to terminate. FW already freed exchange. */
2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2667 else 2667 else
2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
2669 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2670 return 0; 2670 return 0;
2671 } 2671 }
@@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3173} 3173}
3174 3174
3175static void qlt_send_term_exchange(struct scsi_qla_host *vha, 3175static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3177 int ul_abort)
3177{ 3178{
3178 unsigned long flags = 0; 3179 unsigned long flags = 0;
3179 int rc; 3180 int rc;
@@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3194 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3194 3195
3195done: 3196done:
3196 if (cmd && (!cmd->aborted || 3197 if (cmd && !ul_abort && !cmd->aborted) {
3197 !cmd->cmd_sent_to_fw)) {
3198 if (cmd->sg_mapped) 3198 if (cmd->sg_mapped)
3199 qlt_unmap_sg(vha, cmd); 3199 qlt_unmap_sg(vha, cmd);
3200 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3200 vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3253 3253
3254} 3254}
3255 3255
3256void qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3256int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3257{ 3257{
3258 struct qla_tgt *tgt = cmd->tgt; 3258 struct qla_tgt *tgt = cmd->tgt;
3259 struct scsi_qla_host *vha = tgt->vha; 3259 struct scsi_qla_host *vha = tgt->vha;
3260 struct se_cmd *se_cmd = &cmd->se_cmd; 3260 struct se_cmd *se_cmd = &cmd->se_cmd;
3261 unsigned long flags;
3261 3262
3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3263 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3263 "qla_target(%d): terminating exchange for aborted cmd=%p " 3264 "qla_target(%d): terminating exchange for aborted cmd=%p "
3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3265 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3265 se_cmd->tag); 3266 se_cmd->tag);
3266 3267
3268 spin_lock_irqsave(&cmd->cmd_lock, flags);
3269 if (cmd->aborted) {
3270 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3271 /*
3272 * It's normal to see 2 calls in this path:
3273 * 1) XFER Rdy completion + CMD_T_ABORT
3274 * 2) TCM TMR - drain_state_list
3275 */
3276 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
3277 "multiple abort. %p transport_state %x, t_state %x,"
3278 " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
3279 cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
3280 return EIO;
3281 }
3267 cmd->aborted = 1; 3282 cmd->aborted = 1;
3268 cmd->cmd_flags |= BIT_6; 3283 cmd->cmd_flags |= BIT_6;
3284 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3269 3285
3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); 3286 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
3287 return 0;
3271} 3288}
3272EXPORT_SYMBOL(qlt_abort_cmd); 3289EXPORT_SYMBOL(qlt_abort_cmd);
3273 3290
@@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3282 3299
3283 BUG_ON(cmd->cmd_in_wq); 3300 BUG_ON(cmd->cmd_in_wq);
3284 3301
3302 if (cmd->sg_mapped)
3303 qlt_unmap_sg(cmd->vha, cmd);
3304
3285 if (!cmd->q_full) 3305 if (!cmd->q_full)
3286 qlt_decr_num_pend_cmds(cmd->vha); 3306 qlt_decr_num_pend_cmds(cmd->vha);
3287 3307
@@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3399 term = 1; 3419 term = 1;
3400 3420
3401 if (term) 3421 if (term)
3402 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3422 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3403 3423
3404 return term; 3424 return term;
3405} 3425}
@@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3580 case CTIO_PORT_LOGGED_OUT: 3600 case CTIO_PORT_LOGGED_OUT:
3581 case CTIO_PORT_UNAVAILABLE: 3601 case CTIO_PORT_UNAVAILABLE:
3582 { 3602 {
3583 int logged_out = (status & 0xFFFF); 3603 int logged_out =
3604 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3605
3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3606 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3585 "qla_target(%d): CTIO with %s status %x " 3607 "qla_target(%d): CTIO with %s status %x "
3586 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3608 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3587 (logged_out == CTIO_PORT_LOGGED_OUT) ? 3609 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3589 status, cmd->state, se_cmd); 3610 status, cmd->state, se_cmd);
3590 3611
3591 if (logged_out && cmd->sess) { 3612 if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3754 goto out_term; 3775 goto out_term;
3755 } 3776 }
3756 3777
3778 spin_lock_init(&cmd->cmd_lock);
3757 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3779 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3758 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3780 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3759 cmd->unpacked_lun = scsilun_to_int( 3781 cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@ out_term:
3796 */ 3818 */
3797 cmd->cmd_flags |= BIT_2; 3819 cmd->cmd_flags |= BIT_2;
3798 spin_lock_irqsave(&ha->hardware_lock, flags); 3820 spin_lock_irqsave(&ha->hardware_lock, flags);
3799 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 3821 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
3800 3822
3801 qlt_decr_num_pend_cmds(vha); 3823 qlt_decr_num_pend_cmds(vha);
3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3824 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3918 3940
3919out_term: 3941out_term:
3920 spin_lock_irqsave(&ha->hardware_lock, flags); 3942 spin_lock_irqsave(&ha->hardware_lock, flags);
3921 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3943 qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
3922 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3944 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3923 kfree(op); 3945 kfree(op);
3924 3946
@@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3982 4004
3983 cmd->cmd_in_wq = 1; 4005 cmd->cmd_in_wq = 1;
3984 cmd->cmd_flags |= BIT_0; 4006 cmd->cmd_flags |= BIT_0;
3985 cmd->se_cmd.cpuid = -1; 4007 cmd->se_cmd.cpuid = ha->msix_count ?
4008 ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
3986 4009
3987 spin_lock(&vha->cmd_list_lock); 4010 spin_lock(&vha->cmd_list_lock);
3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4011 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3990 4013
3991 INIT_WORK(&cmd->work, qlt_do_work); 4014 INIT_WORK(&cmd->work, qlt_do_work);
3992 if (ha->msix_count) { 4015 if (ha->msix_count) {
3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4016 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
3995 queue_work_on(smp_processor_id(), qla_tgt_wq, 4017 queue_work_on(smp_processor_id(), qla_tgt_wq,
3996 &cmd->work); 4018 &cmd->work);
@@ -4771,7 +4793,7 @@ out_reject:
4771 dump_stack(); 4793 dump_stack();
4772 } else { 4794 } else {
4773 cmd->cmd_flags |= BIT_9; 4795 cmd->cmd_flags |= BIT_9;
4774 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 4796 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
4775 } 4797 }
4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4777} 4799}
@@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4950 sctio, sctio->srr_id); 4972 sctio, sctio->srr_id);
4951 list_del(&sctio->srr_list_entry); 4973 list_del(&sctio->srr_list_entry);
4952 qlt_send_term_exchange(vha, sctio->cmd, 4974 qlt_send_term_exchange(vha, sctio->cmd,
4953 &sctio->cmd->atio, 1); 4975 &sctio->cmd->atio, 1, 0);
4954 kfree(sctio); 4976 kfree(sctio);
4955 } 4977 }
4956 } 4978 }
@@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
5123 atio->u.isp24.fcp_hdr.s_id); 5145 atio->u.isp24.fcp_hdr.s_id);
5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5146 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5125 if (!sess) { 5147 if (!sess) {
5126 qlt_send_term_exchange(vha, NULL, atio, 1); 5148 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5127 return 0; 5149 return 0;
5128 } 5150 }
5129 /* Sending marker isn't necessary, since we called from ISR */ 5151 /* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5406#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5428#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5429 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5408#else 5430#else
5409 qlt_send_term_exchange(vha, NULL, atio, 1); 5431 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5410#endif 5432#endif
5411 5433
5412 if (!ha_locked) 5434 if (!ha_locked)
@@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5523#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5545#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5524 qlt_send_busy(vha, atio, 0); 5546 qlt_send_busy(vha, atio, 0);
5525#else 5547#else
5526 qlt_send_term_exchange(vha, NULL, atio, 1); 5548 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5527#endif 5549#endif
5528 } else { 5550 } else {
5529 if (tgt->tgt_stop) { 5551 if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5532 "command to target, sending TERM " 5554 "command to target, sending TERM "
5533 "EXCHANGE for rsp\n"); 5555 "EXCHANGE for rsp\n");
5534 qlt_send_term_exchange(vha, NULL, 5556 qlt_send_term_exchange(vha, NULL,
5535 atio, 1); 5557 atio, 1, 0);
5536 } else { 5558 } else {
5537 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5559 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5538 "qla_target(%d): Unable to send " 5560 "qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5960 return; 5982 return;
5961 5983
5962out_term: 5984out_term:
5963 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0); 5985 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5964 if (sess) 5986 if (sess)
5965 ha->tgt.tgt_ops->put_sess(sess); 5987 ha->tgt.tgt_ops->put_sess(sess);
5966 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5988 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865ba3c8..22a6a767fe07 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@ struct qla_tgt_sess {
943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; 943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
944}; 944};
945 945
946typedef enum {
947 /*
948 * BIT_0 - Atio Arrival / schedule to work
949 * BIT_1 - qlt_do_work
950 * BIT_2 - qlt_do work failed
951 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
952 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
953 * BIT_5 - status respond / tcm_qla2xx_queue_status
954 * BIT_6 - tcm request to abort/Term exchange.
955 * pre_xmit_response->qlt_send_term_exchange
956 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
957 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
958 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
959 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
960
961 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
962 * BIT_13 - Bad completion -
963 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
964 * BIT_14 - Back end data received/sent.
965 * BIT_15 - SRR prepare ctio
966 * BIT_16 - complete free
967 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
968 * BIT_18 - completion w/abort status
969 * BIT_19 - completion w/unknown status
970 * BIT_20 - tcm_qla2xxx_free_cmd
971 */
972 CMD_FLAG_DATA_WORK = BIT_11,
973 CMD_FLAG_DATA_WORK_FREE = BIT_21,
974} cmd_flags_t;
975
946struct qla_tgt_cmd { 976struct qla_tgt_cmd {
947 struct se_cmd se_cmd; 977 struct se_cmd se_cmd;
948 struct qla_tgt_sess *sess; 978 struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@ struct qla_tgt_cmd {
952 /* Sense buffer that will be mapped into outgoing status */ 982 /* Sense buffer that will be mapped into outgoing status */
953 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; 983 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
954 984
985 spinlock_t cmd_lock;
955 /* to save extra sess dereferences */ 986 /* to save extra sess dereferences */
956 unsigned int conf_compl_supported:1; 987 unsigned int conf_compl_supported:1;
957 unsigned int sg_mapped:1; 988 unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@ struct qla_tgt_cmd {
986 1017
987 uint64_t jiffies_at_alloc; 1018 uint64_t jiffies_at_alloc;
988 uint64_t jiffies_at_free; 1019 uint64_t jiffies_at_free;
989 /* BIT_0 - Atio Arrival / schedule to work 1020
990 * BIT_1 - qlt_do_work 1021 cmd_flags_t cmd_flags;
991 * BIT_2 - qlt_do work failed
992 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
993 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
994 * BIT_5 - status respond / tcm_qla2xx_queue_status
995 * BIT_6 - tcm request to abort/Term exchange.
996 * pre_xmit_response->qlt_send_term_exchange
997 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
998 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
999 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
1000 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
1001 * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
1002 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
1003 * BIT_13 - Bad completion -
1004 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
1005 * BIT_14 - Back end data received/sent.
1006 * BIT_15 - SRR prepare ctio
1007 * BIT_16 - complete free
1008 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
1009 * BIT_18 - completion w/abort status
1010 * BIT_19 - completion w/unknown status
1011 */
1012 uint32_t cmd_flags;
1013}; 1022};
1014 1023
1015struct qla_tgt_sess_work_param { 1024struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
1148extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1157extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1149extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1158extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1150extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1159extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1151extern void qlt_abort_cmd(struct qla_tgt_cmd *); 1160extern int qlt_abort_cmd(struct qla_tgt_cmd *);
1152extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1161extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1153extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1162extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1154extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1163extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7ac14d..c3e622524604 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { 395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
396 for (i = 0; i < vha->hw->max_req_queues; i++) { 396 for (i = 0; i < vha->hw->max_req_queues; i++) {
397 struct req_que *req = vha->hw->req_q_map[i]; 397 struct req_que *req = vha->hw->req_q_map[i];
398
399 if (!test_bit(i, vha->hw->req_qid_map))
400 continue;
401
398 if (req || !buf) { 402 if (req || !buf) {
399 length = req ? 403 length = req ?
400 req->length : REQUEST_ENTRY_CNT_24XX; 404 req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
408 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { 412 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
409 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 413 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
410 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 414 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
415
416 if (!test_bit(i, vha->hw->rsp_qid_map))
417 continue;
418
411 if (rsp || !buf) { 419 if (rsp || !buf) {
412 length = rsp ? 420 length = rsp ?
413 rsp->length : RESPONSE_ENTRY_CNT_MQ; 421 rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
634 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { 642 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
635 for (i = 0; i < vha->hw->max_req_queues; i++) { 643 for (i = 0; i < vha->hw->max_req_queues; i++) {
636 struct req_que *req = vha->hw->req_q_map[i]; 644 struct req_que *req = vha->hw->req_q_map[i];
645
646 if (!test_bit(i, vha->hw->req_qid_map))
647 continue;
648
637 if (req || !buf) { 649 if (req || !buf) {
638 qla27xx_insert16(i, buf, len); 650 qla27xx_insert16(i, buf, len);
639 qla27xx_insert16(1, buf, len); 651 qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
645 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { 657 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
646 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 658 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
647 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 659 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
660
661 if (!test_bit(i, vha->hw->rsp_qid_map))
662 continue;
663
648 if (rsp || !buf) { 664 if (rsp || !buf) {
649 qla27xx_insert16(i, buf, len); 665 qla27xx_insert16(i, buf, len);
650 qla27xx_insert16(1, buf, len); 666 qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a126627f..1808a01cfb7e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
298{ 298{
299 cmd->vha->tgt_counters.core_qla_free_cmd++; 299 cmd->vha->tgt_counters.core_qla_free_cmd++;
300 cmd->cmd_in_wq = 1; 300 cmd->cmd_in_wq = 1;
301
302 BUG_ON(cmd->cmd_flags & BIT_20);
303 cmd->cmd_flags |= BIT_20;
304
301 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 305 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
302 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); 306 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
303} 307}
@@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 378{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 379 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 380 struct qla_tgt_cmd, se_cmd);
381
382 if (cmd->aborted) {
383 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
384 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
385 * already kick start the free.
386 */
387 pr_debug("write_pending aborted cmd[%p] refcount %d "
388 "transport_state %x, t_state %x, se_cmd_flags %x\n",
389 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
390 cmd->se_cmd.transport_state,
391 cmd->se_cmd.t_state,
392 cmd->se_cmd.se_cmd_flags);
393 return 0;
394 }
377 cmd->cmd_flags |= BIT_3; 395 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 396 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 397 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 423 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 424 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 425 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3 * HZ); 426 50);
409 return 0; 427 return 0;
410 } 428 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 429 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
444 if (bidi) 462 if (bidi)
445 flags |= TARGET_SCF_BIDI_OP; 463 flags |= TARGET_SCF_BIDI_OP;
446 464
465 if (se_cmd->cpuid != WORK_CPU_UNBOUND)
466 flags |= TARGET_SCF_USE_CPUID;
467
447 sess = cmd->sess; 468 sess = cmd->sess;
448 if (!sess) { 469 if (!sess) {
449 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); 470 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
465static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 486static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
466{ 487{
467 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 488 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
489 unsigned long flags;
468 490
469 /* 491 /*
470 * Ensure that the complete FCP WRITE payload has been received. 492 * Ensure that the complete FCP WRITE payload has been received.
471 * Otherwise return an exception via CHECK_CONDITION status. 493 * Otherwise return an exception via CHECK_CONDITION status.
472 */ 494 */
473 cmd->cmd_in_wq = 0; 495 cmd->cmd_in_wq = 0;
474 cmd->cmd_flags |= BIT_11; 496
497 spin_lock_irqsave(&cmd->cmd_lock, flags);
498 cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
499 if (cmd->aborted) {
500 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
501 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
502
503 tcm_qla2xxx_free_cmd(cmd);
504 return;
505 }
506 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
507
475 cmd->vha->tgt_counters.qla_core_ret_ctio++; 508 cmd->vha->tgt_counters.qla_core_ret_ctio++;
476 if (!cmd->write_data_transferred) { 509 if (!cmd->write_data_transferred) {
477 /* 510 /*
@@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
546 struct qla_tgt_cmd *cmd = container_of(se_cmd, 579 struct qla_tgt_cmd *cmd = container_of(se_cmd,
547 struct qla_tgt_cmd, se_cmd); 580 struct qla_tgt_cmd, se_cmd);
548 581
582 if (cmd->aborted) {
583 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
584 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
585 * already kick start the free.
586 */
587 pr_debug("queue_data_in aborted cmd[%p] refcount %d "
588 "transport_state %x, t_state %x, se_cmd_flags %x\n",
589 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
590 cmd->se_cmd.transport_state,
591 cmd->se_cmd.t_state,
592 cmd->se_cmd.se_cmd_flags);
593 return 0;
594 }
595
549 cmd->cmd_flags |= BIT_4; 596 cmd->cmd_flags |= BIT_4;
550 cmd->bufflen = se_cmd->data_length; 597 cmd->bufflen = se_cmd->data_length;
551 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 598 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
637 qlt_xmit_tm_rsp(mcmd); 684 qlt_xmit_tm_rsp(mcmd);
638} 685}
639 686
687
688#define DATA_WORK_NOT_FREE(_flags) \
689 (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
690 CMD_FLAG_DATA_WORK)
640static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 691static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
641{ 692{
642 struct qla_tgt_cmd *cmd = container_of(se_cmd, 693 struct qla_tgt_cmd *cmd = container_of(se_cmd,
643 struct qla_tgt_cmd, se_cmd); 694 struct qla_tgt_cmd, se_cmd);
644 qlt_abort_cmd(cmd); 695 unsigned long flags;
696
697 if (qlt_abort_cmd(cmd))
698 return;
699
700 spin_lock_irqsave(&cmd->cmd_lock, flags);
701 if ((cmd->state == QLA_TGT_STATE_NEW)||
702 ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
703 DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
704
705 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
706 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
707 /* Cmd have not reached firmware.
708 * Use this trigger to free it. */
709 tcm_qla2xxx_free_cmd(cmd);
710 return;
711 }
712 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
713 return;
714
645} 715}
646 716
647static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 717static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 47b9d13f97b8..bbfbfd9e5aa3 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,8 @@ static struct {
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, 205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
209 {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 210 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
209 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 211 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
210 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 212 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a851e2c7..00bc7218a7f8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1272void scsi_remove_target(struct device *dev) 1272void scsi_remove_target(struct device *dev)
1273{ 1273{
1274 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1274 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1275 struct scsi_target *starget; 1275 struct scsi_target *starget, *last_target = NULL;
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
1278restart: 1278restart:
1279 spin_lock_irqsave(shost->host_lock, flags); 1279 spin_lock_irqsave(shost->host_lock, flags);
1280 list_for_each_entry(starget, &shost->__targets, siblings) { 1280 list_for_each_entry(starget, &shost->__targets, siblings) {
1281 if (starget->state == STARGET_DEL) 1281 if (starget->state == STARGET_DEL ||
1282 starget == last_target)
1282 continue; 1283 continue;
1283 if (starget->dev.parent == dev || &starget->dev == dev) { 1284 if (starget->dev.parent == dev || &starget->dev == dev) {
1284 kref_get(&starget->reap_ref); 1285 kref_get(&starget->reap_ref);
1286 last_target = starget;
1285 spin_unlock_irqrestore(shost->host_lock, flags); 1287 spin_unlock_irqrestore(shost->host_lock, flags);
1286 __scsi_remove_target(starget); 1288 __scsi_remove_target(starget);
1287 scsi_target_reap(starget); 1289 scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bb669d32ccd0..d749da765df1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
761 break; 761 break;
762 762
763 default: 763 default:
764 ret = BLKPREP_KILL; 764 ret = BLKPREP_INVALID;
765 goto out; 765 goto out;
766 } 766 }
767 767
@@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
839 int ret; 839 int ret;
840 840
841 if (sdkp->device->no_write_same) 841 if (sdkp->device->no_write_same)
842 return BLKPREP_KILL; 842 return BLKPREP_INVALID;
843 843
844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); 844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
845 845
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 55627d097873..292c04eec9ad 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,6 +42,7 @@
42#include <scsi/scsi_devinfo.h> 42#include <scsi/scsi_devinfo.h>
43#include <scsi/scsi_dbg.h> 43#include <scsi/scsi_dbg.h>
44#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
45#include <scsi/scsi_transport.h>
45 46
46/* 47/*
47 * All wire protocol details (storage protocol between the guest and the host) 48 * All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@ struct hv_host_device {
477struct storvsc_scan_work { 478struct storvsc_scan_work {
478 struct work_struct work; 479 struct work_struct work;
479 struct Scsi_Host *host; 480 struct Scsi_Host *host;
480 uint lun; 481 u8 lun;
482 u8 tgt_id;
481}; 483};
482 484
483static void storvsc_device_scan(struct work_struct *work) 485static void storvsc_device_scan(struct work_struct *work)
484{ 486{
485 struct storvsc_scan_work *wrk; 487 struct storvsc_scan_work *wrk;
486 uint lun;
487 struct scsi_device *sdev; 488 struct scsi_device *sdev;
488 489
489 wrk = container_of(work, struct storvsc_scan_work, work); 490 wrk = container_of(work, struct storvsc_scan_work, work);
490 lun = wrk->lun;
491 491
492 sdev = scsi_device_lookup(wrk->host, 0, 0, lun); 492 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
493 if (!sdev) 493 if (!sdev)
494 goto done; 494 goto done;
495 scsi_rescan_device(&sdev->sdev_gendev); 495 scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work)
540 if (!scsi_host_get(wrk->host)) 540 if (!scsi_host_get(wrk->host))
541 goto done; 541 goto done;
542 542
543 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); 543 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
544 544
545 if (sdev) { 545 if (sdev) {
546 scsi_remove_device(sdev); 546 scsi_remove_device(sdev);
@@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
940 940
941 wrk->host = host; 941 wrk->host = host;
942 wrk->lun = vm_srb->lun; 942 wrk->lun = vm_srb->lun;
943 wrk->tgt_id = vm_srb->target_id;
943 INIT_WORK(&wrk->work, process_err_fn); 944 INIT_WORK(&wrk->work, process_err_fn);
944 schedule_work(&wrk->work); 945 schedule_work(&wrk->work);
945} 946}
@@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void)
1770 fc_transport_template = fc_attach_transport(&fc_transport_functions); 1771 fc_transport_template = fc_attach_transport(&fc_transport_functions);
1771 if (!fc_transport_template) 1772 if (!fc_transport_template)
1772 return -ENODEV; 1773 return -ENODEV;
1774
1775 /*
1776 * Install Hyper-V specific timeout handler.
1777 */
1778 fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
1773#endif 1779#endif
1774 1780
1775 ret = vmbus_driver_register(&storvsc_drv); 1781 ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { 37 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1571 1571
1572 as->use_cs_gpios = true; 1572 as->use_cs_gpios = true;
1573 if (atmel_spi_is_v2(as) && 1573 if (atmel_spi_is_v2(as) &&
1574 pdev->dev.of_node &&
1574 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { 1575 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1575 as->use_cs_gpios = false; 1576 as->use_cs_gpios = false;
1576 master->num_chipselect = 4; 1577 master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
73 73
74/* Bitfields in CNTL1 */ 74/* Bitfields in CNTL1 */
75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700 75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
76#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080 76#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
77#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040 77#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002 78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001 79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
80 80
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
84/* SPCOM register values */ 84/* SPCOM register values */
85#define SPCOM_CS(x) ((x) << 30) 85#define SPCOM_CS(x) ((x) << 30)
86#define SPCOM_TRANLEN(x) ((x) << 0) 86#define SPCOM_TRANLEN(x) ((x) << 0)
87#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 87#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
88 88
89#define AUTOSUSPEND_TIMEOUT 2000 89#define AUTOSUSPEND_TIMEOUT 2000
90 90
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
233 reinit_completion(&mpc8xxx_spi->done); 233 reinit_completion(&mpc8xxx_spi->done);
234 234
235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ 235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
236 if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 236 if (t->len > SPCOM_TRANLEN_MAX) {
237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
238 " beyond the SPCOM[TRANLEN] field\n", t->len); 238 " beyond the SPCOM[TRANLEN] field\n", t->len);
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..6a4ff27f4357 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
929 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 929 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc_tx) 931 if (!desc_tx)
932 goto no_dma; 932 goto tx_nodma;
933 933
934 desc_tx->callback = spi_imx_dma_tx_callback; 934 desc_tx->callback = spi_imx_dma_tx_callback;
935 desc_tx->callback_param = (void *)spi_imx; 935 desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
941 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 941 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
943 if (!desc_rx) 943 if (!desc_rx)
944 goto no_dma; 944 goto rx_nodma;
945 945
946 desc_rx->callback = spi_imx_dma_rx_callback; 946 desc_rx->callback = spi_imx_dma_rx_callback;
947 desc_rx->callback_param = (void *)spi_imx; 947 desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1008 1008
1009 return ret; 1009 return ret;
1010 1010
1011no_dma: 1011rx_nodma:
1012 dmaengine_terminate_all(master->dma_tx);
1013tx_nodma:
1012 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 1014 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1013 dev_driver_string(&master->dev), 1015 dev_driver_string(&master->dev),
1014 dev_name(&master->dev)); 1016 dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
761 test.iterate_transfer_mask = 1; 761 test.iterate_transfer_mask = 1;
762 762
763 /* count number of transfers with tx/rx_buf != NULL */ 763 /* count number of transfers with tx/rx_buf != NULL */
764 rx_count = tx_count = 0;
764 for (i = 0; i < test.transfer_count; i++) { 765 for (i = 0; i < test.transfer_count; i++) {
765 if (test.transfers[i].tx_buf) 766 if (test.transfers[i].tx_buf)
766 tx_count++; 767 tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1490 return status; 1490 return status;
1491 1491
1492disable_pm: 1492disable_pm:
1493 pm_runtime_dont_use_autosuspend(&pdev->dev);
1494 pm_runtime_put_sync(&pdev->dev);
1493 pm_runtime_disable(&pdev->dev); 1495 pm_runtime_disable(&pdev->dev);
1494free_master: 1496free_master:
1495 spi_master_put(master); 1497 spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1501 struct spi_master *master = platform_get_drvdata(pdev); 1503 struct spi_master *master = platform_get_drvdata(pdev);
1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1504 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1503 1505
1506 pm_runtime_dont_use_autosuspend(mcspi->dev);
1504 pm_runtime_put_sync(mcspi->dev); 1507 pm_runtime_put_sync(mcspi->dev);
1505 pm_runtime_disable(&pdev->dev); 1508 pm_runtime_disable(&pdev->dev);
1506 1509
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49674d3..713c63d9681b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
898 da->unmap_zeroes_data = flag; 898 da->unmap_zeroes_data = flag;
899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", 899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
900 da->da_dev, flag); 900 da->da_dev, flag);
901 return 0; 901 return count;
902} 902}
903 903
904/* 904/*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a8cbd0..da457e25717a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
828 return dev; 828 return dev;
829} 829}
830 830
831/*
832 * Check if the underlying struct block_device request_queue supports
833 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
834 * in ATA and we need to set TPE=1
835 */
836bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
837 struct request_queue *q, int block_size)
838{
839 if (!blk_queue_discard(q))
840 return false;
841
842 attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
843 block_size;
844 /*
845 * Currently hardcoded to 1 in Linux/SCSI code..
846 */
847 attrib->max_unmap_block_desc_count = 1;
848 attrib->unmap_granularity = q->limits.discard_granularity / block_size;
849 attrib->unmap_granularity_alignment = q->limits.discard_alignment /
850 block_size;
851 attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
852 return true;
853}
854EXPORT_SYMBOL(target_configure_unmap_from_queue);
855
856/*
857 * Convert from blocksize advertised to the initiator to the 512 byte
858 * units unconditionally used by the Linux block layer.
859 */
860sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
861{
862 switch (dev->dev_attrib.block_size) {
863 case 4096:
864 return lb << 3;
865 case 2048:
866 return lb << 2;
867 case 1024:
868 return lb << 1;
869 default:
870 return lb;
871 }
872}
873EXPORT_SYMBOL(target_to_linux_sector);
874
831int target_configure_device(struct se_device *dev) 875int target_configure_device(struct se_device *dev)
832{ 876{
833 struct se_hba *hba = dev->se_hba; 877 struct se_hba *hba = dev->se_hba;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e3195700211a..75f0f08b2a34 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
160 " block_device blocks: %llu logical_block_size: %d\n", 160 " block_device blocks: %llu logical_block_size: %d\n",
161 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 161 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
162 fd_dev->fd_block_size); 162 fd_dev->fd_block_size);
163 /* 163
164 * Check if the underlying struct block_device request_queue supports 164 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
165 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 165 fd_dev->fd_block_size))
166 * in ATA and we need to set TPE=1
167 */
168 if (blk_queue_discard(q)) {
169 dev->dev_attrib.max_unmap_lba_count =
170 q->limits.max_discard_sectors;
171 /*
172 * Currently hardcoded to 1 in Linux/SCSI code..
173 */
174 dev->dev_attrib.max_unmap_block_desc_count = 1;
175 dev->dev_attrib.unmap_granularity =
176 q->limits.discard_granularity >> 9;
177 dev->dev_attrib.unmap_granularity_alignment =
178 q->limits.discard_alignment;
179 pr_debug("IFILE: BLOCK Discard support available," 166 pr_debug("IFILE: BLOCK Discard support available,"
180 " disabled by default\n"); 167 " disabled by default\n");
181 }
182 /* 168 /*
183 * Enable write same emulation for IBLOCK and use 0xFFFF as 169 * Enable write same emulation for IBLOCK and use 0xFFFF as
184 * the smaller WRITE_SAME(10) only has a two-byte block count. 170 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
490 if (S_ISBLK(inode->i_mode)) { 476 if (S_ISBLK(inode->i_mode)) {
491 /* The backend is block device, use discard */ 477 /* The backend is block device, use discard */
492 struct block_device *bdev = inode->i_bdev; 478 struct block_device *bdev = inode->i_bdev;
479 struct se_device *dev = cmd->se_dev;
493 480
494 ret = blkdev_issue_discard(bdev, lba, 481 ret = blkdev_issue_discard(bdev,
495 nolb, GFP_KERNEL, 0); 482 target_to_linux_sector(dev, lba),
483 target_to_linux_sector(dev, nolb),
484 GFP_KERNEL, 0);
496 if (ret < 0) { 485 if (ret < 0) {
497 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 486 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
498 ret); 487 ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f9f50b..abe4eb997a84 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
122 dev->dev_attrib.hw_queue_depth = q->nr_requests; 122 dev->dev_attrib.hw_queue_depth = q->nr_requests;
123 123
124 /* 124 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
125 * Check if the underlying struct block_device request_queue supports 125 dev->dev_attrib.hw_block_size))
126 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
127 * in ATA and we need to set TPE=1
128 */
129 if (blk_queue_discard(q)) {
130 dev->dev_attrib.max_unmap_lba_count =
131 q->limits.max_discard_sectors;
132
133 /*
134 * Currently hardcoded to 1 in Linux/SCSI code..
135 */
136 dev->dev_attrib.max_unmap_block_desc_count = 1;
137 dev->dev_attrib.unmap_granularity =
138 q->limits.discard_granularity >> 9;
139 dev->dev_attrib.unmap_granularity_alignment =
140 q->limits.discard_alignment;
141 dev->dev_attrib.unmap_zeroes_data =
142 q->limits.discard_zeroes_data;
143
144 pr_debug("IBLOCK: BLOCK Discard support available," 126 pr_debug("IBLOCK: BLOCK Discard support available,"
145 " disabled by default\n"); 127 " disabled by default\n");
146 } 128
147 /* 129 /*
148 * Enable write same emulation for IBLOCK and use 0xFFFF as 130 * Enable write same emulation for IBLOCK and use 0xFFFF as
149 * the smaller WRITE_SAME(10) only has a two-byte block count. 131 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@ static sense_reason_t
415iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) 397iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
416{ 398{
417 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 399 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
400 struct se_device *dev = cmd->se_dev;
418 int ret; 401 int ret;
419 402
420 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 403 ret = blkdev_issue_discard(bdev,
404 target_to_linux_sector(dev, lba),
405 target_to_linux_sector(dev, nolb),
406 GFP_KERNEL, 0);
421 if (ret < 0) { 407 if (ret < 0) {
422 pr_err("blkdev_issue_discard() failed: %d\n", ret); 408 pr_err("blkdev_issue_discard() failed: %d\n", ret);
423 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 409 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
433 struct scatterlist *sg; 419 struct scatterlist *sg;
434 struct bio *bio; 420 struct bio *bio;
435 struct bio_list list; 421 struct bio_list list;
436 sector_t block_lba = cmd->t_task_lba; 422 struct se_device *dev = cmd->se_dev;
437 sector_t sectors = sbc_get_write_same_sectors(cmd); 423 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
424 sector_t sectors = target_to_linux_sector(dev,
425 sbc_get_write_same_sectors(cmd));
438 426
439 if (cmd->prot_op) { 427 if (cmd->prot_op) {
440 pr_err("WRITE_SAME: Protection information with IBLOCK" 428 pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
648 enum dma_data_direction data_direction) 636 enum dma_data_direction data_direction)
649{ 637{
650 struct se_device *dev = cmd->se_dev; 638 struct se_device *dev = cmd->se_dev;
639 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
651 struct iblock_req *ibr; 640 struct iblock_req *ibr;
652 struct bio *bio, *bio_start; 641 struct bio *bio, *bio_start;
653 struct bio_list list; 642 struct bio_list list;
654 struct scatterlist *sg; 643 struct scatterlist *sg;
655 u32 sg_num = sgl_nents; 644 u32 sg_num = sgl_nents;
656 sector_t block_lba;
657 unsigned bio_cnt; 645 unsigned bio_cnt;
658 int rw = 0; 646 int rw = 0;
659 int i; 647 int i;
@@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
679 rw = READ; 667 rw = READ;
680 } 668 }
681 669
682 /*
683 * Convert the blocksize advertised to the initiator to the 512 byte
684 * units unconditionally used by the Linux block layer.
685 */
686 if (dev->dev_attrib.block_size == 4096)
687 block_lba = (cmd->t_task_lba << 3);
688 else if (dev->dev_attrib.block_size == 2048)
689 block_lba = (cmd->t_task_lba << 2);
690 else if (dev->dev_attrib.block_size == 1024)
691 block_lba = (cmd->t_task_lba << 1);
692 else if (dev->dev_attrib.block_size == 512)
693 block_lba = cmd->t_task_lba;
694 else {
695 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
696 " %u\n", dev->dev_attrib.block_size);
697 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
698 }
699
700 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 670 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
701 if (!ibr) 671 if (!ibr)
702 goto fail; 672 goto fail;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c2032..db4412fe6b8a 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); 141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
144bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
145void transport_clear_lun_ref(struct se_lun *); 144void transport_clear_lun_ref(struct se_lun *);
146void transport_send_task_abort(struct se_cmd *); 145void transport_send_task_abort(struct se_cmd *);
147sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 146sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb117c60d..82a663ba9800 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
68 68
69 if (dev) { 69 if (dev) {
70 spin_lock_irqsave(&dev->se_tmr_lock, flags); 70 spin_lock_irqsave(&dev->se_tmr_lock, flags);
71 list_del(&tmr->tmr_list); 71 list_del_init(&tmr->tmr_list);
72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
73 } 73 }
74 74
75 kfree(tmr); 75 kfree(tmr);
76} 76}
77 77
78static void core_tmr_handle_tas_abort( 78static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
79 struct se_node_acl *tmr_nacl,
80 struct se_cmd *cmd,
81 int tas)
82{ 79{
83 bool remove = true; 80 unsigned long flags;
81 bool remove = true, send_tas;
84 /* 82 /*
85 * TASK ABORTED status (TAS) bit support 83 * TASK ABORTED status (TAS) bit support
86 */ 84 */
87 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { 85 spin_lock_irqsave(&cmd->t_state_lock, flags);
86 send_tas = (cmd->transport_state & CMD_T_TAS);
87 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
88
89 if (send_tas) {
88 remove = false; 90 remove = false;
89 transport_send_task_abort(cmd); 91 transport_send_task_abort(cmd);
90 } 92 }
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
107 return 1; 109 return 1;
108} 110}
109 111
112static bool __target_check_io_state(struct se_cmd *se_cmd,
113 struct se_session *tmr_sess, int tas)
114{
115 struct se_session *sess = se_cmd->se_sess;
116
117 assert_spin_locked(&sess->sess_cmd_lock);
118 WARN_ON_ONCE(!irqs_disabled());
119 /*
120 * If command already reached CMD_T_COMPLETE state within
121 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
122 * this se_cmd has been passed to fabric driver and will
123 * not be aborted.
124 *
125 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
126 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
127 * long as se_cmd->cmd_kref is still active unless zero.
128 */
129 spin_lock(&se_cmd->t_state_lock);
130 if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
131 pr_debug("Attempted to abort io tag: %llu already complete or"
132 " fabric stop, skipping\n", se_cmd->tag);
133 spin_unlock(&se_cmd->t_state_lock);
134 return false;
135 }
136 if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
137 pr_debug("Attempted to abort io tag: %llu already shutdown,"
138 " skipping\n", se_cmd->tag);
139 spin_unlock(&se_cmd->t_state_lock);
140 return false;
141 }
142 se_cmd->transport_state |= CMD_T_ABORTED;
143
144 if ((tmr_sess != se_cmd->se_sess) && tas)
145 se_cmd->transport_state |= CMD_T_TAS;
146
147 spin_unlock(&se_cmd->t_state_lock);
148
149 return kref_get_unless_zero(&se_cmd->cmd_kref);
150}
151
110void core_tmr_abort_task( 152void core_tmr_abort_task(
111 struct se_device *dev, 153 struct se_device *dev,
112 struct se_tmr_req *tmr, 154 struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@ void core_tmr_abort_task(
130 if (tmr->ref_task_tag != ref_tag) 172 if (tmr->ref_task_tag != ref_tag)
131 continue; 173 continue;
132 174
133 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
134 continue;
135
136 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", 175 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
137 se_cmd->se_tfo->get_fabric_name(), ref_tag); 176 se_cmd->se_tfo->get_fabric_name(), ref_tag);
138 177
139 spin_lock(&se_cmd->t_state_lock); 178 if (!__target_check_io_state(se_cmd, se_sess, 0)) {
140 if (se_cmd->transport_state & CMD_T_COMPLETE) {
141 printk("ABORT_TASK: ref_tag: %llu already complete,"
142 " skipping\n", ref_tag);
143 spin_unlock(&se_cmd->t_state_lock);
144 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 179 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
145
146 target_put_sess_cmd(se_cmd); 180 target_put_sess_cmd(se_cmd);
147
148 goto out; 181 goto out;
149 } 182 }
150 se_cmd->transport_state |= CMD_T_ABORTED;
151 spin_unlock(&se_cmd->t_state_lock);
152
153 list_del_init(&se_cmd->se_cmd_list); 183 list_del_init(&se_cmd->se_cmd_list);
154 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 184 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
155 185
156 cancel_work_sync(&se_cmd->work); 186 cancel_work_sync(&se_cmd->work);
157 transport_wait_for_tasks(se_cmd); 187 transport_wait_for_tasks(se_cmd);
158 188
159 target_put_sess_cmd(se_cmd);
160 transport_cmd_finish_abort(se_cmd, true); 189 transport_cmd_finish_abort(se_cmd, true);
190 target_put_sess_cmd(se_cmd);
161 191
162 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 192 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
163 " ref_tag: %llu\n", ref_tag); 193 " ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
178 struct list_head *preempt_and_abort_list) 208 struct list_head *preempt_and_abort_list)
179{ 209{
180 LIST_HEAD(drain_tmr_list); 210 LIST_HEAD(drain_tmr_list);
211 struct se_session *sess;
181 struct se_tmr_req *tmr_p, *tmr_pp; 212 struct se_tmr_req *tmr_p, *tmr_pp;
182 struct se_cmd *cmd; 213 struct se_cmd *cmd;
183 unsigned long flags; 214 unsigned long flags;
215 bool rc;
184 /* 216 /*
185 * Release all pending and outgoing TMRs aside from the received 217 * Release all pending and outgoing TMRs aside from the received
186 * LUN_RESET tmr.. 218 * LUN_RESET tmr..
@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 238 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
207 continue; 239 continue;
208 240
241 sess = cmd->se_sess;
242 if (WARN_ON_ONCE(!sess))
243 continue;
244
245 spin_lock(&sess->sess_cmd_lock);
209 spin_lock(&cmd->t_state_lock); 246 spin_lock(&cmd->t_state_lock);
210 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 247 if (!(cmd->transport_state & CMD_T_ACTIVE) ||
248 (cmd->transport_state & CMD_T_FABRIC_STOP)) {
211 spin_unlock(&cmd->t_state_lock); 249 spin_unlock(&cmd->t_state_lock);
250 spin_unlock(&sess->sess_cmd_lock);
212 continue; 251 continue;
213 } 252 }
214 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { 253 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
215 spin_unlock(&cmd->t_state_lock); 254 spin_unlock(&cmd->t_state_lock);
255 spin_unlock(&sess->sess_cmd_lock);
216 continue; 256 continue;
217 } 257 }
258 if (sess->sess_tearing_down || cmd->cmd_wait_set) {
259 spin_unlock(&cmd->t_state_lock);
260 spin_unlock(&sess->sess_cmd_lock);
261 continue;
262 }
263 cmd->transport_state |= CMD_T_ABORTED;
218 spin_unlock(&cmd->t_state_lock); 264 spin_unlock(&cmd->t_state_lock);
219 265
266 rc = kref_get_unless_zero(&cmd->cmd_kref);
267 if (!rc) {
268 printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
269 spin_unlock(&sess->sess_cmd_lock);
270 continue;
271 }
272 spin_unlock(&sess->sess_cmd_lock);
273
220 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); 274 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
221 } 275 }
222 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 276 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
230 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 284 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
231 tmr_p->function, tmr_p->response, cmd->t_state); 285 tmr_p->function, tmr_p->response, cmd->t_state);
232 286
287 cancel_work_sync(&cmd->work);
288 transport_wait_for_tasks(cmd);
289
233 transport_cmd_finish_abort(cmd, 1); 290 transport_cmd_finish_abort(cmd, 1);
291 target_put_sess_cmd(cmd);
234 } 292 }
235} 293}
236 294
237static void core_tmr_drain_state_list( 295static void core_tmr_drain_state_list(
238 struct se_device *dev, 296 struct se_device *dev,
239 struct se_cmd *prout_cmd, 297 struct se_cmd *prout_cmd,
240 struct se_node_acl *tmr_nacl, 298 struct se_session *tmr_sess,
241 int tas, 299 int tas,
242 struct list_head *preempt_and_abort_list) 300 struct list_head *preempt_and_abort_list)
243{ 301{
244 LIST_HEAD(drain_task_list); 302 LIST_HEAD(drain_task_list);
303 struct se_session *sess;
245 struct se_cmd *cmd, *next; 304 struct se_cmd *cmd, *next;
246 unsigned long flags; 305 unsigned long flags;
306 int rc;
247 307
248 /* 308 /*
249 * Complete outstanding commands with TASK_ABORTED SAM status. 309 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
282 if (prout_cmd == cmd) 342 if (prout_cmd == cmd)
283 continue; 343 continue;
284 344
345 sess = cmd->se_sess;
346 if (WARN_ON_ONCE(!sess))
347 continue;
348
349 spin_lock(&sess->sess_cmd_lock);
350 rc = __target_check_io_state(cmd, tmr_sess, tas);
351 spin_unlock(&sess->sess_cmd_lock);
352 if (!rc)
353 continue;
354
285 list_move_tail(&cmd->state_list, &drain_task_list); 355 list_move_tail(&cmd->state_list, &drain_task_list);
286 cmd->state_active = false; 356 cmd->state_active = false;
287 } 357 }
@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
289 359
290 while (!list_empty(&drain_task_list)) { 360 while (!list_empty(&drain_task_list)) {
291 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); 361 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
292 list_del(&cmd->state_list); 362 list_del_init(&cmd->state_list);
293 363
294 pr_debug("LUN_RESET: %s cmd: %p" 364 pr_debug("LUN_RESET: %s cmd: %p"
295 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" 365 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
313 * loop above, but we do it down here given that 383 * loop above, but we do it down here given that
314 * cancel_work_sync may block. 384 * cancel_work_sync may block.
315 */ 385 */
316 if (cmd->t_state == TRANSPORT_COMPLETE) 386 cancel_work_sync(&cmd->work);
317 cancel_work_sync(&cmd->work); 387 transport_wait_for_tasks(cmd);
318
319 spin_lock_irqsave(&cmd->t_state_lock, flags);
320 target_stop_cmd(cmd, &flags);
321
322 cmd->transport_state |= CMD_T_ABORTED;
323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
324 388
325 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); 389 core_tmr_handle_tas_abort(cmd, tas);
390 target_put_sess_cmd(cmd);
326 } 391 }
327} 392}
328 393
@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
334{ 399{
335 struct se_node_acl *tmr_nacl = NULL; 400 struct se_node_acl *tmr_nacl = NULL;
336 struct se_portal_group *tmr_tpg = NULL; 401 struct se_portal_group *tmr_tpg = NULL;
402 struct se_session *tmr_sess = NULL;
337 int tas; 403 int tas;
338 /* 404 /*
339 * TASK_ABORTED status bit, this is configurable via ConfigFS 405 * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
352 * or struct se_device passthrough.. 418 * or struct se_device passthrough..
353 */ 419 */
354 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { 420 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
355 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 421 tmr_sess = tmr->task_cmd->se_sess;
356 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 422 tmr_nacl = tmr_sess->se_node_acl;
423 tmr_tpg = tmr_sess->se_tpg;
357 if (tmr_nacl && tmr_tpg) { 424 if (tmr_nacl && tmr_tpg) {
358 pr_debug("LUN_RESET: TMR caller fabric: %s" 425 pr_debug("LUN_RESET: TMR caller fabric: %s"
359 " initiator port %s\n", 426 " initiator port %s\n",
@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
366 dev->transport->name, tas); 433 dev->transport->name, tas);
367 434
368 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); 435 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
369 core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, 436 core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
370 preempt_and_abort_list); 437 preempt_and_abort_list);
371 438
372 /* 439 /*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e10f25..867bc6d0a68a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess)
534} 534}
535EXPORT_SYMBOL(transport_deregister_session); 535EXPORT_SYMBOL(transport_deregister_session);
536 536
537/*
538 * Called with cmd->t_state_lock held.
539 */
540static void target_remove_from_state_list(struct se_cmd *cmd) 537static void target_remove_from_state_list(struct se_cmd *cmd)
541{ 538{
542 struct se_device *dev = cmd->se_dev; 539 struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
561{ 558{
562 unsigned long flags; 559 unsigned long flags;
563 560
564 spin_lock_irqsave(&cmd->t_state_lock, flags);
565 if (write_pending)
566 cmd->t_state = TRANSPORT_WRITE_PENDING;
567
568 if (remove_from_lists) { 561 if (remove_from_lists) {
569 target_remove_from_state_list(cmd); 562 target_remove_from_state_list(cmd);
570 563
@@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
574 cmd->se_lun = NULL; 567 cmd->se_lun = NULL;
575 } 568 }
576 569
570 spin_lock_irqsave(&cmd->t_state_lock, flags);
571 if (write_pending)
572 cmd->t_state = TRANSPORT_WRITE_PENDING;
573
577 /* 574 /*
578 * Determine if frontend context caller is requesting the stopping of 575 * Determine if frontend context caller is requesting the stopping of
579 * this command for frontend exceptions. 576 * this command for frontend exceptions.
@@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
627 624
628void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 625void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
629{ 626{
627 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
628
630 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 629 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
631 transport_lun_remove_cmd(cmd); 630 transport_lun_remove_cmd(cmd);
632 /* 631 /*
@@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
638 637
639 if (transport_cmd_check_stop_to_fabric(cmd)) 638 if (transport_cmd_check_stop_to_fabric(cmd))
640 return; 639 return;
641 if (remove) 640 if (remove && ack_kref)
642 transport_put_cmd(cmd); 641 transport_put_cmd(cmd);
643} 642}
644 643
@@ -694,19 +693,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
694 } 693 }
695 694
696 /* 695 /*
697 * See if we are waiting to complete for an exception condition.
698 */
699 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
700 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
701 complete(&cmd->task_stop_comp);
702 return;
703 }
704
705 /*
706 * Check for case where an explicit ABORT_TASK has been received 696 * Check for case where an explicit ABORT_TASK has been received
707 * and transport_wait_for_tasks() will be waiting for completion.. 697 * and transport_wait_for_tasks() will be waiting for completion..
708 */ 698 */
709 if (cmd->transport_state & CMD_T_ABORTED && 699 if (cmd->transport_state & CMD_T_ABORTED ||
710 cmd->transport_state & CMD_T_STOP) { 700 cmd->transport_state & CMD_T_STOP) {
711 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
712 complete_all(&cmd->t_transport_stop_comp); 702 complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
721 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 711 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
722 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 712 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
723 713
724 if (cmd->cpuid == -1) 714 if (cmd->se_cmd_flags & SCF_USE_CPUID)
725 queue_work(target_completion_wq, &cmd->work);
726 else
727 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 715 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
716 else
717 queue_work(target_completion_wq, &cmd->work);
728} 718}
729EXPORT_SYMBOL(target_complete_cmd); 719EXPORT_SYMBOL(target_complete_cmd);
730 720
@@ -1203,7 +1193,6 @@ void transport_init_se_cmd(
1203 INIT_LIST_HEAD(&cmd->state_list); 1193 INIT_LIST_HEAD(&cmd->state_list);
1204 init_completion(&cmd->t_transport_stop_comp); 1194 init_completion(&cmd->t_transport_stop_comp);
1205 init_completion(&cmd->cmd_wait_comp); 1195 init_completion(&cmd->cmd_wait_comp);
1206 init_completion(&cmd->task_stop_comp);
1207 spin_lock_init(&cmd->t_state_lock); 1196 spin_lock_init(&cmd->t_state_lock);
1208 kref_init(&cmd->cmd_kref); 1197 kref_init(&cmd->cmd_kref);
1209 cmd->transport_state = CMD_T_DEV_ACTIVE; 1198 cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1437 */ 1426 */
1438 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1427 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1439 data_length, data_dir, task_attr, sense); 1428 data_length, data_dir, task_attr, sense);
1429
1430 if (flags & TARGET_SCF_USE_CPUID)
1431 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1432 else
1433 se_cmd->cpuid = WORK_CPU_UNBOUND;
1434
1440 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1435 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1441 se_cmd->unknown_data_length = 1; 1436 se_cmd->unknown_data_length = 1;
1442 /* 1437 /*
@@ -1635,33 +1630,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1635EXPORT_SYMBOL(target_submit_tmr); 1630EXPORT_SYMBOL(target_submit_tmr);
1636 1631
1637/* 1632/*
1638 * If the cmd is active, request it to be stopped and sleep until it
1639 * has completed.
1640 */
1641bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1642 __releases(&cmd->t_state_lock)
1643 __acquires(&cmd->t_state_lock)
1644{
1645 bool was_active = false;
1646
1647 if (cmd->transport_state & CMD_T_BUSY) {
1648 cmd->transport_state |= CMD_T_REQUEST_STOP;
1649 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1650
1651 pr_debug("cmd %p waiting to complete\n", cmd);
1652 wait_for_completion(&cmd->task_stop_comp);
1653 pr_debug("cmd %p stopped successfully\n", cmd);
1654
1655 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1656 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1657 cmd->transport_state &= ~CMD_T_BUSY;
1658 was_active = true;
1659 }
1660
1661 return was_active;
1662}
1663
1664/*
1665 * Handle SAM-esque emulation for generic transport request failures. 1633 * Handle SAM-esque emulation for generic transport request failures.
1666 */ 1634 */
1667void transport_generic_request_failure(struct se_cmd *cmd, 1635void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1859 return true; 1827 return true;
1860} 1828}
1861 1829
1830static int __transport_check_aborted_status(struct se_cmd *, int);
1831
1862void target_execute_cmd(struct se_cmd *cmd) 1832void target_execute_cmd(struct se_cmd *cmd)
1863{ 1833{
1864 /* 1834 /*
1865 * If the received CDB has aleady been aborted stop processing it here.
1866 */
1867 if (transport_check_aborted_status(cmd, 1))
1868 return;
1869
1870 /*
1871 * Determine if frontend context caller is requesting the stopping of 1835 * Determine if frontend context caller is requesting the stopping of
1872 * this command for frontend exceptions. 1836 * this command for frontend exceptions.
1837 *
1838 * If the received CDB has aleady been aborted stop processing it here.
1873 */ 1839 */
1874 spin_lock_irq(&cmd->t_state_lock); 1840 spin_lock_irq(&cmd->t_state_lock);
1841 if (__transport_check_aborted_status(cmd, 1)) {
1842 spin_unlock_irq(&cmd->t_state_lock);
1843 return;
1844 }
1875 if (cmd->transport_state & CMD_T_STOP) { 1845 if (cmd->transport_state & CMD_T_STOP) {
1876 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1846 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1877 __func__, __LINE__, cmd->tag); 1847 __func__, __LINE__, cmd->tag);
@@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
2222} 2192}
2223 2193
2224/** 2194/**
2225 * transport_release_cmd - free a command 2195 * transport_put_cmd - release a reference to a command
2226 * @cmd: command to free 2196 * @cmd: command to release
2227 * 2197 *
2228 * This routine unconditionally frees a command, and reference counting 2198 * This routine releases our reference to the command and frees it if possible.
2229 * or list removal must be done in the caller.
2230 */ 2199 */
2231static int transport_release_cmd(struct se_cmd *cmd) 2200static int transport_put_cmd(struct se_cmd *cmd)
2232{ 2201{
2233 BUG_ON(!cmd->se_tfo); 2202 BUG_ON(!cmd->se_tfo);
2234
2235 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2236 core_tmr_release_req(cmd->se_tmr_req);
2237 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2238 kfree(cmd->t_task_cdb);
2239 /* 2203 /*
2240 * If this cmd has been setup with target_get_sess_cmd(), drop 2204 * If this cmd has been setup with target_get_sess_cmd(), drop
2241 * the kref and call ->release_cmd() in kref callback. 2205 * the kref and call ->release_cmd() in kref callback.
@@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
2243 return target_put_sess_cmd(cmd); 2207 return target_put_sess_cmd(cmd);
2244} 2208}
2245 2209
2246/**
2247 * transport_put_cmd - release a reference to a command
2248 * @cmd: command to release
2249 *
2250 * This routine releases our reference to the command and frees it if possible.
2251 */
2252static int transport_put_cmd(struct se_cmd *cmd)
2253{
2254 transport_free_pages(cmd);
2255 return transport_release_cmd(cmd);
2256}
2257
2258void *transport_kmap_data_sg(struct se_cmd *cmd) 2210void *transport_kmap_data_sg(struct se_cmd *cmd)
2259{ 2211{
2260 struct scatterlist *sg = cmd->t_data_sg; 2212 struct scatterlist *sg = cmd->t_data_sg;
@@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2450 } 2402 }
2451} 2403}
2452 2404
2453int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2405static bool
2406__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2407 unsigned long *flags);
2408
2409static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2454{ 2410{
2455 unsigned long flags; 2411 unsigned long flags;
2412
2413 spin_lock_irqsave(&cmd->t_state_lock, flags);
2414 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2415 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2416}
2417
2418int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2419{
2456 int ret = 0; 2420 int ret = 0;
2421 bool aborted = false, tas = false;
2457 2422
2458 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2423 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2459 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2424 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2460 transport_wait_for_tasks(cmd); 2425 target_wait_free_cmd(cmd, &aborted, &tas);
2461 2426
2462 ret = transport_release_cmd(cmd); 2427 if (!aborted || tas)
2428 ret = transport_put_cmd(cmd);
2463 } else { 2429 } else {
2464 if (wait_for_tasks) 2430 if (wait_for_tasks)
2465 transport_wait_for_tasks(cmd); 2431 target_wait_free_cmd(cmd, &aborted, &tas);
2466 /* 2432 /*
2467 * Handle WRITE failure case where transport_generic_new_cmd() 2433 * Handle WRITE failure case where transport_generic_new_cmd()
2468 * has already added se_cmd to state_list, but fabric has 2434 * has already added se_cmd to state_list, but fabric has
2469 * failed command before I/O submission. 2435 * failed command before I/O submission.
2470 */ 2436 */
2471 if (cmd->state_active) { 2437 if (cmd->state_active)
2472 spin_lock_irqsave(&cmd->t_state_lock, flags);
2473 target_remove_from_state_list(cmd); 2438 target_remove_from_state_list(cmd);
2474 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2475 }
2476 2439
2477 if (cmd->se_lun) 2440 if (cmd->se_lun)
2478 transport_lun_remove_cmd(cmd); 2441 transport_lun_remove_cmd(cmd);
2479 2442
2480 ret = transport_put_cmd(cmd); 2443 if (!aborted || tas)
2444 ret = transport_put_cmd(cmd);
2445 }
2446 /*
2447 * If the task has been internally aborted due to TMR ABORT_TASK
2448 * or LUN_RESET, target_core_tmr.c is responsible for performing
2449 * the remaining calls to target_put_sess_cmd(), and not the
2450 * callers of this function.
2451 */
2452 if (aborted) {
2453 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2454 wait_for_completion(&cmd->cmd_wait_comp);
2455 cmd->se_tfo->release_cmd(cmd);
2456 ret = 1;
2481 } 2457 }
2482 return ret; 2458 return ret;
2483} 2459}
@@ -2517,26 +2493,46 @@ out:
2517} 2493}
2518EXPORT_SYMBOL(target_get_sess_cmd); 2494EXPORT_SYMBOL(target_get_sess_cmd);
2519 2495
2496static void target_free_cmd_mem(struct se_cmd *cmd)
2497{
2498 transport_free_pages(cmd);
2499
2500 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2501 core_tmr_release_req(cmd->se_tmr_req);
2502 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2503 kfree(cmd->t_task_cdb);
2504}
2505
2520static void target_release_cmd_kref(struct kref *kref) 2506static void target_release_cmd_kref(struct kref *kref)
2521{ 2507{
2522 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2508 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2523 struct se_session *se_sess = se_cmd->se_sess; 2509 struct se_session *se_sess = se_cmd->se_sess;
2524 unsigned long flags; 2510 unsigned long flags;
2511 bool fabric_stop;
2525 2512
2526 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2513 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2527 if (list_empty(&se_cmd->se_cmd_list)) { 2514 if (list_empty(&se_cmd->se_cmd_list)) {
2528 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2515 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2516 target_free_cmd_mem(se_cmd);
2529 se_cmd->se_tfo->release_cmd(se_cmd); 2517 se_cmd->se_tfo->release_cmd(se_cmd);
2530 return; 2518 return;
2531 } 2519 }
2532 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2520
2521 spin_lock(&se_cmd->t_state_lock);
2522 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
2523 spin_unlock(&se_cmd->t_state_lock);
2524
2525 if (se_cmd->cmd_wait_set || fabric_stop) {
2526 list_del_init(&se_cmd->se_cmd_list);
2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2527 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2528 target_free_cmd_mem(se_cmd);
2534 complete(&se_cmd->cmd_wait_comp); 2529 complete(&se_cmd->cmd_wait_comp);
2535 return; 2530 return;
2536 } 2531 }
2537 list_del(&se_cmd->se_cmd_list); 2532 list_del_init(&se_cmd->se_cmd_list);
2538 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2539 2534
2535 target_free_cmd_mem(se_cmd);
2540 se_cmd->se_tfo->release_cmd(se_cmd); 2536 se_cmd->se_tfo->release_cmd(se_cmd);
2541} 2537}
2542 2538
@@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
2548 struct se_session *se_sess = se_cmd->se_sess; 2544 struct se_session *se_sess = se_cmd->se_sess;
2549 2545
2550 if (!se_sess) { 2546 if (!se_sess) {
2547 target_free_cmd_mem(se_cmd);
2551 se_cmd->se_tfo->release_cmd(se_cmd); 2548 se_cmd->se_tfo->release_cmd(se_cmd);
2552 return 1; 2549 return 1;
2553 } 2550 }
@@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2564{ 2561{
2565 struct se_cmd *se_cmd; 2562 struct se_cmd *se_cmd;
2566 unsigned long flags; 2563 unsigned long flags;
2564 int rc;
2567 2565
2568 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2566 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2569 if (se_sess->sess_tearing_down) { 2567 if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2573 se_sess->sess_tearing_down = 1; 2571 se_sess->sess_tearing_down = 1;
2574 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2572 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2575 2573
2576 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2574 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
2577 se_cmd->cmd_wait_set = 1; 2575 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2576 if (rc) {
2577 se_cmd->cmd_wait_set = 1;
2578 spin_lock(&se_cmd->t_state_lock);
2579 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2580 spin_unlock(&se_cmd->t_state_lock);
2581 }
2582 }
2578 2583
2579 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2584 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2580} 2585}
@@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2587{ 2592{
2588 struct se_cmd *se_cmd, *tmp_cmd; 2593 struct se_cmd *se_cmd, *tmp_cmd;
2589 unsigned long flags; 2594 unsigned long flags;
2595 bool tas;
2590 2596
2591 list_for_each_entry_safe(se_cmd, tmp_cmd, 2597 list_for_each_entry_safe(se_cmd, tmp_cmd,
2592 &se_sess->sess_wait_list, se_cmd_list) { 2598 &se_sess->sess_wait_list, se_cmd_list) {
2593 list_del(&se_cmd->se_cmd_list); 2599 list_del_init(&se_cmd->se_cmd_list);
2594 2600
2595 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2601 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2596 " %d\n", se_cmd, se_cmd->t_state, 2602 " %d\n", se_cmd, se_cmd->t_state,
2597 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2603 se_cmd->se_tfo->get_cmd_state(se_cmd));
2598 2604
2605 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2606 tas = (se_cmd->transport_state & CMD_T_TAS);
2607 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2608
2609 if (!target_put_sess_cmd(se_cmd)) {
2610 if (tas)
2611 target_put_sess_cmd(se_cmd);
2612 }
2613
2599 wait_for_completion(&se_cmd->cmd_wait_comp); 2614 wait_for_completion(&se_cmd->cmd_wait_comp);
2600 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2615 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2601 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2616 " fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
2617 wait_for_completion(&lun->lun_ref_comp); 2632 wait_for_completion(&lun->lun_ref_comp);
2618} 2633}
2619 2634
2620/** 2635static bool
2621 * transport_wait_for_tasks - wait for completion to occur 2636__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2622 * @cmd: command to wait 2637 bool *aborted, bool *tas, unsigned long *flags)
2623 * 2638 __releases(&cmd->t_state_lock)
2624 * Called from frontend fabric context to wait for storage engine 2639 __acquires(&cmd->t_state_lock)
2625 * to pause and/or release frontend generated struct se_cmd.
2626 */
2627bool transport_wait_for_tasks(struct se_cmd *cmd)
2628{ 2640{
2629 unsigned long flags;
2630 2641
2631 spin_lock_irqsave(&cmd->t_state_lock, flags); 2642 assert_spin_locked(&cmd->t_state_lock);
2643 WARN_ON_ONCE(!irqs_disabled());
2644
2645 if (fabric_stop)
2646 cmd->transport_state |= CMD_T_FABRIC_STOP;
2647
2648 if (cmd->transport_state & CMD_T_ABORTED)
2649 *aborted = true;
2650
2651 if (cmd->transport_state & CMD_T_TAS)
2652 *tas = true;
2653
2632 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2654 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2633 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2655 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2634 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2635 return false; 2656 return false;
2636 }
2637 2657
2638 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2658 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2639 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2659 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2640 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2641 return false; 2660 return false;
2642 }
2643 2661
2644 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2662 if (!(cmd->transport_state & CMD_T_ACTIVE))
2645 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2663 return false;
2664
2665 if (fabric_stop && *aborted)
2646 return false; 2666 return false;
2647 }
2648 2667
2649 cmd->transport_state |= CMD_T_STOP; 2668 cmd->transport_state |= CMD_T_STOP;
2650 2669
2651 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", 2670 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2652 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2671 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2672 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2653 2673
2654 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2674 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2655 2675
2656 wait_for_completion(&cmd->t_transport_stop_comp); 2676 wait_for_completion(&cmd->t_transport_stop_comp);
2657 2677
2658 spin_lock_irqsave(&cmd->t_state_lock, flags); 2678 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2659 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2679 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2660 2680
2661 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", 2681 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2662 cmd->tag); 2682 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2663 2683
2684 return true;
2685}
2686
2687/**
2688 * transport_wait_for_tasks - wait for completion to occur
2689 * @cmd: command to wait
2690 *
2691 * Called from frontend fabric context to wait for storage engine
2692 * to pause and/or release frontend generated struct se_cmd.
2693 */
2694bool transport_wait_for_tasks(struct se_cmd *cmd)
2695{
2696 unsigned long flags;
2697 bool ret, aborted = false, tas = false;
2698
2699 spin_lock_irqsave(&cmd->t_state_lock, flags);
2700 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2664 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2665 2702
2666 return true; 2703 return ret;
2667} 2704}
2668EXPORT_SYMBOL(transport_wait_for_tasks); 2705EXPORT_SYMBOL(transport_wait_for_tasks);
2669 2706
@@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2845} 2882}
2846EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2883EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2847 2884
2848int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2885static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2886 __releases(&cmd->t_state_lock)
2887 __acquires(&cmd->t_state_lock)
2849{ 2888{
2889 assert_spin_locked(&cmd->t_state_lock);
2890 WARN_ON_ONCE(!irqs_disabled());
2891
2850 if (!(cmd->transport_state & CMD_T_ABORTED)) 2892 if (!(cmd->transport_state & CMD_T_ABORTED))
2851 return 0; 2893 return 0;
2852
2853 /* 2894 /*
2854 * If cmd has been aborted but either no status is to be sent or it has 2895 * If cmd has been aborted but either no status is to be sent or it has
2855 * already been sent, just return 2896 * already been sent, just return
2856 */ 2897 */
2857 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2898 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
2899 if (send_status)
2900 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2858 return 1; 2901 return 1;
2902 }
2859 2903
2860 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2904 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
2861 cmd->t_task_cdb[0], cmd->tag); 2905 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
2862 2906
2863 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2907 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2864 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2908 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2865 trace_target_cmd_complete(cmd); 2909 trace_target_cmd_complete(cmd);
2910
2911 spin_unlock_irq(&cmd->t_state_lock);
2866 cmd->se_tfo->queue_status(cmd); 2912 cmd->se_tfo->queue_status(cmd);
2913 spin_lock_irq(&cmd->t_state_lock);
2867 2914
2868 return 1; 2915 return 1;
2869} 2916}
2917
2918int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2919{
2920 int ret;
2921
2922 spin_lock_irq(&cmd->t_state_lock);
2923 ret = __transport_check_aborted_status(cmd, send_status);
2924 spin_unlock_irq(&cmd->t_state_lock);
2925
2926 return ret;
2927}
2870EXPORT_SYMBOL(transport_check_aborted_status); 2928EXPORT_SYMBOL(transport_check_aborted_status);
2871 2929
2872void transport_send_task_abort(struct se_cmd *cmd) 2930void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
2888 */ 2946 */
2889 if (cmd->data_direction == DMA_TO_DEVICE) { 2947 if (cmd->data_direction == DMA_TO_DEVICE) {
2890 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2948 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2891 cmd->transport_state |= CMD_T_ABORTED; 2949 spin_lock_irqsave(&cmd->t_state_lock, flags);
2950 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
2951 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2952 goto send_abort;
2953 }
2892 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2954 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2955 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2893 return; 2956 return;
2894 } 2957 }
2895 } 2958 }
2959send_abort:
2896 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2960 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2897 2961
2898 transport_lun_remove_cmd(cmd); 2962 transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work)
2909 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2973 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2910 struct se_device *dev = cmd->se_dev; 2974 struct se_device *dev = cmd->se_dev;
2911 struct se_tmr_req *tmr = cmd->se_tmr_req; 2975 struct se_tmr_req *tmr = cmd->se_tmr_req;
2976 unsigned long flags;
2912 int ret; 2977 int ret;
2913 2978
2979 spin_lock_irqsave(&cmd->t_state_lock, flags);
2980 if (cmd->transport_state & CMD_T_ABORTED) {
2981 tmr->response = TMR_FUNCTION_REJECTED;
2982 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2983 goto check_stop;
2984 }
2985 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2986
2914 switch (tmr->function) { 2987 switch (tmr->function) {
2915 case TMR_ABORT_TASK: 2988 case TMR_ABORT_TASK:
2916 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2989 core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work)
2943 break; 3016 break;
2944 } 3017 }
2945 3018
3019 spin_lock_irqsave(&cmd->t_state_lock, flags);
3020 if (cmd->transport_state & CMD_T_ABORTED) {
3021 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3022 goto check_stop;
3023 }
2946 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3024 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3025 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3026
2947 cmd->se_tfo->queue_tm_rsp(cmd); 3027 cmd->se_tfo->queue_tm_rsp(cmd);
2948 3028
3029check_stop:
2949 transport_cmd_check_stop_to_fabric(cmd); 3030 transport_cmd_check_stop_to_fabric(cmd);
2950} 3031}
2951 3032
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5ead71..94f5154ac788 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev)
903 info->version = __stringify(TCMU_MAILBOX_VERSION); 903 info->version = __stringify(TCMU_MAILBOX_VERSION);
904 904
905 info->mem[0].name = "tcm-user command & data buffer"; 905 info->mem[0].name = "tcm-user command & data buffer";
906 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 906 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
907 info->mem[0].size = TCMU_RING_SIZE; 907 info->mem[0].size = TCMU_RING_SIZE;
908 info->mem[0].memtype = UIO_MEM_VIRTUAL; 908 info->mem[0].memtype = UIO_MEM_VIRTUAL;
909 909
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac64a91c..7c92c09be213 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@ config IMX_THERMAL
195 passive trip is crossed. 195 passive trip is crossed.
196 196
197config SPEAR_THERMAL 197config SPEAR_THERMAL
198 bool "SPEAr thermal sensor driver" 198 tristate "SPEAr thermal sensor driver"
199 depends on PLAT_SPEAR || COMPILE_TEST 199 depends on PLAT_SPEAR || COMPILE_TEST
200 depends on OF 200 depends on OF
201 help 201 help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
237 framework. 237 framework.
238 238
239config DB8500_THERMAL 239config DB8500_THERMAL
240 bool "DB8500 thermal management" 240 tristate "DB8500 thermal management"
241 depends on ARCH_U8500 241 depends on MFD_DB8500_PRCMU
242 default y 242 default y
243 help 243 help
244 Adds DB8500 thermal management implementation according to the thermal 244 Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a5d88f..6ceac4f2d4b2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
377 * get_load() - get load for a cpu since last updated 377 * get_load() - get load for a cpu since last updated
378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu 378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
379 * @cpu: cpu number 379 * @cpu: cpu number
380 * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
380 * 381 *
381 * Return: The average load of cpu @cpu in percentage since this 382 * Return: The average load of cpu @cpu in percentage since this
382 * function was last called. 383 * function was last called.
383 */ 384 */
384static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) 385static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
386 int cpu_idx)
385{ 387{
386 u32 load; 388 u32 load;
387 u64 now, now_idle, delta_time, delta_idle; 389 u64 now, now_idle, delta_time, delta_idle;
388 390
389 now_idle = get_cpu_idle_time(cpu, &now, 0); 391 now_idle = get_cpu_idle_time(cpu, &now, 0);
390 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; 392 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
391 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; 393 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
392 394
393 if (delta_time <= delta_idle) 395 if (delta_time <= delta_idle)
394 load = 0; 396 load = 0;
395 else 397 else
396 load = div64_u64(100 * (delta_time - delta_idle), delta_time); 398 load = div64_u64(100 * (delta_time - delta_idle), delta_time);
397 399
398 cpufreq_device->time_in_idle[cpu] = now_idle; 400 cpufreq_device->time_in_idle[cpu_idx] = now_idle;
399 cpufreq_device->time_in_idle_timestamp[cpu] = now; 401 cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
400 402
401 return load; 403 return load;
402} 404}
@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
598 u32 load; 600 u32 load;
599 601
600 if (cpu_online(cpu)) 602 if (cpu_online(cpu))
601 load = get_load(cpufreq_device, cpu); 603 load = get_load(cpufreq_device, cpu, i);
602 else 604 else
603 load = 0; 605 load = 0;
604 606
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index be4eedcb839a..9043f8f91852 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
475 475
476 sensor_np = of_node_get(dev->of_node); 476 sensor_np = of_node_get(dev->of_node);
477 477
478 for_each_child_of_node(np, child) { 478 for_each_available_child_of_node(np, child) {
479 struct of_phandle_args sensor_specs; 479 struct of_phandle_args sensor_specs;
480 int ret, id; 480 int ret, id;
481 481
482 /* Check whether child is enabled or not */
483 if (!of_device_is_available(child))
484 continue;
485
486 /* For now, thermal framework supports only 1 sensor per zone */ 482 /* For now, thermal framework supports only 1 sensor per zone */
487 ret = of_parse_phandle_with_args(child, "thermal-sensors", 483 ret = of_parse_phandle_with_args(child, "thermal-sensors",
488 "#thermal-sensor-cells", 484 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void)
881 return 0; /* Run successfully on systems without thermal DT */ 877 return 0; /* Run successfully on systems without thermal DT */
882 } 878 }
883 879
884 for_each_child_of_node(np, child) { 880 for_each_available_child_of_node(np, child) {
885 struct thermal_zone_device *zone; 881 struct thermal_zone_device *zone;
886 struct thermal_zone_params *tzp; 882 struct thermal_zone_params *tzp;
887 int i, mask = 0; 883 int i, mask = 0;
888 u32 prop; 884 u32 prop;
889 885
890 /* Check whether child is enabled or not */
891 if (!of_device_is_available(child))
892 continue;
893
894 tz = thermal_of_build_thermal_zone(child); 886 tz = thermal_of_build_thermal_zone(child);
895 if (IS_ERR(tz)) { 887 if (IS_ERR(tz)) {
896 pr_err("failed to build thermal zone %s: %ld\n", 888 pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void)
968 return; 960 return;
969 } 961 }
970 962
971 for_each_child_of_node(np, child) { 963 for_each_available_child_of_node(np, child) {
972 struct thermal_zone_device *zone; 964 struct thermal_zone_device *zone;
973 965
974 /* Check whether child is enabled or not */
975 if (!of_device_is_available(child))
976 continue;
977
978 zone = thermal_zone_get_zone_by_name(child->name); 966 zone = thermal_zone_get_zone_by_name(child->name);
979 if (IS_ERR(zone)) 967 if (IS_ERR(zone))
980 continue; 968 continue;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 44b9c485157d..0e735acea33a 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/of_device.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
28#include <linux/reboot.h> 29#include <linux/reboot.h>
@@ -75,8 +76,10 @@ struct rcar_thermal_priv {
75#define rcar_has_irq_support(priv) ((priv)->common->base) 76#define rcar_has_irq_support(priv) ((priv)->common->base)
76#define rcar_id_to_shift(priv) ((priv)->id * 8) 77#define rcar_id_to_shift(priv) ((priv)->id * 8)
77 78
79#define USE_OF_THERMAL 1
78static const struct of_device_id rcar_thermal_dt_ids[] = { 80static const struct of_device_id rcar_thermal_dt_ids[] = {
79 { .compatible = "renesas,rcar-thermal", }, 81 { .compatible = "renesas,rcar-thermal", },
82 { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
80 {}, 83 {},
81}; 84};
82MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); 85MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@ err_out_unlock:
200 return ret; 203 return ret;
201} 204}
202 205
203static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) 206static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
207 int *temp)
204{ 208{
205 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
206 int tmp; 209 int tmp;
207 int ret; 210 int ret;
208 211
@@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
226 return 0; 229 return 0;
227} 230}
228 231
232static int rcar_thermal_of_get_temp(void *data, int *temp)
233{
234 struct rcar_thermal_priv *priv = data;
235
236 return rcar_thermal_get_current_temp(priv, temp);
237}
238
239static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
240{
241 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
242
243 return rcar_thermal_get_current_temp(priv, temp);
244}
245
229static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, 246static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
230 int trip, enum thermal_trip_type *type) 247 int trip, enum thermal_trip_type *type)
231{ 248{
@@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone,
282 return 0; 299 return 0;
283} 300}
284 301
302static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
303 .get_temp = rcar_thermal_of_get_temp,
304};
305
285static struct thermal_zone_device_ops rcar_thermal_zone_ops = { 306static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
286 .get_temp = rcar_thermal_get_temp, 307 .get_temp = rcar_thermal_get_temp,
287 .get_trip_type = rcar_thermal_get_trip_type, 308 .get_trip_type = rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work)
318 339
319 priv = container_of(work, struct rcar_thermal_priv, work.work); 340 priv = container_of(work, struct rcar_thermal_priv, work.work);
320 341
321 rcar_thermal_get_temp(priv->zone, &cctemp); 342 ret = rcar_thermal_get_current_temp(priv, &cctemp);
343 if (ret < 0)
344 return;
345
322 ret = rcar_thermal_update_temp(priv); 346 ret = rcar_thermal_update_temp(priv);
323 if (ret < 0) 347 if (ret < 0)
324 return; 348 return;
325 349
326 rcar_thermal_irq_enable(priv); 350 rcar_thermal_irq_enable(priv);
327 351
328 rcar_thermal_get_temp(priv->zone, &nctemp); 352 ret = rcar_thermal_get_current_temp(priv, &nctemp);
353 if (ret < 0)
354 return;
355
329 if (nctemp != cctemp) 356 if (nctemp != cctemp)
330 thermal_zone_device_update(priv->zone); 357 thermal_zone_device_update(priv->zone);
331} 358}
@@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
403 struct rcar_thermal_priv *priv; 430 struct rcar_thermal_priv *priv;
404 struct device *dev = &pdev->dev; 431 struct device *dev = &pdev->dev;
405 struct resource *res, *irq; 432 struct resource *res, *irq;
433 const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
434 unsigned long of_data = (unsigned long)of_id->data;
406 int mres = 0; 435 int mres = 0;
407 int i; 436 int i;
408 int ret = -ENODEV; 437 int ret = -ENODEV;
@@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev)
463 if (ret < 0) 492 if (ret < 0)
464 goto error_unregister; 493 goto error_unregister;
465 494
466 priv->zone = thermal_zone_device_register("rcar_thermal", 495 if (of_data == USE_OF_THERMAL)
496 priv->zone = thermal_zone_of_sensor_register(
497 dev, i, priv,
498 &rcar_thermal_zone_of_ops);
499 else
500 priv->zone = thermal_zone_device_register(
501 "rcar_thermal",
467 1, 0, priv, 502 1, 0, priv,
468 &rcar_thermal_zone_ops, NULL, 0, 503 &rcar_thermal_zone_ops, NULL, 0,
469 idle); 504 idle);
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd9136662..81b35aace9de 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
54 .get_temp = thermal_get_temp, 54 .get_temp = thermal_get_temp,
55}; 55};
56 56
57#ifdef CONFIG_PM 57static int __maybe_unused spear_thermal_suspend(struct device *dev)
58static int spear_thermal_suspend(struct device *dev)
59{ 58{
60 struct platform_device *pdev = to_platform_device(dev); 59 struct platform_device *pdev = to_platform_device(dev);
61 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 60 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
72 return 0; 71 return 0;
73} 72}
74 73
75static int spear_thermal_resume(struct device *dev) 74static int __maybe_unused spear_thermal_resume(struct device *dev)
76{ 75{
77 struct platform_device *pdev = to_platform_device(dev); 76 struct platform_device *pdev = to_platform_device(dev);
78 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 77 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
94 93
95 return 0; 94 return 0;
96} 95}
97#endif
98 96
99static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, 97static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
100 spear_thermal_resume); 98 spear_thermal_resume);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b3110040164a..2348fa613707 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
681/* this is called once with whichever end is closed last */ 681/* this is called once with whichever end is closed last */
682static void pty_unix98_shutdown(struct tty_struct *tty) 682static void pty_unix98_shutdown(struct tty_struct *tty)
683{ 683{
684 devpts_kill_index(tty->driver_data, tty->index); 684 struct inode *ptmx_inode;
685
686 if (tty->driver->subtype == PTY_TYPE_MASTER)
687 ptmx_inode = tty->driver_data;
688 else
689 ptmx_inode = tty->link->driver_data;
690 devpts_kill_index(ptmx_inode, tty->index);
691 devpts_del_ref(ptmx_inode);
685} 692}
686 693
687static const struct tty_operations ptm_unix98_ops = { 694static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
773 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 780 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
774 tty->driver_data = inode; 781 tty->driver_data = inode;
775 782
783 /*
784 * In the case where all references to ptmx inode are dropped and we
785 * still have /dev/tty opened pointing to the master/slave pair (ptmx
786 * is closed/released before /dev/tty), we must make sure that the inode
787 * is still valid when we call the final pty_unix98_shutdown, thus we
788 * hold an additional reference to the ptmx inode. For the same /dev/tty
789 * last close case, we also need to make sure the super_block isn't
790 * destroyed (devpts instance unmounted), before /dev/tty is closed and
791 * on its release devpts_kill_index is called.
792 */
793 devpts_add_ref(inode);
794
776 tty_add_file(tty, filp); 795 tty_add_file(tty, filp);
777 796
778 slave_inode = devpts_pty_new(inode, 797 slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e71ec78fc11e..7cd6f9a90542 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1941,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1941#define PCIE_VENDOR_ID_WCH 0x1c00 1941#define PCIE_VENDOR_ID_WCH 0x1c00
1942#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1942#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1943#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 1943#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
1944#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
1944 1945
1945#define PCI_VENDOR_ID_PERICOM 0x12D8 1946#define PCI_VENDOR_ID_PERICOM 0x12D8
1946#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 1947#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
@@ -2637,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2637 .subdevice = PCI_ANY_ID, 2638 .subdevice = PCI_ANY_ID,
2638 .setup = pci_wch_ch353_setup, 2639 .setup = pci_wch_ch353_setup,
2639 }, 2640 },
2641 /* WCH CH382 2S card (16850 clone) */
2642 {
2643 .vendor = PCIE_VENDOR_ID_WCH,
2644 .device = PCIE_DEVICE_ID_WCH_CH382_2S,
2645 .subvendor = PCI_ANY_ID,
2646 .subdevice = PCI_ANY_ID,
2647 .setup = pci_wch_ch38x_setup,
2648 },
2640 /* WCH CH382 2S1P card (16850 clone) */ 2649 /* WCH CH382 2S1P card (16850 clone) */
2641 { 2650 {
2642 .vendor = PCIE_VENDOR_ID_WCH, 2651 .vendor = PCIE_VENDOR_ID_WCH,
@@ -2955,6 +2964,7 @@ enum pci_board_num_t {
2955 pbn_fintek_4, 2964 pbn_fintek_4,
2956 pbn_fintek_8, 2965 pbn_fintek_8,
2957 pbn_fintek_12, 2966 pbn_fintek_12,
2967 pbn_wch382_2,
2958 pbn_wch384_4, 2968 pbn_wch384_4,
2959 pbn_pericom_PI7C9X7951, 2969 pbn_pericom_PI7C9X7951,
2960 pbn_pericom_PI7C9X7952, 2970 pbn_pericom_PI7C9X7952,
@@ -3775,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
3775 .base_baud = 115200, 3785 .base_baud = 115200,
3776 .first_offset = 0x40, 3786 .first_offset = 0x40,
3777 }, 3787 },
3788 [pbn_wch382_2] = {
3789 .flags = FL_BASE0,
3790 .num_ports = 2,
3791 .base_baud = 115200,
3792 .uart_offset = 8,
3793 .first_offset = 0xC0,
3794 },
3778 [pbn_wch384_4] = { 3795 [pbn_wch384_4] = {
3779 .flags = FL_BASE0, 3796 .flags = FL_BASE0,
3780 .num_ports = 4, 3797 .num_ports = 4,
@@ -5574,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
5574 PCI_ANY_ID, PCI_ANY_ID, 5591 PCI_ANY_ID, PCI_ANY_ID,
5575 0, 0, pbn_b0_bt_2_115200 }, 5592 0, 0, pbn_b0_bt_2_115200 },
5576 5593
5594 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
5595 PCI_ANY_ID, PCI_ANY_ID,
5596 0, 0, pbn_wch382_2 },
5597
5577 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5598 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5578 PCI_ANY_ID, PCI_ANY_ID, 5599 PCI_ANY_ID, PCI_ANY_ID,
5579 0, 0, pbn_wch384_4 }, 5600 0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b645f9228ed7..fa49eb1e2fa2 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port)
1165 1165
1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1167 1167
1168static void wait_for_xmitr(struct uart_omap_port *up) 1168static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
1169{ 1169{
1170 unsigned int status, tmout = 10000; 1170 unsigned int status, tmout = 10000;
1171 1171
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
1343 1343
1344/* Enable or disable the rs485 support */ 1344/* Enable or disable the rs485 support */
1345static int 1345static int
1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) 1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
1347{ 1347{
1348 struct uart_omap_port *up = to_uart_omap_port(port); 1348 struct uart_omap_port *up = to_uart_omap_port(port);
1349 unsigned int mode; 1349 unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
1356 up->ier = 0; 1356 up->ier = 0;
1357 serial_out(up, UART_IER, 0); 1357 serial_out(up, UART_IER, 0);
1358 1358
1359 /* Clamp the delays to [0, 100ms] */
1360 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
1361 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
1362
1359 /* store new config */ 1363 /* store new config */
1360 port->rs485 = *rs485conf; 1364 port->rs485 = *rs485;
1361 1365
1362 /* 1366 /*
1363 * Just as a precaution, only allow rs485 1367 * Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5cec01c75691..a7eacef1bd22 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2066,13 +2066,12 @@ retry_open:
2066 if (tty) { 2066 if (tty) {
2067 mutex_unlock(&tty_mutex); 2067 mutex_unlock(&tty_mutex);
2068 retval = tty_lock_interruptible(tty); 2068 retval = tty_lock_interruptible(tty);
2069 tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
2069 if (retval) { 2070 if (retval) {
2070 if (retval == -EINTR) 2071 if (retval == -EINTR)
2071 retval = -ERESTARTSYS; 2072 retval = -ERESTARTSYS;
2072 goto err_unref; 2073 goto err_unref;
2073 } 2074 }
2074 /* safe to drop the kref from tty_driver_lookup_tty() */
2075 tty_kref_put(tty);
2076 retval = tty_reopen(tty); 2075 retval = tty_reopen(tty);
2077 if (retval < 0) { 2076 if (retval < 0) {
2078 tty_unlock(tty); 2077 tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d2f3c4cd697f..dfa9ec03fa8e 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -21,10 +21,15 @@ EXPORT_SYMBOL(tty_lock);
21 21
22int tty_lock_interruptible(struct tty_struct *tty) 22int tty_lock_interruptible(struct tty_struct *tty)
23{ 23{
24 int ret;
25
24 if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) 26 if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
25 return -EIO; 27 return -EIO;
26 tty_kref_get(tty); 28 tty_kref_get(tty);
27 return mutex_lock_interruptible(&tty->legacy_mutex); 29 ret = mutex_lock_interruptible(&tty->legacy_mutex);
30 if (ret)
31 tty_kref_put(tty);
32 return ret;
28} 33}
29 34
30void __lockfunc tty_unlock(struct tty_struct *tty) 35void __lockfunc tty_unlock(struct tty_struct *tty)
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
85 85
86 /* register a nop PHY */ 86 /* register a nop PHY */
87 ci->phy = usb_phy_generic_register(); 87 ci->phy = usb_phy_generic_register();
88 if (!ci->phy) 88 if (IS_ERR(ci->phy))
89 return -ENOMEM; 89 return PTR_ERR(ci->phy);
90 90
91 memset(res, 0, sizeof(res)); 91 memset(res, 0, sizeof(res));
92 res[0].start = pci_resource_start(pdev, 0); 92 res[0].start = pci_resource_start(pdev, 0);
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
100 if (sscanf(buf, "%u", &mode) != 1) 100 if (sscanf(buf, "%u", &mode) != 1)
101 return -EINVAL; 101 return -EINVAL;
102 102
103 if (mode > 255)
104 return -EBADRQC;
105
103 pm_runtime_get_sync(ci->dev); 106 pm_runtime_get_sync(ci->dev);
104 spin_lock_irqsave(&ci->lock, flags); 107 spin_lock_irqsave(&ci->lock, flags);
105 ret = hw_port_test_set(ci, mode); 108 ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9af5d8..51b436918f78 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5401 } 5401 }
5402 5402
5403 bos = udev->bos; 5403 bos = udev->bos;
5404 udev->bos = NULL;
5404 5405
5405 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5406 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5406 5407
@@ -5493,11 +5494,8 @@ done:
5493 usb_set_usb2_hardware_lpm(udev, 1); 5494 usb_set_usb2_hardware_lpm(udev, 1);
5494 usb_unlocked_enable_lpm(udev); 5495 usb_unlocked_enable_lpm(udev);
5495 usb_enable_ltm(udev); 5496 usb_enable_ltm(udev);
5496 /* release the new BOS descriptor allocated by hub_port_init() */ 5497 usb_release_bos_descriptor(udev);
5497 if (udev->bos != bos) { 5498 udev->bos = bos;
5498 usb_release_bos_descriptor(udev);
5499 udev->bos = bos;
5500 }
5501 return 0; 5499 return 0;
5502 5500
5503re_enumerate: 5501re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
1config USB_DWC2 1config USB_DWC2
2 tristate "DesignWare USB2 DRD Core Support" 2 tristate "DesignWare USB2 DRD Core Support"
3 depends on HAS_DMA
3 depends on USB || USB_GADGET 4 depends on USB || USB_GADGET
4 help 5 help
5 Say Y here if your system has a Dual Role Hi-Speed USB 6 Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55914db..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
619 __func__, hsotg->dr_mode); 619 __func__, hsotg->dr_mode);
620 break; 620 break;
621 } 621 }
622
623 /*
624 * NOTE: This is required for some rockchip soc based
625 * platforms.
626 */
627 msleep(50);
622} 628}
623 629
624/* 630/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc, 1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1175 halt_status, n_bytes, 1175 halt_status, n_bytes,
1176 xfer_done); 1176 xfer_done);
1177 if (*xfer_done && urb->status != -EINPROGRESS) 1177 if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1178 failed = 1;
1179
1180 if (failed) {
1181 dwc2_host_complete(hsotg, qtd, urb->status); 1178 dwc2_host_complete(hsotg, qtd, urb->status);
1182 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 1179 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1183 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n", 1180 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1184 failed, *xfer_done, urb->status); 1181 failed, *xfer_done);
1185 return failed; 1182 return failed;
1186 } 1183 }
1187 1184
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1236 1233
1237 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) { 1234 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1238 int i; 1235 int i;
1236 int qtd_desc_count;
1239 1237
1240 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry); 1238 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1241 xfer_done = 0; 1239 xfer_done = 0;
1240 qtd_desc_count = qtd->n_desc;
1242 1241
1243 for (i = 0; i < qtd->n_desc; i++) { 1242 for (i = 0; i < qtd_desc_count; i++) {
1244 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, 1243 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1245 desc_num, halt_status, 1244 desc_num, halt_status,
1246 &xfer_done)) { 1245 &xfer_done))
1247 qtd = NULL; 1246 goto stop_scan;
1248 break; 1247
1249 }
1250 desc_num++; 1248 desc_num++;
1251 } 1249 }
1252 } 1250 }
1253 1251
1252stop_scan:
1254 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) { 1253 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1255 /* 1254 /*
1256 * Resetting the data toggle for bulk and interrupt endpoints 1255 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1258 */ 1257 */
1259 if (halt_status == DWC2_HC_XFER_STALL) 1258 if (halt_status == DWC2_HC_XFER_STALL)
1260 qh->data_toggle = DWC2_HC_PID_DATA0; 1259 qh->data_toggle = DWC2_HC_PID_DATA0;
1261 else if (qtd) 1260 else
1262 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1263 } 1262 }
1264 1263
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; 525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
526 526
527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { 527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
528 if (WARN(!chan || !chan->qh,
529 "chan->qh must be specified for non-control eps\n"))
530 return;
531
528 if (pid == TSIZ_SC_MC_PID_DATA0) 532 if (pid == TSIZ_SC_MC_PID_DATA0)
529 chan->qh->data_toggle = DWC2_HC_PID_DATA0; 533 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
530 else 534 else
531 chan->qh->data_toggle = DWC2_HC_PID_DATA1; 535 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
532 } else { 536 } else {
537 if (WARN(!qtd,
538 "qtd must be specified for control eps\n"))
539 return;
540
533 if (pid == TSIZ_SC_MC_PID_DATA0) 541 if (pid == TSIZ_SC_MC_PID_DATA0)
534 qtd->data_toggle = DWC2_HC_PID_DATA0; 542 qtd->data_toggle = DWC2_HC_PID_DATA0;
535 else 543 else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
856 unsigned pullups_connected:1; 856 unsigned pullups_connected:1;
857 unsigned resize_fifos:1; 857 unsigned resize_fifos:1;
858 unsigned setup_packet_pending:1; 858 unsigned setup_packet_pending:1;
859 unsigned start_config_issued:1;
860 unsigned three_stage_setup:1; 859 unsigned three_stage_setup:1;
861 unsigned usb3_lpm_capable:1; 860 unsigned usb3_lpm_capable:1;
862 861
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
555 int ret; 555 int ret;
556 u32 reg; 556 u32 reg;
557 557
558 dwc->start_config_issued = false;
559 cfg = le16_to_cpu(ctrl->wValue); 558 cfg = le16_to_cpu(ctrl->wValue);
560 559
561 switch (state) { 560 switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
737 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 736 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
738 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 737 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
739 break; 738 break;
740 case USB_REQ_SET_INTERFACE:
741 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
742 dwc->start_config_issued = false;
743 /* Fall through */
744 default: 739 default:
745 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 740 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
746 ret = dwc3_ep0_delegate_req(dwc, ctrl); 741 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82a95ac..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
385 dep->trb_pool_dma = 0; 385 dep->trb_pool_dma = 0;
386} 386}
387 387
388static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
389
390/**
391 * dwc3_gadget_start_config - Configure EP resources
392 * @dwc: pointer to our controller context structure
393 * @dep: endpoint that is being enabled
394 *
395 * The assignment of transfer resources cannot perfectly follow the
396 * data book due to the fact that the controller driver does not have
397 * all knowledge of the configuration in advance. It is given this
398 * information piecemeal by the composite gadget framework after every
399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
400 * programming model in this scenario can cause errors. For two
401 * reasons:
402 *
403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
405 * multiple interfaces.
406 *
407 * 2) The databook does not mention doing more DEPXFERCFG for new
408 * endpoint on alt setting (8.1.6).
409 *
410 * The following simplified method is used instead:
411 *
412 * All hardware endpoints can be assigned a transfer resource and this
413 * setting will stay persistent until either a core reset or
414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
415 * do DEPXFERCFG for every hardware endpoint as well. We are
416 * guaranteed that there are as many transfer resources as endpoints.
417 *
418 * This function is called for each endpoint when it is being enabled
419 * but is triggered only when called for EP0-out, which always happens
420 * first, and which should only happen in one of the above conditions.
421 */
388static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 422static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
389{ 423{
390 struct dwc3_gadget_ep_cmd_params params; 424 struct dwc3_gadget_ep_cmd_params params;
391 u32 cmd; 425 u32 cmd;
426 int i;
427 int ret;
428
429 if (dep->number)
430 return 0;
392 431
393 memset(&params, 0x00, sizeof(params)); 432 memset(&params, 0x00, sizeof(params));
433 cmd = DWC3_DEPCMD_DEPSTARTCFG;
394 434
395 if (dep->number != 1) { 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
396 cmd = DWC3_DEPCMD_DEPSTARTCFG; 436 if (ret)
397 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 437 return ret;
398 if (dep->number > 1) {
399 if (dwc->start_config_issued)
400 return 0;
401 dwc->start_config_issued = true;
402 cmd |= DWC3_DEPCMD_PARAM(2);
403 }
404 438
405 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params); 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
440 struct dwc3_ep *dep = dwc->eps[i];
441
442 if (!dep)
443 continue;
444
445 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
446 if (ret)
447 return ret;
406 } 448 }
407 449
408 return 0; 450 return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
516 struct dwc3_trb *trb_st_hw; 558 struct dwc3_trb *trb_st_hw;
517 struct dwc3_trb *trb_link; 559 struct dwc3_trb *trb_link;
518 560
519 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
520 if (ret)
521 return ret;
522
523 dep->endpoint.desc = desc; 561 dep->endpoint.desc = desc;
524 dep->comp_desc = comp_desc; 562 dep->comp_desc = comp_desc;
525 dep->type = usb_endpoint_type(desc); 563 dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1636 } 1674 }
1637 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1675 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1638 1676
1639 dwc->start_config_issued = false;
1640
1641 /* Start with SuperSpeed Default */ 1677 /* Start with SuperSpeed Default */
1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1678 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1643 1679
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2237 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2273 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2238 2274
2239 dwc3_disconnect_gadget(dwc); 2275 dwc3_disconnect_gadget(dwc);
2240 dwc->start_config_issued = false;
2241 2276
2242 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2277 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2243 dwc->setup_packet_pending = false; 2278 dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2288 2323
2289 dwc3_stop_active_transfers(dwc); 2324 dwc3_stop_active_transfers(dwc);
2290 dwc3_clear_stall_all_ep(dwc); 2325 dwc3_clear_stall_all_ep(dwc);
2291 dwc->start_config_issued = false;
2292 2326
2293 /* Reset device address to zero */ 2327 /* Reset device address to zero */
2294 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2328 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
130 setup_can_stall : 1, 130 setup_can_stall : 1,
131 setup_out_ready : 1, 131 setup_out_ready : 1,
132 setup_out_error : 1, 132 setup_out_error : 1,
133 setup_abort : 1; 133 setup_abort : 1,
134 gadget_registered : 1;
134 unsigned setup_wLength; 135 unsigned setup_wLength;
135 136
136 /* the rest is basically write-once */ 137 /* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
1179 1180
1180 /* closing ep0 === shutdown all */ 1181 /* closing ep0 === shutdown all */
1181 1182
1182 usb_gadget_unregister_driver (&gadgetfs_driver); 1183 if (dev->gadget_registered)
1184 usb_gadget_unregister_driver (&gadgetfs_driver);
1183 1185
1184 /* at this point "good" hardware has disconnected the 1186 /* at this point "good" hardware has disconnected the
1185 * device from USB; the host won't see it any more. 1187 * device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1847 * kick in after the ep0 descriptor is closed. 1849 * kick in after the ep0 descriptor is closed.
1848 */ 1850 */
1849 value = len; 1851 value = len;
1852 dev->gadget_registered = true;
1850 } 1853 }
1851 return value; 1854 return value;
1852 1855
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2340{ 2340{
2341 struct qe_udc *udc; 2341 struct qe_udc *udc;
2342 struct device_node *np = ofdev->dev.of_node; 2342 struct device_node *np = ofdev->dev.of_node;
2343 unsigned int tmp_addr = 0; 2343 unsigned long tmp_addr = 0;
2344 struct usb_device_para __iomem *usbpram; 2344 struct usb_device_para __iomem *usbpram;
2345 unsigned int i; 2345 unsigned int i;
2346 u64 size; 2346 u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, 369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
370 0x50, 0x20, 0x70, 0x40, 0x90 }; 370 0x50, 0x20, 0x70, 0x40, 0x90 };
371 371
372 if (ep->dev->enhanced_mode) 372 if (ep->dev->enhanced_mode) {
373 reg = ep_enhanced[ep->num]; 373 reg = ep_enhanced[ep->num];
374 else{ 374 switch (ep->dev->gadget.speed) {
375 case USB_SPEED_SUPER:
376 reg += 2;
377 break;
378 case USB_SPEED_FULL:
379 reg += 1;
380 break;
381 case USB_SPEED_HIGH:
382 default:
383 break;
384 }
385 } else {
375 reg = (ep->num + 1) * 0x10; 386 reg = (ep->num + 1) * 0x10;
376 if (ep->dev->gadget.speed != USB_SPEED_HIGH) 387 if (ep->dev->gadget.speed != USB_SPEED_HIGH)
377 reg += 1; 388 reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
413 if (!driver->udc_name || strcmp(driver->udc_name, 413 if (!driver->udc_name || strcmp(driver->udc_name,
414 dev_name(&udc->dev)) == 0) { 414 dev_name(&udc->dev)) == 0) {
415 ret = udc_bind_to_driver(udc, driver); 415 ret = udc_bind_to_driver(udc, driver);
416 if (ret != -EPROBE_DEFER)
417 list_del(&driver->pending);
416 if (ret) 418 if (ret)
417 goto err4; 419 goto err4;
418 list_del(&driver->pending);
419 break; 420 break;
420 } 421 }
421 } 422 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
664 } 664 }
665 channel->desired_mode = mode; 665 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 666 musb_writew(epio, MUSB_TXCSR, csr);
667 667
668 return 0; 668 return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
2003 qh->offset, 2003 qh->offset,
2004 urb->transfer_buffer_length); 2004 urb->transfer_buffer_length);
2005 2005
2006 done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, 2006 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
2007 urb, xfer_len, 2007 xfer_len, iso_err))
2008 iso_err);
2009 if (done)
2010 goto finish; 2008 goto finish;
2011 else 2009 else
2012 dev_err(musb->controller, "error: rx_dma failed\n"); 2010 dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
757 otg->host = host; 757 otg->host = host;
758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n"); 758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
759 759
760 /* 760 pm_runtime_get_sync(otg->usb_phy->dev);
761 * Kick the state machine work, if peripheral is not supported 761 schedule_work(&motg->sm_work);
762 * or peripheral is already registered with us.
763 */
764 if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
765 pm_runtime_get_sync(otg->usb_phy->dev);
766 schedule_work(&motg->sm_work);
767 }
768 762
769 return 0; 763 return 0;
770} 764}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
827 dev_dbg(otg->usb_phy->dev, 821 dev_dbg(otg->usb_phy->dev,
828 "peripheral driver registered w/ tranceiver\n"); 822 "peripheral driver registered w/ tranceiver\n");
829 823
830 /* 824 pm_runtime_get_sync(otg->usb_phy->dev);
831 * Kick the state machine work, if host is not supported 825 schedule_work(&motg->sm_work);
832 * or host is already registered with us.
833 */
834 if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
835 pm_runtime_get_sync(otg->usb_phy->dev);
836 schedule_work(&motg->sm_work);
837 }
838 826
839 return 0; 827 return 0;
840} 828}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b8a7f9..7c319e7edda2 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,8 @@ static const struct usb_device_id id_table[] = {
163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
166 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 168 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
167 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 169 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
168 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ 170 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e512e0fc..8849439a8f18 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
315#define TOSHIBA_PRODUCT_G450 0x0d45 315#define TOSHIBA_PRODUCT_G450 0x0d45
316 316
317#define ALINK_VENDOR_ID 0x1e0e 317#define ALINK_VENDOR_ID 0x1e0e
318#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
318#define ALINK_PRODUCT_PH300 0x9100 319#define ALINK_PRODUCT_PH300 0x9100
319#define ALINK_PRODUCT_3GU 0x9200 320#define ALINK_PRODUCT_3GU 0x9200
320 321
@@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
607 .reserved = BIT(3) | BIT(4), 608 .reserved = BIT(3) | BIT(4),
608}; 609};
609 610
611static const struct option_blacklist_info simcom_sim7100e_blacklist = {
612 .reserved = BIT(5) | BIT(6),
613};
614
610static const struct option_blacklist_info telit_le910_blacklist = { 615static const struct option_blacklist_info telit_le910_blacklist = {
611 .sendsetup = BIT(0), 616 .sendsetup = BIT(0),
612 .reserved = BIT(1) | BIT(2), 617 .reserved = BIT(1) | BIT(2),
@@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
1122 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1127 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1123 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1128 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1124 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1129 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1130 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1131 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1125 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1132 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1126 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1133 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1127 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1134 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
@@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
1645 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 1652 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1646 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1653 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1647 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1654 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1655 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1656 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1648 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1657 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1649 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1658 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1650 }, 1659 },
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725c6b5b..6b2a06d09f2b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr)
152 152
153struct da8xx_fb_par { 153struct da8xx_fb_par {
154 struct device *dev; 154 struct device *dev;
155 resource_size_t p_palette_base; 155 dma_addr_t p_palette_base;
156 unsigned char *v_palette_base; 156 unsigned char *v_palette_base;
157 dma_addr_t vram_phys; 157 dma_addr_t vram_phys;
158 unsigned long vram_size; 158 unsigned long vram_size;
@@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device)
1428 1428
1429 par->vram_virt = dma_alloc_coherent(NULL, 1429 par->vram_virt = dma_alloc_coherent(NULL,
1430 par->vram_size, 1430 par->vram_size,
1431 (resource_size_t *) &par->vram_phys, 1431 &par->vram_phys,
1432 GFP_KERNEL | GFP_DMA); 1432 GFP_KERNEL | GFP_DMA);
1433 if (!par->vram_virt) { 1433 if (!par->vram_virt) {
1434 dev_err(&device->dev, 1434 dev_err(&device->dev,
@@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device)
1448 1448
1449 /* allocate palette buffer */ 1449 /* allocate palette buffer */
1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
1451 (resource_size_t *)&par->p_palette_base, 1451 &par->p_palette_base,
1452 GFP_KERNEL | GFP_DMA); 1452 GFP_KERNEL | GFP_DMA);
1453 if (!par->v_palette_base) { 1453 if (!par->v_palette_base) {
1454 dev_err(&device->dev, 1454 dev_err(&device->dev,
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f26e39c..de2f3e793786 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
833static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
834{ 833{
835 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 834 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
836 835
@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
843 return 0; 842 return 0;
844} 843}
845 844
846static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) 845static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
847{ 846{
848 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 847 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
849 848
@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
855 854
856 return 0; 855 return 0;
857} 856}
858#else
859#define s6e8ax0_suspend NULL
860#define s6e8ax0_resume NULL
861#endif
862 857
863static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { 858static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
864 .name = "s6e8ax0", 859 .name = "s6e8ax0",
@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
867 .power_on = s6e8ax0_power_on, 862 .power_on = s6e8ax0_power_on,
868 .set_sequence = s6e8ax0_set_sequence, 863 .set_sequence = s6e8ax0_set_sequence,
869 .probe = s6e8ax0_probe, 864 .probe = s6e8ax0_probe,
870 .suspend = s6e8ax0_suspend, 865 .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
871 .resume = s6e8ax0_resume, 866 .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
872}; 867};
873 868
874static int s6e8ax0_init(void) 869static int s6e8ax0_init(void)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index cee88603efc9..bb2f1e866020 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev)
902 goto failed_getclock; 902 goto failed_getclock;
903 } 903 }
904 904
905 /*
906 * The LCDC controller does not have an enable bit. The
907 * controller starts directly when the clocks are enabled.
908 * If the clocks are enabled when the controller is not yet
909 * programmed with proper register values (enabled at the
910 * bootloader, for example) then it just goes into some undefined
911 * state.
912 * To avoid this issue, let's enable and disable LCDC IPG clock
913 * so that we force some kind of 'reset' to the LCDC block.
914 */
915 ret = clk_prepare_enable(fbi->clk_ipg);
916 if (ret)
917 goto failed_getclock;
918 clk_disable_unprepare(fbi->clk_ipg);
919
905 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 920 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
906 if (IS_ERR(fbi->clk_ahb)) { 921 if (IS_ERR(fbi->clk_ahb)) {
907 ret = PTR_ERR(fbi->clk_ahb); 922 ret = PTR_ERR(fbi->clk_ahb);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index de54a4748065..b6f83d5df9fd 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev)
503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, 503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
504 res->start, resource_size(res)); 504 res->start, resource_size(res));
505 if (ctrl->reg_base == NULL) { 505 if (ctrl->reg_base == NULL) {
506 dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__, 506 dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
507 res->start, res->end);
508 ret = -ENOMEM; 507 ret = -ENOMEM;
509 goto failed; 508 goto failed;
510 } 509 }
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index c9293aea8ec3..a970edc2a6f8 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev)
123 123
124 /* Horizontal timings */ 124 /* Horizontal timings */
125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | 125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
126 (var->right_margin - 1) << 16 | (var->xres - 1)); 126 (var->left_margin - 1) << 16 | (var->xres - 1));
127 127
128 /* Vertical timings */ 128 /* Vertical timings */
129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | 129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
130 (var->lower_margin - 1) << 16 | (var->yres - 1)); 130 (var->upper_margin - 1) << 16 | (var->yres - 1));
131 131
132 /* Total length of frame */ 132 /* Total length of frame */
133 hlen = var->left_margin + var->right_margin + var->hsync_len + 133 hlen = var->left_margin + var->right_margin + var->hsync_len +
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
227 /* 227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside. 229 * to access the BARs where the MSI-X entries reside.
230 * But VF devices are unique in which the PF needs to be checked.
230 */ 231 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd); 232 pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 233 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO; 234 return -ENXIO;
234 235
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
332 struct xen_pcibk_dev_data *dev_data = NULL; 333 struct xen_pcibk_dev_data *dev_data = NULL;
333 struct xen_pci_op *op = &pdev->op; 334 struct xen_pci_op *op = &pdev->op;
334 int test_intx = 0; 335 int test_intx = 0;
336#ifdef CONFIG_PCI_MSI
337 unsigned int nr = 0;
338#endif
335 339
336 *op = pdev->sh_info->op; 340 *op = pdev->sh_info->op;
337 barrier(); 341 barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
360 op->err = xen_pcibk_disable_msi(pdev, dev, op); 364 op->err = xen_pcibk_disable_msi(pdev, dev, op);
361 break; 365 break;
362 case XEN_PCI_OP_enable_msix: 366 case XEN_PCI_OP_enable_msix:
367 nr = op->value;
363 op->err = xen_pcibk_enable_msix(pdev, dev, op); 368 op->err = xen_pcibk_enable_msix(pdev, dev, op);
364 break; 369 break;
365 case XEN_PCI_OP_disable_msix: 370 case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 387 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i; 388 unsigned int i;
384 389
385 for (i = 0; i < op->value; i++) 390 for (i = 0; i < nr; i++)
386 pdev->sh_info->op.msix_entries[i].vector = 391 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector; 392 op->msix_entries[i].vector;
388 } 393 }
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
849} 849}
850 850
851/* 851/*
852 Check for a translation entry being present
853*/
854static struct v2p_entry *scsiback_chk_translation_entry(
855 struct vscsibk_info *info, struct ids_tuple *v)
856{
857 struct list_head *head = &(info->v2p_entry_lists);
858 struct v2p_entry *entry;
859
860 list_for_each_entry(entry, head, l)
861 if ((entry->v.chn == v->chn) &&
862 (entry->v.tgt == v->tgt) &&
863 (entry->v.lun == v->lun))
864 return entry;
865
866 return NULL;
867}
868
869/*
852 Add a new translation entry 870 Add a new translation entry
853*/ 871*/
854static int scsiback_add_translation_entry(struct vscsibk_info *info, 872static int scsiback_add_translation_entry(struct vscsibk_info *info,
855 char *phy, struct ids_tuple *v) 873 char *phy, struct ids_tuple *v)
856{ 874{
857 int err = 0; 875 int err = 0;
858 struct v2p_entry *entry;
859 struct v2p_entry *new; 876 struct v2p_entry *new;
860 struct list_head *head = &(info->v2p_entry_lists);
861 unsigned long flags; 877 unsigned long flags;
862 char *lunp; 878 char *lunp;
863 unsigned long long unpacked_lun; 879 unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
917 spin_lock_irqsave(&info->v2p_lock, flags); 933 spin_lock_irqsave(&info->v2p_lock, flags);
918 934
919 /* Check double assignment to identical virtual ID */ 935 /* Check double assignment to identical virtual ID */
920 list_for_each_entry(entry, head, l) { 936 if (scsiback_chk_translation_entry(info, v)) {
921 if ((entry->v.chn == v->chn) && 937 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
922 (entry->v.tgt == v->tgt) && 938 err = -EEXIST;
923 (entry->v.lun == v->lun)) { 939 goto out;
924 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
925 err = -EEXIST;
926 goto out;
927 }
928
929 } 940 }
930 941
931 /* Create a new translation entry and add to the list */ 942 /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
933 new->v = *v; 944 new->v = *v;
934 new->tpg = tpg; 945 new->tpg = tpg;
935 new->lun = unpacked_lun; 946 new->lun = unpacked_lun;
936 list_add_tail(&new->l, head); 947 list_add_tail(&new->l, &info->v2p_entry_lists);
937 948
938out: 949out:
939 spin_unlock_irqrestore(&info->v2p_lock, flags); 950 spin_unlock_irqrestore(&info->v2p_lock, flags);
940 951
941out_free: 952out_free:
942 mutex_lock(&tpg->tv_tpg_mutex); 953 if (err) {
943 tpg->tv_tpg_fe_count--; 954 mutex_lock(&tpg->tv_tpg_mutex);
944 mutex_unlock(&tpg->tv_tpg_mutex); 955 tpg->tv_tpg_fe_count--;
945 956 mutex_unlock(&tpg->tv_tpg_mutex);
946 if (err)
947 kfree(new); 957 kfree(new);
958 }
948 959
949 return err; 960 return err;
950} 961}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
956} 967}
957 968
958/* 969/*
959 Delete the translation entry specfied 970 Delete the translation entry specified
960*/ 971*/
961static int scsiback_del_translation_entry(struct vscsibk_info *info, 972static int scsiback_del_translation_entry(struct vscsibk_info *info,
962 struct ids_tuple *v) 973 struct ids_tuple *v)
963{ 974{
964 struct v2p_entry *entry; 975 struct v2p_entry *entry;
965 struct list_head *head = &(info->v2p_entry_lists);
966 unsigned long flags; 976 unsigned long flags;
977 int ret = 0;
967 978
968 spin_lock_irqsave(&info->v2p_lock, flags); 979 spin_lock_irqsave(&info->v2p_lock, flags);
969 /* Find out the translation entry specified */ 980 /* Find out the translation entry specified */
970 list_for_each_entry(entry, head, l) { 981 entry = scsiback_chk_translation_entry(info, v);
971 if ((entry->v.chn == v->chn) && 982 if (entry)
972 (entry->v.tgt == v->tgt) && 983 __scsiback_del_translation_entry(entry);
973 (entry->v.lun == v->lun)) { 984 else
974 goto found; 985 ret = -ENOENT;
975 }
976 }
977
978 spin_unlock_irqrestore(&info->v2p_lock, flags);
979 return 1;
980
981found:
982 /* Delete the translation entry specfied */
983 __scsiback_del_translation_entry(entry);
984 986
985 spin_unlock_irqrestore(&info->v2p_lock, flags); 987 spin_unlock_irqrestore(&info->v2p_lock, flags);
986 return 0; 988 return ret;
987} 989}
988 990
989static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, 991static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
990 char *phy, struct ids_tuple *vir, int try) 992 char *phy, struct ids_tuple *vir, int try)
991{ 993{
994 struct v2p_entry *entry;
995 unsigned long flags;
996
997 if (try) {
998 spin_lock_irqsave(&info->v2p_lock, flags);
999 entry = scsiback_chk_translation_entry(info, vir);
1000 spin_unlock_irqrestore(&info->v2p_lock, flags);
1001 if (entry)
1002 return;
1003 }
992 if (!scsiback_add_translation_entry(info, phy, vir)) { 1004 if (!scsiback_add_translation_entry(info, phy, vir)) {
993 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 1005 if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
994 "%d", XenbusStateInitialised)) { 1006 "%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
188 188
189 if (len == 0) 189 if (len == 0)
190 return 0; 190 return 0;
191 if (len > XENSTORE_PAYLOAD_MAX)
192 return -EINVAL;
191 193
192 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); 194 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
193 if (rb == NULL) 195 if (rb == NULL)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0548c53f41d5..22fc7c802d69 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -511,8 +511,6 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, 511 pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
512 page->index, to); 512 page->index, to);
513 BUG_ON(to > PAGE_CACHE_SIZE); 513 BUG_ON(to > PAGE_CACHE_SIZE);
514 kmap(page);
515 data = page_address(page);
516 bsize = AFFS_SB(sb)->s_data_blksize; 514 bsize = AFFS_SB(sb)->s_data_blksize;
517 tmp = page->index << PAGE_CACHE_SHIFT; 515 tmp = page->index << PAGE_CACHE_SHIFT;
518 bidx = tmp / bsize; 516 bidx = tmp / bsize;
@@ -524,14 +522,15 @@ affs_do_readpage_ofs(struct page *page, unsigned to)
524 return PTR_ERR(bh); 522 return PTR_ERR(bh);
525 tmp = min(bsize - boff, to - pos); 523 tmp = min(bsize - boff, to - pos);
526 BUG_ON(pos + tmp > to || tmp > bsize); 524 BUG_ON(pos + tmp > to || tmp > bsize);
525 data = kmap_atomic(page);
527 memcpy(data + pos, AFFS_DATA(bh) + boff, tmp); 526 memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
527 kunmap_atomic(data);
528 affs_brelse(bh); 528 affs_brelse(bh);
529 bidx++; 529 bidx++;
530 pos += tmp; 530 pos += tmp;
531 boff = 0; 531 boff = 0;
532 } 532 }
533 flush_dcache_page(page); 533 flush_dcache_page(page);
534 kunmap(page);
535 return 0; 534 return 0;
536} 535}
537 536
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 051ea4809c14..7d914c67a9d0 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -653,7 +653,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
653 653
654 if ((current->flags & PF_RANDOMIZE) && 654 if ((current->flags & PF_RANDOMIZE) &&
655 !(current->personality & ADDR_NO_RANDOMIZE)) { 655 !(current->personality & ADDR_NO_RANDOMIZE)) {
656 random_variable = (unsigned long) get_random_int(); 656 random_variable = get_random_long();
657 random_variable &= STACK_RND_MASK; 657 random_variable &= STACK_RND_MASK;
658 random_variable <<= PAGE_SHIFT; 658 random_variable <<= PAGE_SHIFT;
659 } 659 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 39b3a174a425..826b164a4b5b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1201,7 +1201,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1201 bdev->bd_disk = disk; 1201 bdev->bd_disk = disk;
1202 bdev->bd_queue = disk->queue; 1202 bdev->bd_queue = disk->queue;
1203 bdev->bd_contains = bdev; 1203 bdev->bd_contains = bdev;
1204 bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0; 1204 if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
1205 bdev->bd_inode->i_flags = S_DAX;
1206 else
1207 bdev->bd_inode->i_flags = 0;
1208
1205 if (!partno) { 1209 if (!partno) {
1206 ret = -ENXIO; 1210 ret = -ENXIO;
1207 bdev->bd_part = disk_get_part(disk, partno); 1211 bdev->bd_part = disk_get_part(disk, partno);
@@ -1693,13 +1697,24 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
1693 return try_to_free_buffers(page); 1697 return try_to_free_buffers(page);
1694} 1698}
1695 1699
1700static int blkdev_writepages(struct address_space *mapping,
1701 struct writeback_control *wbc)
1702{
1703 if (dax_mapping(mapping)) {
1704 struct block_device *bdev = I_BDEV(mapping->host);
1705
1706 return dax_writeback_mapping_range(mapping, bdev, wbc);
1707 }
1708 return generic_writepages(mapping, wbc);
1709}
1710
1696static const struct address_space_operations def_blk_aops = { 1711static const struct address_space_operations def_blk_aops = {
1697 .readpage = blkdev_readpage, 1712 .readpage = blkdev_readpage,
1698 .readpages = blkdev_readpages, 1713 .readpages = blkdev_readpages,
1699 .writepage = blkdev_writepage, 1714 .writepage = blkdev_writepage,
1700 .write_begin = blkdev_write_begin, 1715 .write_begin = blkdev_write_begin,
1701 .write_end = blkdev_write_end, 1716 .write_end = blkdev_write_end,
1702 .writepages = generic_writepages, 1717 .writepages = blkdev_writepages,
1703 .releasepage = blkdev_releasepage, 1718 .releasepage = blkdev_releasepage,
1704 .direct_IO = blkdev_direct_IO, 1719 .direct_IO = blkdev_direct_IO,
1705 .is_dirty_writeback = buffer_check_dirty_writeback, 1720 .is_dirty_writeback = buffer_check_dirty_writeback,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b90cd3776f8e..f6dac40f87ff 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1406,7 +1406,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1406 read_extent_buffer(eb, dest + bytes_left, 1406 read_extent_buffer(eb, dest + bytes_left,
1407 name_off, name_len); 1407 name_off, name_len);
1408 if (eb != eb_in) { 1408 if (eb != eb_in) {
1409 btrfs_tree_read_unlock_blocking(eb); 1409 if (!path->skip_locking)
1410 btrfs_tree_read_unlock_blocking(eb);
1410 free_extent_buffer(eb); 1411 free_extent_buffer(eb);
1411 } 1412 }
1412 ret = btrfs_find_item(fs_root, path, parent, 0, 1413 ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1426,9 +1427,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1426 eb = path->nodes[0]; 1427 eb = path->nodes[0];
1427 /* make sure we can use eb after releasing the path */ 1428 /* make sure we can use eb after releasing the path */
1428 if (eb != eb_in) { 1429 if (eb != eb_in) {
1429 atomic_inc(&eb->refs); 1430 if (!path->skip_locking)
1430 btrfs_tree_read_lock(eb); 1431 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1431 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); 1432 path->nodes[0] = NULL;
1433 path->locks[0] = 0;
1432 } 1434 }
1433 btrfs_release_path(path); 1435 btrfs_release_path(path);
1434 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); 1436 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c473c42d7d6c..3346cd8f9910 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -637,11 +637,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
637 faili = nr_pages - 1; 637 faili = nr_pages - 1;
638 cb->nr_pages = nr_pages; 638 cb->nr_pages = nr_pages;
639 639
640 /* In the parent-locked case, we only locked the range we are 640 add_ra_bio_pages(inode, em_start + em_len, cb);
641 * interested in. In all other cases, we can opportunistically
642 * cache decompressed data that goes beyond the requested range. */
643 if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
644 add_ra_bio_pages(inode, em_start + em_len, cb);
645 641
646 /* include any pages we added in add_ra-bio_pages */ 642 /* include any pages we added in add_ra-bio_pages */
647 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; 643 uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 0be47e4b8136..b57daa895cea 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1689,7 +1689,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
1689 * 1689 *
1690 */ 1690 */
1691int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, 1691int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1692 struct list_head *ins_list) 1692 struct list_head *ins_list, bool *emitted)
1693{ 1693{
1694 struct btrfs_dir_item *di; 1694 struct btrfs_dir_item *di;
1695 struct btrfs_delayed_item *curr, *next; 1695 struct btrfs_delayed_item *curr, *next;
@@ -1733,6 +1733,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1733 1733
1734 if (over) 1734 if (over)
1735 return 1; 1735 return 1;
1736 *emitted = true;
1736 } 1737 }
1737 return 0; 1738 return 0;
1738} 1739}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f70119f25421..0167853c84ae 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
144int btrfs_should_delete_dir_index(struct list_head *del_list, 144int btrfs_should_delete_dir_index(struct list_head *del_list,
145 u64 index); 145 u64 index);
146int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, 146int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
147 struct list_head *ins_list); 147 struct list_head *ins_list, bool *emitted);
148 148
149/* for init */ 149/* for init */
150int __init btrfs_delayed_inode_init(void); 150int __init btrfs_delayed_inode_init(void);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2e7c97a3f344..392592dc7010 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2897,12 +2897,11 @@ static int __do_readpage(struct extent_io_tree *tree,
2897 struct block_device *bdev; 2897 struct block_device *bdev;
2898 int ret; 2898 int ret;
2899 int nr = 0; 2899 int nr = 0;
2900 int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
2901 size_t pg_offset = 0; 2900 size_t pg_offset = 0;
2902 size_t iosize; 2901 size_t iosize;
2903 size_t disk_io_size; 2902 size_t disk_io_size;
2904 size_t blocksize = inode->i_sb->s_blocksize; 2903 size_t blocksize = inode->i_sb->s_blocksize;
2905 unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED; 2904 unsigned long this_bio_flag = 0;
2906 2905
2907 set_page_extent_mapped(page); 2906 set_page_extent_mapped(page);
2908 2907
@@ -2942,18 +2941,16 @@ static int __do_readpage(struct extent_io_tree *tree,
2942 kunmap_atomic(userpage); 2941 kunmap_atomic(userpage);
2943 set_extent_uptodate(tree, cur, cur + iosize - 1, 2942 set_extent_uptodate(tree, cur, cur + iosize - 1,
2944 &cached, GFP_NOFS); 2943 &cached, GFP_NOFS);
2945 if (!parent_locked) 2944 unlock_extent_cached(tree, cur,
2946 unlock_extent_cached(tree, cur, 2945 cur + iosize - 1,
2947 cur + iosize - 1, 2946 &cached, GFP_NOFS);
2948 &cached, GFP_NOFS);
2949 break; 2947 break;
2950 } 2948 }
2951 em = __get_extent_map(inode, page, pg_offset, cur, 2949 em = __get_extent_map(inode, page, pg_offset, cur,
2952 end - cur + 1, get_extent, em_cached); 2950 end - cur + 1, get_extent, em_cached);
2953 if (IS_ERR_OR_NULL(em)) { 2951 if (IS_ERR_OR_NULL(em)) {
2954 SetPageError(page); 2952 SetPageError(page);
2955 if (!parent_locked) 2953 unlock_extent(tree, cur, end);
2956 unlock_extent(tree, cur, end);
2957 break; 2954 break;
2958 } 2955 }
2959 extent_offset = cur - em->start; 2956 extent_offset = cur - em->start;
@@ -3038,12 +3035,9 @@ static int __do_readpage(struct extent_io_tree *tree,
3038 3035
3039 set_extent_uptodate(tree, cur, cur + iosize - 1, 3036 set_extent_uptodate(tree, cur, cur + iosize - 1,
3040 &cached, GFP_NOFS); 3037 &cached, GFP_NOFS);
3041 if (parent_locked) 3038 unlock_extent_cached(tree, cur,
3042 free_extent_state(cached); 3039 cur + iosize - 1,
3043 else 3040 &cached, GFP_NOFS);
3044 unlock_extent_cached(tree, cur,
3045 cur + iosize - 1,
3046 &cached, GFP_NOFS);
3047 cur = cur + iosize; 3041 cur = cur + iosize;
3048 pg_offset += iosize; 3042 pg_offset += iosize;
3049 continue; 3043 continue;
@@ -3052,8 +3046,7 @@ static int __do_readpage(struct extent_io_tree *tree,
3052 if (test_range_bit(tree, cur, cur_end, 3046 if (test_range_bit(tree, cur, cur_end,
3053 EXTENT_UPTODATE, 1, NULL)) { 3047 EXTENT_UPTODATE, 1, NULL)) {
3054 check_page_uptodate(tree, page); 3048 check_page_uptodate(tree, page);
3055 if (!parent_locked) 3049 unlock_extent(tree, cur, cur + iosize - 1);
3056 unlock_extent(tree, cur, cur + iosize - 1);
3057 cur = cur + iosize; 3050 cur = cur + iosize;
3058 pg_offset += iosize; 3051 pg_offset += iosize;
3059 continue; 3052 continue;
@@ -3063,8 +3056,7 @@ static int __do_readpage(struct extent_io_tree *tree,
3063 */ 3056 */
3064 if (block_start == EXTENT_MAP_INLINE) { 3057 if (block_start == EXTENT_MAP_INLINE) {
3065 SetPageError(page); 3058 SetPageError(page);
3066 if (!parent_locked) 3059 unlock_extent(tree, cur, cur + iosize - 1);
3067 unlock_extent(tree, cur, cur + iosize - 1);
3068 cur = cur + iosize; 3060 cur = cur + iosize;
3069 pg_offset += iosize; 3061 pg_offset += iosize;
3070 continue; 3062 continue;
@@ -3083,8 +3075,7 @@ static int __do_readpage(struct extent_io_tree *tree,
3083 *bio_flags = this_bio_flag; 3075 *bio_flags = this_bio_flag;
3084 } else { 3076 } else {
3085 SetPageError(page); 3077 SetPageError(page);
3086 if (!parent_locked) 3078 unlock_extent(tree, cur, cur + iosize - 1);
3087 unlock_extent(tree, cur, cur + iosize - 1);
3088 } 3079 }
3089 cur = cur + iosize; 3080 cur = cur + iosize;
3090 pg_offset += iosize; 3081 pg_offset += iosize;
@@ -3213,20 +3204,6 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
3213 return ret; 3204 return ret;
3214} 3205}
3215 3206
3216int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
3217 get_extent_t *get_extent, int mirror_num)
3218{
3219 struct bio *bio = NULL;
3220 unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
3221 int ret;
3222
3223 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
3224 &bio_flags, READ, NULL);
3225 if (bio)
3226 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
3227 return ret;
3228}
3229
3230static noinline void update_nr_written(struct page *page, 3207static noinline void update_nr_written(struct page *page,
3231 struct writeback_control *wbc, 3208 struct writeback_control *wbc,
3232 unsigned long nr_written) 3209 unsigned long nr_written)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0377413bd4b9..880d5292e972 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -29,7 +29,6 @@
29 */ 29 */
30#define EXTENT_BIO_COMPRESSED 1 30#define EXTENT_BIO_COMPRESSED 1
31#define EXTENT_BIO_TREE_LOG 2 31#define EXTENT_BIO_TREE_LOG 2
32#define EXTENT_BIO_PARENT_LOCKED 4
33#define EXTENT_BIO_FLAG_SHIFT 16 32#define EXTENT_BIO_FLAG_SHIFT 16
34 33
35/* these are bit numbers for test/set bit */ 34/* these are bit numbers for test/set bit */
@@ -210,8 +209,6 @@ static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
210int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); 209int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
211int extent_read_full_page(struct extent_io_tree *tree, struct page *page, 210int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
212 get_extent_t *get_extent, int mirror_num); 211 get_extent_t *get_extent, int mirror_num);
213int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
214 get_extent_t *get_extent, int mirror_num);
215int __init extent_io_init(void); 212int __init extent_io_init(void);
216void extent_io_exit(void); 213void extent_io_exit(void);
217 214
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5f06eb1f4384..d96f5cf38a2d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5717,6 +5717,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5717 char *name_ptr; 5717 char *name_ptr;
5718 int name_len; 5718 int name_len;
5719 int is_curr = 0; /* ctx->pos points to the current index? */ 5719 int is_curr = 0; /* ctx->pos points to the current index? */
5720 bool emitted;
5720 5721
5721 /* FIXME, use a real flag for deciding about the key type */ 5722 /* FIXME, use a real flag for deciding about the key type */
5722 if (root->fs_info->tree_root == root) 5723 if (root->fs_info->tree_root == root)
@@ -5745,6 +5746,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5745 if (ret < 0) 5746 if (ret < 0)
5746 goto err; 5747 goto err;
5747 5748
5749 emitted = false;
5748 while (1) { 5750 while (1) {
5749 leaf = path->nodes[0]; 5751 leaf = path->nodes[0];
5750 slot = path->slots[0]; 5752 slot = path->slots[0];
@@ -5824,6 +5826,7 @@ skip:
5824 5826
5825 if (over) 5827 if (over)
5826 goto nopos; 5828 goto nopos;
5829 emitted = true;
5827 di_len = btrfs_dir_name_len(leaf, di) + 5830 di_len = btrfs_dir_name_len(leaf, di) +
5828 btrfs_dir_data_len(leaf, di) + sizeof(*di); 5831 btrfs_dir_data_len(leaf, di) + sizeof(*di);
5829 di_cur += di_len; 5832 di_cur += di_len;
@@ -5836,11 +5839,20 @@ next:
5836 if (key_type == BTRFS_DIR_INDEX_KEY) { 5839 if (key_type == BTRFS_DIR_INDEX_KEY) {
5837 if (is_curr) 5840 if (is_curr)
5838 ctx->pos++; 5841 ctx->pos++;
5839 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); 5842 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
5840 if (ret) 5843 if (ret)
5841 goto nopos; 5844 goto nopos;
5842 } 5845 }
5843 5846
5847 /*
5848 * If we haven't emitted any dir entry, we must not touch ctx->pos as
5849 * it was was set to the termination value in previous call. We assume
5850 * that "." and ".." were emitted if we reach this point and set the
5851 * termination value as well for an empty directory.
5852 */
5853 if (ctx->pos > 2 && !emitted)
5854 goto nopos;
5855
5844 /* Reached end of directory/root. Bump pos past the last item. */ 5856 /* Reached end of directory/root. Bump pos past the last item. */
5845 ctx->pos++; 5857 ctx->pos++;
5846 5858
@@ -7974,6 +7986,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
7974 7986
7975 kfree(dip); 7987 kfree(dip);
7976 7988
7989 dio_bio->bi_error = bio->bi_error;
7977 dio_end_io(dio_bio, bio->bi_error); 7990 dio_end_io(dio_bio, bio->bi_error);
7978 7991
7979 if (io_bio->end_io) 7992 if (io_bio->end_io)
@@ -8028,6 +8041,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
8028 8041
8029 kfree(dip); 8042 kfree(dip);
8030 8043
8044 dio_bio->bi_error = bio->bi_error;
8031 dio_end_io(dio_bio, bio->bi_error); 8045 dio_end_io(dio_bio, bio->bi_error);
8032 bio_put(bio); 8046 bio_put(bio);
8033} 8047}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 952172ca7e45..48aee9846329 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2794,24 +2794,29 @@ out:
2794static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) 2794static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2795{ 2795{
2796 struct page *page; 2796 struct page *page;
2797 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2798 2797
2799 page = grab_cache_page(inode->i_mapping, index); 2798 page = grab_cache_page(inode->i_mapping, index);
2800 if (!page) 2799 if (!page)
2801 return NULL; 2800 return ERR_PTR(-ENOMEM);
2802 2801
2803 if (!PageUptodate(page)) { 2802 if (!PageUptodate(page)) {
2804 if (extent_read_full_page_nolock(tree, page, btrfs_get_extent, 2803 int ret;
2805 0)) 2804
2806 return NULL; 2805 ret = btrfs_readpage(NULL, page);
2806 if (ret)
2807 return ERR_PTR(ret);
2807 lock_page(page); 2808 lock_page(page);
2808 if (!PageUptodate(page)) { 2809 if (!PageUptodate(page)) {
2809 unlock_page(page); 2810 unlock_page(page);
2810 page_cache_release(page); 2811 page_cache_release(page);
2811 return NULL; 2812 return ERR_PTR(-EIO);
2813 }
2814 if (page->mapping != inode->i_mapping) {
2815 unlock_page(page);
2816 page_cache_release(page);
2817 return ERR_PTR(-EAGAIN);
2812 } 2818 }
2813 } 2819 }
2814 unlock_page(page);
2815 2820
2816 return page; 2821 return page;
2817} 2822}
@@ -2823,17 +2828,31 @@ static int gather_extent_pages(struct inode *inode, struct page **pages,
2823 pgoff_t index = off >> PAGE_CACHE_SHIFT; 2828 pgoff_t index = off >> PAGE_CACHE_SHIFT;
2824 2829
2825 for (i = 0; i < num_pages; i++) { 2830 for (i = 0; i < num_pages; i++) {
2831again:
2826 pages[i] = extent_same_get_page(inode, index + i); 2832 pages[i] = extent_same_get_page(inode, index + i);
2827 if (!pages[i]) 2833 if (IS_ERR(pages[i])) {
2828 return -ENOMEM; 2834 int err = PTR_ERR(pages[i]);
2835
2836 if (err == -EAGAIN)
2837 goto again;
2838 pages[i] = NULL;
2839 return err;
2840 }
2829 } 2841 }
2830 return 0; 2842 return 0;
2831} 2843}
2832 2844
2833static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) 2845static int lock_extent_range(struct inode *inode, u64 off, u64 len,
2846 bool retry_range_locking)
2834{ 2847{
2835 /* do any pending delalloc/csum calc on src, one way or 2848 /*
2836 another, and lock file content */ 2849 * Do any pending delalloc/csum calculations on inode, one way or
2850 * another, and lock file content.
2851 * The locking order is:
2852 *
2853 * 1) pages
2854 * 2) range in the inode's io tree
2855 */
2837 while (1) { 2856 while (1) {
2838 struct btrfs_ordered_extent *ordered; 2857 struct btrfs_ordered_extent *ordered;
2839 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); 2858 lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
@@ -2851,8 +2870,11 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2851 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); 2870 unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
2852 if (ordered) 2871 if (ordered)
2853 btrfs_put_ordered_extent(ordered); 2872 btrfs_put_ordered_extent(ordered);
2873 if (!retry_range_locking)
2874 return -EAGAIN;
2854 btrfs_wait_ordered_range(inode, off, len); 2875 btrfs_wait_ordered_range(inode, off, len);
2855 } 2876 }
2877 return 0;
2856} 2878}
2857 2879
2858static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2) 2880static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
@@ -2877,15 +2899,24 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2877 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); 2899 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2878} 2900}
2879 2901
2880static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, 2902static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2881 struct inode *inode2, u64 loff2, u64 len) 2903 struct inode *inode2, u64 loff2, u64 len,
2904 bool retry_range_locking)
2882{ 2905{
2906 int ret;
2907
2883 if (inode1 < inode2) { 2908 if (inode1 < inode2) {
2884 swap(inode1, inode2); 2909 swap(inode1, inode2);
2885 swap(loff1, loff2); 2910 swap(loff1, loff2);
2886 } 2911 }
2887 lock_extent_range(inode1, loff1, len); 2912 ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
2888 lock_extent_range(inode2, loff2, len); 2913 if (ret)
2914 return ret;
2915 ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
2916 if (ret)
2917 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
2918 loff1 + len - 1);
2919 return ret;
2889} 2920}
2890 2921
2891struct cmp_pages { 2922struct cmp_pages {
@@ -2901,11 +2932,15 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2901 2932
2902 for (i = 0; i < cmp->num_pages; i++) { 2933 for (i = 0; i < cmp->num_pages; i++) {
2903 pg = cmp->src_pages[i]; 2934 pg = cmp->src_pages[i];
2904 if (pg) 2935 if (pg) {
2936 unlock_page(pg);
2905 page_cache_release(pg); 2937 page_cache_release(pg);
2938 }
2906 pg = cmp->dst_pages[i]; 2939 pg = cmp->dst_pages[i];
2907 if (pg) 2940 if (pg) {
2941 unlock_page(pg);
2908 page_cache_release(pg); 2942 page_cache_release(pg);
2943 }
2909 } 2944 }
2910 kfree(cmp->src_pages); 2945 kfree(cmp->src_pages);
2911 kfree(cmp->dst_pages); 2946 kfree(cmp->dst_pages);
@@ -2966,6 +3001,8 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2966 3001
2967 src_page = cmp->src_pages[i]; 3002 src_page = cmp->src_pages[i];
2968 dst_page = cmp->dst_pages[i]; 3003 dst_page = cmp->dst_pages[i];
3004 ASSERT(PageLocked(src_page));
3005 ASSERT(PageLocked(dst_page));
2969 3006
2970 addr = kmap_atomic(src_page); 3007 addr = kmap_atomic(src_page);
2971 dst_addr = kmap_atomic(dst_page); 3008 dst_addr = kmap_atomic(dst_page);
@@ -3078,14 +3115,46 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
3078 goto out_unlock; 3115 goto out_unlock;
3079 } 3116 }
3080 3117
3118again:
3081 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp); 3119 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3082 if (ret) 3120 if (ret)
3083 goto out_unlock; 3121 goto out_unlock;
3084 3122
3085 if (same_inode) 3123 if (same_inode)
3086 lock_extent_range(src, same_lock_start, same_lock_len); 3124 ret = lock_extent_range(src, same_lock_start, same_lock_len,
3125 false);
3087 else 3126 else
3088 btrfs_double_extent_lock(src, loff, dst, dst_loff, len); 3127 ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
3128 false);
3129 /*
3130 * If one of the inodes has dirty pages in the respective range or
3131 * ordered extents, we need to flush dellaloc and wait for all ordered
3132 * extents in the range. We must unlock the pages and the ranges in the
3133 * io trees to avoid deadlocks when flushing delalloc (requires locking
3134 * pages) and when waiting for ordered extents to complete (they require
3135 * range locking).
3136 */
3137 if (ret == -EAGAIN) {
3138 /*
3139 * Ranges in the io trees already unlocked. Now unlock all
3140 * pages before waiting for all IO to complete.
3141 */
3142 btrfs_cmp_data_free(&cmp);
3143 if (same_inode) {
3144 btrfs_wait_ordered_range(src, same_lock_start,
3145 same_lock_len);
3146 } else {
3147 btrfs_wait_ordered_range(src, loff, len);
3148 btrfs_wait_ordered_range(dst, dst_loff, len);
3149 }
3150 goto again;
3151 }
3152 ASSERT(ret == 0);
3153 if (WARN_ON(ret)) {
3154 /* ranges in the io trees already unlocked */
3155 btrfs_cmp_data_free(&cmp);
3156 return ret;
3157 }
3089 3158
3090 /* pass original length for comparison so we stay within i_size */ 3159 /* pass original length for comparison so we stay within i_size */
3091 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); 3160 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
@@ -3795,9 +3864,15 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3795 u64 lock_start = min_t(u64, off, destoff); 3864 u64 lock_start = min_t(u64, off, destoff);
3796 u64 lock_len = max_t(u64, off, destoff) + len - lock_start; 3865 u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
3797 3866
3798 lock_extent_range(src, lock_start, lock_len); 3867 ret = lock_extent_range(src, lock_start, lock_len, true);
3799 } else { 3868 } else {
3800 btrfs_double_extent_lock(src, off, inode, destoff, len); 3869 ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
3870 true);
3871 }
3872 ASSERT(ret == 0);
3873 if (WARN_ON(ret)) {
3874 /* ranges in the io trees already unlocked */
3875 goto out_unlock;
3801 } 3876 }
3802 3877
3803 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); 3878 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7dc886c9a78f..e956cba94338 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -175,7 +175,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
175 * string to the length of the original string to allow for worst case. 175 * string to the length of the original string to allow for worst case.
176 */ 176 */
177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN; 177 md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
178 mountdata = kzalloc(md_len + 1, GFP_KERNEL); 178 mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
179 if (mountdata == NULL) { 179 if (mountdata == NULL) {
180 rc = -ENOMEM; 180 rc = -ENOMEM;
181 goto compose_mount_options_err; 181 goto compose_mount_options_err;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fce8151..e682b36a210f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
714 714
715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL); 715 ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
716 if (!ses->auth_key.response) { 716 if (!ses->auth_key.response) {
717 rc = ENOMEM; 717 rc = -ENOMEM;
718 ses->auth_key.len = 0; 718 ses->auth_key.len = 0;
719 goto setup_ntlmv2_rsp_ret; 719 goto setup_ntlmv2_rsp_ret;
720 } 720 }
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4fbd92d2e113..a763cd3d9e7c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2999,8 +2999,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
2999 if (ses_init_buf) { 2999 if (ses_init_buf) {
3000 ses_init_buf->trailer.session_req.called_len = 32; 3000 ses_init_buf->trailer.session_req.called_len = 32;
3001 3001
3002 if (server->server_RFC1001_name && 3002 if (server->server_RFC1001_name[0] != 0)
3003 server->server_RFC1001_name[0] != 0)
3004 rfc1002mangle(ses_init_buf->trailer. 3003 rfc1002mangle(ses_init_buf->trailer.
3005 session_req.called_name, 3004 session_req.called_name,
3006 server->server_RFC1001_name, 3005 server->server_RFC1001_name,
diff --git a/fs/dax.c b/fs/dax.c
index fc2e3141138b..711172450da6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -79,15 +79,14 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
79} 79}
80 80
81/* 81/*
82 * dax_clear_blocks() is called from within transaction context from XFS, 82 * dax_clear_sectors() is called from within transaction context from XFS,
83 * and hence this means the stack from this point must follow GFP_NOFS 83 * and hence this means the stack from this point must follow GFP_NOFS
84 * semantics for all operations. 84 * semantics for all operations.
85 */ 85 */
86int dax_clear_blocks(struct inode *inode, sector_t block, long _size) 86int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
87{ 87{
88 struct block_device *bdev = inode->i_sb->s_bdev;
89 struct blk_dax_ctl dax = { 88 struct blk_dax_ctl dax = {
90 .sector = block << (inode->i_blkbits - 9), 89 .sector = _sector,
91 .size = _size, 90 .size = _size,
92 }; 91 };
93 92
@@ -109,7 +108,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
109 wmb_pmem(); 108 wmb_pmem();
110 return 0; 109 return 0;
111} 110}
112EXPORT_SYMBOL_GPL(dax_clear_blocks); 111EXPORT_SYMBOL_GPL(dax_clear_sectors);
113 112
114/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */ 113/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
115static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first, 114static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
@@ -485,11 +484,10 @@ static int dax_writeback_one(struct block_device *bdev,
485 * end]. This is required by data integrity operations to ensure file data is 484 * end]. This is required by data integrity operations to ensure file data is
486 * on persistent storage prior to completion of the operation. 485 * on persistent storage prior to completion of the operation.
487 */ 486 */
488int dax_writeback_mapping_range(struct address_space *mapping, loff_t start, 487int dax_writeback_mapping_range(struct address_space *mapping,
489 loff_t end) 488 struct block_device *bdev, struct writeback_control *wbc)
490{ 489{
491 struct inode *inode = mapping->host; 490 struct inode *inode = mapping->host;
492 struct block_device *bdev = inode->i_sb->s_bdev;
493 pgoff_t start_index, end_index, pmd_index; 491 pgoff_t start_index, end_index, pmd_index;
494 pgoff_t indices[PAGEVEC_SIZE]; 492 pgoff_t indices[PAGEVEC_SIZE];
495 struct pagevec pvec; 493 struct pagevec pvec;
@@ -500,8 +498,11 @@ int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
500 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT)) 498 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
501 return -EIO; 499 return -EIO;
502 500
503 start_index = start >> PAGE_CACHE_SHIFT; 501 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
504 end_index = end >> PAGE_CACHE_SHIFT; 502 return 0;
503
504 start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
505 end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
505 pmd_index = DAX_PMD_INDEX(start_index); 506 pmd_index = DAX_PMD_INDEX(start_index);
506 507
507 rcu_read_lock(); 508 rcu_read_lock();
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 1f107fd51328..655f21f99160 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
575 mutex_unlock(&allocated_ptys_lock); 575 mutex_unlock(&allocated_ptys_lock);
576} 576}
577 577
578/*
579 * pty code needs to hold extra references in case of last /dev/tty close
580 */
581
582void devpts_add_ref(struct inode *ptmx_inode)
583{
584 struct super_block *sb = pts_sb_from_inode(ptmx_inode);
585
586 atomic_inc(&sb->s_active);
587 ihold(ptmx_inode);
588}
589
590void devpts_del_ref(struct inode *ptmx_inode)
591{
592 struct super_block *sb = pts_sb_from_inode(ptmx_inode);
593
594 iput(ptmx_inode);
595 deactivate_super(sb);
596}
597
578/** 598/**
579 * devpts_pty_new -- create a new inode in /dev/pts/ 599 * devpts_pty_new -- create a new inode in /dev/pts/
580 * @ptmx_inode: inode of the master 600 * @ptmx_inode: inode of the master
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1b2f7ffc8b84..d6a9012d42ad 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
472 dio->io_error = -EIO; 472 dio->io_error = -EIO;
473 473
474 if (dio->is_async && dio->rw == READ && dio->should_dirty) { 474 if (dio->is_async && dio->rw == READ && dio->should_dirty) {
475 bio_check_pages_dirty(bio); /* transfers ownership */
476 err = bio->bi_error; 475 err = bio->bi_error;
476 bio_check_pages_dirty(bio); /* transfers ownership */
477 } else { 477 } else {
478 bio_for_each_segment_all(bvec, bio, i) { 478 bio_for_each_segment_all(bvec, bio, i) {
479 struct page *page = bvec->bv_page; 479 struct page *page = bvec->bv_page;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index c424e4813ec8..d48e0d261d78 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -10,6 +10,7 @@
10#include <linux/efi.h> 10#include <linux/efi.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/mount.h>
13 14
14#include "internal.h" 15#include "internal.h"
15 16
@@ -103,9 +104,78 @@ out_free:
103 return size; 104 return size;
104} 105}
105 106
107static int
108efivarfs_ioc_getxflags(struct file *file, void __user *arg)
109{
110 struct inode *inode = file->f_mapping->host;
111 unsigned int i_flags;
112 unsigned int flags = 0;
113
114 i_flags = inode->i_flags;
115 if (i_flags & S_IMMUTABLE)
116 flags |= FS_IMMUTABLE_FL;
117
118 if (copy_to_user(arg, &flags, sizeof(flags)))
119 return -EFAULT;
120 return 0;
121}
122
123static int
124efivarfs_ioc_setxflags(struct file *file, void __user *arg)
125{
126 struct inode *inode = file->f_mapping->host;
127 unsigned int flags;
128 unsigned int i_flags = 0;
129 int error;
130
131 if (!inode_owner_or_capable(inode))
132 return -EACCES;
133
134 if (copy_from_user(&flags, arg, sizeof(flags)))
135 return -EFAULT;
136
137 if (flags & ~FS_IMMUTABLE_FL)
138 return -EOPNOTSUPP;
139
140 if (!capable(CAP_LINUX_IMMUTABLE))
141 return -EPERM;
142
143 if (flags & FS_IMMUTABLE_FL)
144 i_flags |= S_IMMUTABLE;
145
146
147 error = mnt_want_write_file(file);
148 if (error)
149 return error;
150
151 inode_lock(inode);
152 inode_set_flags(inode, i_flags, S_IMMUTABLE);
153 inode_unlock(inode);
154
155 mnt_drop_write_file(file);
156
157 return 0;
158}
159
160long
161efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
162{
163 void __user *arg = (void __user *)p;
164
165 switch (cmd) {
166 case FS_IOC_GETFLAGS:
167 return efivarfs_ioc_getxflags(file, arg);
168 case FS_IOC_SETFLAGS:
169 return efivarfs_ioc_setxflags(file, arg);
170 }
171
172 return -ENOTTY;
173}
174
106const struct file_operations efivarfs_file_operations = { 175const struct file_operations efivarfs_file_operations = {
107 .open = simple_open, 176 .open = simple_open,
108 .read = efivarfs_file_read, 177 .read = efivarfs_file_read,
109 .write = efivarfs_file_write, 178 .write = efivarfs_file_write,
110 .llseek = no_llseek, 179 .llseek = no_llseek,
180 .unlocked_ioctl = efivarfs_file_ioctl,
111}; 181};
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 3381b9da9ee6..e2ab6d0497f2 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -15,7 +15,8 @@
15#include "internal.h" 15#include "internal.h"
16 16
17struct inode *efivarfs_get_inode(struct super_block *sb, 17struct inode *efivarfs_get_inode(struct super_block *sb,
18 const struct inode *dir, int mode, dev_t dev) 18 const struct inode *dir, int mode,
19 dev_t dev, bool is_removable)
19{ 20{
20 struct inode *inode = new_inode(sb); 21 struct inode *inode = new_inode(sb);
21 22
@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
23 inode->i_ino = get_next_ino(); 24 inode->i_ino = get_next_ino();
24 inode->i_mode = mode; 25 inode->i_mode = mode;
25 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 26 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
27 inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
26 switch (mode & S_IFMT) { 28 switch (mode & S_IFMT) {
27 case S_IFREG: 29 case S_IFREG:
28 inode->i_fop = &efivarfs_file_operations; 30 inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
102static int efivarfs_create(struct inode *dir, struct dentry *dentry, 104static int efivarfs_create(struct inode *dir, struct dentry *dentry,
103 umode_t mode, bool excl) 105 umode_t mode, bool excl)
104{ 106{
105 struct inode *inode; 107 struct inode *inode = NULL;
106 struct efivar_entry *var; 108 struct efivar_entry *var;
107 int namelen, i = 0, err = 0; 109 int namelen, i = 0, err = 0;
110 bool is_removable = false;
108 111
109 if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) 112 if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
110 return -EINVAL; 113 return -EINVAL;
111 114
112 inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
113 if (!inode)
114 return -ENOMEM;
115
116 var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL); 115 var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
117 if (!var) { 116 if (!var)
118 err = -ENOMEM; 117 return -ENOMEM;
119 goto out;
120 }
121 118
122 /* length of the variable name itself: remove GUID and separator */ 119 /* length of the variable name itself: remove GUID and separator */
123 namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; 120 namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
125 efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, 122 efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
126 &var->var.VendorGuid); 123 &var->var.VendorGuid);
127 124
125 if (efivar_variable_is_removable(var->var.VendorGuid,
126 dentry->d_name.name, namelen))
127 is_removable = true;
128
129 inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
130 if (!inode) {
131 err = -ENOMEM;
132 goto out;
133 }
134
128 for (i = 0; i < namelen; i++) 135 for (i = 0; i < namelen; i++)
129 var->var.VariableName[i] = dentry->d_name.name[i]; 136 var->var.VariableName[i] = dentry->d_name.name[i];
130 137
@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
138out: 145out:
139 if (err) { 146 if (err) {
140 kfree(var); 147 kfree(var);
141 iput(inode); 148 if (inode)
149 iput(inode);
142 } 150 }
143 return err; 151 return err;
144} 152}
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index b5ff16addb7c..b4505188e799 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
15extern const struct inode_operations efivarfs_dir_inode_operations; 15extern const struct inode_operations efivarfs_dir_inode_operations;
16extern bool efivarfs_valid_name(const char *str, int len); 16extern bool efivarfs_valid_name(const char *str, int len);
17extern struct inode *efivarfs_get_inode(struct super_block *sb, 17extern struct inode *efivarfs_get_inode(struct super_block *sb,
18 const struct inode *dir, int mode, dev_t dev); 18 const struct inode *dir, int mode, dev_t dev,
19 bool is_removable);
19 20
20extern struct list_head efivarfs_list; 21extern struct list_head efivarfs_list;
21 22
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index b8a564f29107..dd029d13ea61 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
118 struct dentry *dentry, *root = sb->s_root; 118 struct dentry *dentry, *root = sb->s_root;
119 unsigned long size = 0; 119 unsigned long size = 0;
120 char *name; 120 char *name;
121 int len, i; 121 int len;
122 int err = -ENOMEM; 122 int err = -ENOMEM;
123 bool is_removable = false;
123 124
124 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 125 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
125 if (!entry) 126 if (!entry)
@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
128 memcpy(entry->var.VariableName, name16, name_size); 129 memcpy(entry->var.VariableName, name16, name_size);
129 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); 130 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
130 131
131 len = ucs2_strlen(entry->var.VariableName); 132 len = ucs2_utf8size(entry->var.VariableName);
132 133
133 /* name, plus '-', plus GUID, plus NUL*/ 134 /* name, plus '-', plus GUID, plus NUL*/
134 name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); 135 name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
135 if (!name) 136 if (!name)
136 goto fail; 137 goto fail;
137 138
138 for (i = 0; i < len; i++) 139 ucs2_as_utf8(name, entry->var.VariableName, len);
139 name[i] = entry->var.VariableName[i] & 0xFF; 140
141 if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
142 is_removable = true;
140 143
141 name[len] = '-'; 144 name[len] = '-';
142 145
@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
144 147
145 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; 148 name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
146 149
147 inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0); 150 inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
151 is_removable);
148 if (!inode) 152 if (!inode)
149 goto fail_name; 153 goto fail_name;
150 154
@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
200 sb->s_d_op = &efivarfs_d_ops; 204 sb->s_d_op = &efivarfs_d_ops;
201 sb->s_time_gran = 1; 205 sb->s_time_gran = 1;
202 206
203 inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0); 207 inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
204 if (!inode) 208 if (!inode)
205 return -ENOMEM; 209 return -ENOMEM;
206 inode->i_op = &efivarfs_dir_inode_operations; 210 inode->i_op = &efivarfs_dir_inode_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2c88d683cd91..c1400b109805 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -80,23 +80,6 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
80 return ret; 80 return ret;
81} 81}
82 82
83static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
84{
85 struct inode *inode = file_inode(vma->vm_file);
86 struct ext2_inode_info *ei = EXT2_I(inode);
87 int ret;
88
89 sb_start_pagefault(inode->i_sb);
90 file_update_time(vma->vm_file);
91 down_read(&ei->dax_sem);
92
93 ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
94
95 up_read(&ei->dax_sem);
96 sb_end_pagefault(inode->i_sb);
97 return ret;
98}
99
100static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma, 83static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
101 struct vm_fault *vmf) 84 struct vm_fault *vmf)
102{ 85{
@@ -124,7 +107,7 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
124static const struct vm_operations_struct ext2_dax_vm_ops = { 107static const struct vm_operations_struct ext2_dax_vm_ops = {
125 .fault = ext2_dax_fault, 108 .fault = ext2_dax_fault,
126 .pmd_fault = ext2_dax_pmd_fault, 109 .pmd_fault = ext2_dax_pmd_fault,
127 .page_mkwrite = ext2_dax_mkwrite, 110 .page_mkwrite = ext2_dax_fault,
128 .pfn_mkwrite = ext2_dax_pfn_mkwrite, 111 .pfn_mkwrite = ext2_dax_pfn_mkwrite,
129}; 112};
130 113
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 338eefda70c6..6bd58e6ff038 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -737,8 +737,10 @@ static int ext2_get_blocks(struct inode *inode,
737 * so that it's not found by another thread before it's 737 * so that it's not found by another thread before it's
738 * initialised 738 * initialised
739 */ 739 */
740 err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key), 740 err = dax_clear_sectors(inode->i_sb->s_bdev,
741 1 << inode->i_blkbits); 741 le32_to_cpu(chain[depth-1].key) <<
742 (inode->i_blkbits - 9),
743 1 << inode->i_blkbits);
742 if (err) { 744 if (err) {
743 mutex_unlock(&ei->truncate_mutex); 745 mutex_unlock(&ei->truncate_mutex);
744 goto cleanup; 746 goto cleanup;
@@ -874,6 +876,14 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
874static int 876static int
875ext2_writepages(struct address_space *mapping, struct writeback_control *wbc) 877ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
876{ 878{
879#ifdef CONFIG_FS_DAX
880 if (dax_mapping(mapping)) {
881 return dax_writeback_mapping_range(mapping,
882 mapping->host->i_sb->s_bdev,
883 wbc);
884 }
885#endif
886
877 return mpage_writepages(mapping, wbc, ext2_get_block); 887 return mpage_writepages(mapping, wbc, ext2_get_block);
878} 888}
879 889
@@ -1296,7 +1306,7 @@ void ext2_set_inode_flags(struct inode *inode)
1296 inode->i_flags |= S_NOATIME; 1306 inode->i_flags |= S_NOATIME;
1297 if (flags & EXT2_DIRSYNC_FL) 1307 if (flags & EXT2_DIRSYNC_FL)
1298 inode->i_flags |= S_DIRSYNC; 1308 inode->i_flags |= S_DIRSYNC;
1299 if (test_opt(inode->i_sb, DAX)) 1309 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
1300 inode->i_flags |= S_DAX; 1310 inode->i_flags |= S_DAX;
1301} 1311}
1302 1312
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a60678..fe1f50fe764f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
191 /* If checksum is bad mark all blocks used to prevent allocation 191 /* If checksum is bad mark all blocks used to prevent allocation
192 * essentially implementing a per-group read-only flag. */ 192 * essentially implementing a per-group read-only flag. */
193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 193 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
194 ext4_error(sb, "Checksum bad for group %u", block_group);
195 grp = ext4_get_group_info(sb, block_group); 194 grp = ext4_get_group_info(sb, block_group);
196 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 195 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
197 percpu_counter_sub(&sbi->s_freeclusters_counter, 196 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
442 } 441 }
443 ext4_lock_group(sb, block_group); 442 ext4_lock_group(sb, block_group);
444 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 443 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
445
446 err = ext4_init_block_bitmap(sb, bh, block_group, desc); 444 err = ext4_init_block_bitmap(sb, bh, block_group, desc);
447 set_bitmap_uptodate(bh); 445 set_bitmap_uptodate(bh);
448 set_buffer_uptodate(bh); 446 set_buffer_uptodate(bh);
449 ext4_unlock_group(sb, block_group); 447 ext4_unlock_group(sb, block_group);
450 unlock_buffer(bh); 448 unlock_buffer(bh);
451 if (err) 449 if (err) {
450 ext4_error(sb, "Failed to init block bitmap for group "
451 "%u: %d", block_group, err);
452 goto out; 452 goto out;
453 }
453 goto verify; 454 goto verify;
454 } 455 }
455 ext4_unlock_group(sb, block_group); 456 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c8021208a7eb..38f7562489bb 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -467,3 +467,59 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
467 return size; 467 return size;
468 return 0; 468 return 0;
469} 469}
470
471/*
472 * Validate dentries for encrypted directories to make sure we aren't
473 * potentially caching stale data after a key has been added or
474 * removed.
475 */
476static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
477{
478 struct inode *dir = d_inode(dentry->d_parent);
479 struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
480 int dir_has_key, cached_with_key;
481
482 if (!ext4_encrypted_inode(dir))
483 return 0;
484
485 if (ci && ci->ci_keyring_key &&
486 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
487 (1 << KEY_FLAG_REVOKED) |
488 (1 << KEY_FLAG_DEAD))))
489 ci = NULL;
490
491 /* this should eventually be an flag in d_flags */
492 cached_with_key = dentry->d_fsdata != NULL;
493 dir_has_key = (ci != NULL);
494
495 /*
496 * If the dentry was cached without the key, and it is a
497 * negative dentry, it might be a valid name. We can't check
498 * if the key has since been made available due to locking
499 * reasons, so we fail the validation so ext4_lookup() can do
500 * this check.
501 *
502 * We also fail the validation if the dentry was created with
503 * the key present, but we no longer have the key, or vice versa.
504 */
505 if ((!cached_with_key && d_is_negative(dentry)) ||
506 (!cached_with_key && dir_has_key) ||
507 (cached_with_key && !dir_has_key)) {
508#if 0 /* Revalidation debug */
509 char buf[80];
510 char *cp = simple_dname(dentry, buf, sizeof(buf));
511
512 if (IS_ERR(cp))
513 cp = (char *) "???";
514 pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
515 cached_with_key, d_is_negative(dentry),
516 dir_has_key);
517#endif
518 return 0;
519 }
520 return 1;
521}
522
523const struct dentry_operations ext4_encrypted_d_ops = {
524 .d_revalidate = ext4_d_revalidate,
525};
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1d1bca74f844..33f5e2a50cf8 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
111 int dir_has_error = 0; 111 int dir_has_error = 0;
112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}; 112 struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
113 113
114 if (ext4_encrypted_inode(inode)) {
115 err = ext4_get_encryption_info(inode);
116 if (err && err != -ENOKEY)
117 return err;
118 }
119
114 if (is_dx_dir(inode)) { 120 if (is_dx_dir(inode)) {
115 err = ext4_dx_readdir(file, ctx); 121 err = ext4_dx_readdir(file, ctx);
116 if (err != ERR_BAD_DX_DIR) { 122 if (err != ERR_BAD_DX_DIR) {
@@ -157,8 +163,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
157 index, 1); 163 index, 1);
158 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 164 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
159 bh = ext4_bread(NULL, inode, map.m_lblk, 0); 165 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
160 if (IS_ERR(bh)) 166 if (IS_ERR(bh)) {
161 return PTR_ERR(bh); 167 err = PTR_ERR(bh);
168 bh = NULL;
169 goto errout;
170 }
162 } 171 }
163 172
164 if (!bh) { 173 if (!bh) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b285dc8a..157b458a69d4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2302,6 +2302,7 @@ struct page *ext4_encrypt(struct inode *inode,
2302int ext4_decrypt(struct page *page); 2302int ext4_decrypt(struct page *page);
2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, 2303int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
2304 ext4_fsblk_t pblk, ext4_lblk_t len); 2304 ext4_fsblk_t pblk, ext4_lblk_t len);
2305extern const struct dentry_operations ext4_encrypted_d_ops;
2305 2306
2306#ifdef CONFIG_EXT4_FS_ENCRYPTION 2307#ifdef CONFIG_EXT4_FS_ENCRYPTION
2307int ext4_init_crypto(void); 2308int ext4_init_crypto(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ffabaf90aa5..3753ceb0b0dd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3928,7 +3928,7 @@ static int
3928convert_initialized_extent(handle_t *handle, struct inode *inode, 3928convert_initialized_extent(handle_t *handle, struct inode *inode,
3929 struct ext4_map_blocks *map, 3929 struct ext4_map_blocks *map,
3930 struct ext4_ext_path **ppath, int flags, 3930 struct ext4_ext_path **ppath, int flags,
3931 unsigned int allocated, ext4_fsblk_t newblock) 3931 unsigned int allocated)
3932{ 3932{
3933 struct ext4_ext_path *path = *ppath; 3933 struct ext4_ext_path *path = *ppath;
3934 struct ext4_extent *ex; 3934 struct ext4_extent *ex;
@@ -4347,7 +4347,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4347 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4348 allocated = convert_initialized_extent( 4348 allocated = convert_initialized_extent(
4349 handle, inode, map, &path, 4349 handle, inode, map, &path,
4350 flags, allocated, newblock); 4350 flags, allocated);
4351 goto out2; 4351 goto out2;
4352 } else if (!ext4_ext_is_unwritten(ex)) 4352 } else if (!ext4_ext_is_unwritten(ex))
4353 goto out; 4353 goto out;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1126436dada1..4cd318f31cbe 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -262,23 +262,8 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
262 return result; 262 return result;
263} 263}
264 264
265static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
266{
267 int err;
268 struct inode *inode = file_inode(vma->vm_file);
269
270 sb_start_pagefault(inode->i_sb);
271 file_update_time(vma->vm_file);
272 down_read(&EXT4_I(inode)->i_mmap_sem);
273 err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
274 up_read(&EXT4_I(inode)->i_mmap_sem);
275 sb_end_pagefault(inode->i_sb);
276
277 return err;
278}
279
280/* 265/*
281 * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite() 266 * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
282 * handler we check for races agaist truncate. Note that since we cycle through 267 * handler we check for races agaist truncate. Note that since we cycle through
283 * i_mmap_sem, we are sure that also any hole punching that began before we 268 * i_mmap_sem, we are sure that also any hole punching that began before we
284 * were called is finished by now and so if it included part of the file we 269 * were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
311static const struct vm_operations_struct ext4_dax_vm_ops = { 296static const struct vm_operations_struct ext4_dax_vm_ops = {
312 .fault = ext4_dax_fault, 297 .fault = ext4_dax_fault,
313 .pmd_fault = ext4_dax_pmd_fault, 298 .pmd_fault = ext4_dax_pmd_fault,
314 .page_mkwrite = ext4_dax_mkwrite, 299 .page_mkwrite = ext4_dax_fault,
315 .pfn_mkwrite = ext4_dax_pfn_mkwrite, 300 .pfn_mkwrite = ext4_dax_pfn_mkwrite,
316}; 301};
317#else 302#else
@@ -350,6 +335,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
350 struct super_block *sb = inode->i_sb; 335 struct super_block *sb = inode->i_sb;
351 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 336 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
352 struct vfsmount *mnt = filp->f_path.mnt; 337 struct vfsmount *mnt = filp->f_path.mnt;
338 struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
353 struct path path; 339 struct path path;
354 char buf[64], *cp; 340 char buf[64], *cp;
355 int ret; 341 int ret;
@@ -393,6 +379,14 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
393 if (ext4_encryption_info(inode) == NULL) 379 if (ext4_encryption_info(inode) == NULL)
394 return -ENOKEY; 380 return -ENOKEY;
395 } 381 }
382 if (ext4_encrypted_inode(dir) &&
383 !ext4_is_child_context_consistent_with_parent(dir, inode)) {
384 ext4_warning(inode->i_sb,
385 "Inconsistent encryption contexts: %lu/%lu\n",
386 (unsigned long) dir->i_ino,
387 (unsigned long) inode->i_ino);
388 return -EPERM;
389 }
396 /* 390 /*
397 * Set up the jbd2_inode if we are opening the inode for 391 * Set up the jbd2_inode if we are opening the inode for
398 * writing and the journal is present 392 * writing and the journal is present
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3fcfd50a2e8a..acc0ad56bf2f 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@ static int ext4_init_inode_bitmap(struct super_block *sb,
76 /* If checksum is bad mark all blocks and inodes use to prevent 76 /* If checksum is bad mark all blocks and inodes use to prevent
77 * allocation, essentially implementing a per-group read-only flag. */ 77 * allocation, essentially implementing a per-group read-only flag. */
78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 78 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
79 ext4_error(sb, "Checksum bad for group %u", block_group);
80 grp = ext4_get_group_info(sb, block_group); 79 grp = ext4_get_group_info(sb, block_group);
81 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 80 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
82 percpu_counter_sub(&sbi->s_freeclusters_counter, 81 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
191 set_buffer_verified(bh); 190 set_buffer_verified(bh);
192 ext4_unlock_group(sb, block_group); 191 ext4_unlock_group(sb, block_group);
193 unlock_buffer(bh); 192 unlock_buffer(bh);
194 if (err) 193 if (err) {
194 ext4_error(sb, "Failed to init inode bitmap for group "
195 "%u: %d", block_group, err);
195 goto out; 196 goto out;
197 }
196 return bh; 198 return bh;
197 } 199 }
198 ext4_unlock_group(sb, block_group); 200 ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 83bc8bfb3bea..aee960b1af34 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -686,6 +686,34 @@ out_sem:
686 return retval; 686 return retval;
687} 687}
688 688
689/*
690 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
691 * we have to be careful as someone else may be manipulating b_state as well.
692 */
693static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
694{
695 unsigned long old_state;
696 unsigned long new_state;
697
698 flags &= EXT4_MAP_FLAGS;
699
700 /* Dummy buffer_head? Set non-atomically. */
701 if (!bh->b_page) {
702 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
703 return;
704 }
705 /*
706 * Someone else may be modifying b_state. Be careful! This is ugly but
707 * once we get rid of using bh as a container for mapping information
708 * to pass to / from get_block functions, this can go away.
709 */
710 do {
711 old_state = READ_ONCE(bh->b_state);
712 new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
713 } while (unlikely(
714 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
715}
716
689/* Maximum number of blocks we map for direct IO at once. */ 717/* Maximum number of blocks we map for direct IO at once. */
690#define DIO_MAX_BLOCKS 4096 718#define DIO_MAX_BLOCKS 4096
691 719
@@ -722,7 +750,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
722 ext4_io_end_t *io_end = ext4_inode_aio(inode); 750 ext4_io_end_t *io_end = ext4_inode_aio(inode);
723 751
724 map_bh(bh, inode->i_sb, map.m_pblk); 752 map_bh(bh, inode->i_sb, map.m_pblk);
725 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 753 ext4_update_bh_state(bh, map.m_flags);
726 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) 754 if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
727 set_buffer_defer_completion(bh); 755 set_buffer_defer_completion(bh);
728 bh->b_size = inode->i_sb->s_blocksize * map.m_len; 756 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1685 return ret; 1713 return ret;
1686 1714
1687 map_bh(bh, inode->i_sb, map.m_pblk); 1715 map_bh(bh, inode->i_sb, map.m_pblk);
1688 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; 1716 ext4_update_bh_state(bh, map.m_flags);
1689 1717
1690 if (buffer_unwritten(bh)) { 1718 if (buffer_unwritten(bh)) {
1691 /* A delayed write to unwritten bh should be marked 1719 /* A delayed write to unwritten bh should be marked
@@ -2450,6 +2478,10 @@ static int ext4_writepages(struct address_space *mapping,
2450 2478
2451 trace_ext4_writepages(inode, wbc); 2479 trace_ext4_writepages(inode, wbc);
2452 2480
2481 if (dax_mapping(mapping))
2482 return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
2483 wbc);
2484
2453 /* 2485 /*
2454 * No pages to write? This is mainly a kludge to avoid starting 2486 * No pages to write? This is mainly a kludge to avoid starting
2455 * a transaction for special inodes like journal inode on last iput() 2487 * a transaction for special inodes like journal inode on last iput()
@@ -3253,29 +3285,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
3253 * case, we allocate an io_end structure to hook to the iocb. 3285 * case, we allocate an io_end structure to hook to the iocb.
3254 */ 3286 */
3255 iocb->private = NULL; 3287 iocb->private = NULL;
3256 ext4_inode_aio_set(inode, NULL);
3257 if (!is_sync_kiocb(iocb)) {
3258 io_end = ext4_init_io_end(inode, GFP_NOFS);
3259 if (!io_end) {
3260 ret = -ENOMEM;
3261 goto retake_lock;
3262 }
3263 /*
3264 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3265 */
3266 iocb->private = ext4_get_io_end(io_end);
3267 /*
3268 * we save the io structure for current async direct
3269 * IO, so that later ext4_map_blocks() could flag the
3270 * io structure whether there is a unwritten extents
3271 * needs to be converted when IO is completed.
3272 */
3273 ext4_inode_aio_set(inode, io_end);
3274 }
3275
3276 if (overwrite) { 3288 if (overwrite) {
3277 get_block_func = ext4_get_block_overwrite; 3289 get_block_func = ext4_get_block_overwrite;
3278 } else { 3290 } else {
3291 ext4_inode_aio_set(inode, NULL);
3292 if (!is_sync_kiocb(iocb)) {
3293 io_end = ext4_init_io_end(inode, GFP_NOFS);
3294 if (!io_end) {
3295 ret = -ENOMEM;
3296 goto retake_lock;
3297 }
3298 /*
3299 * Grab reference for DIO. Will be dropped in
3300 * ext4_end_io_dio()
3301 */
3302 iocb->private = ext4_get_io_end(io_end);
3303 /*
3304 * we save the io structure for current async direct
3305 * IO, so that later ext4_map_blocks() could flag the
3306 * io structure whether there is a unwritten extents
3307 * needs to be converted when IO is completed.
3308 */
3309 ext4_inode_aio_set(inode, io_end);
3310 }
3279 get_block_func = ext4_get_block_write; 3311 get_block_func = ext4_get_block_write;
3280 dio_flags = DIO_LOCKING; 3312 dio_flags = DIO_LOCKING;
3281 } 3313 }
@@ -4127,7 +4159,7 @@ void ext4_set_inode_flags(struct inode *inode)
4127 new_fl |= S_NOATIME; 4159 new_fl |= S_NOATIME;
4128 if (flags & EXT4_DIRSYNC_FL) 4160 if (flags & EXT4_DIRSYNC_FL)
4129 new_fl |= S_DIRSYNC; 4161 new_fl |= S_DIRSYNC;
4130 if (test_opt(inode->i_sb, DAX)) 4162 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
4131 new_fl |= S_DAX; 4163 new_fl |= S_DAX;
4132 inode_set_flags(inode, new_fl, 4164 inode_set_flags(inode, new_fl,
4133 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); 4165 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f6c36922c24..eae5917c534e 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -208,7 +208,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
208{ 208{
209 struct ext4_inode_info *ei = EXT4_I(inode); 209 struct ext4_inode_info *ei = EXT4_I(inode);
210 handle_t *handle = NULL; 210 handle_t *handle = NULL;
211 int err = EPERM, migrate = 0; 211 int err = -EPERM, migrate = 0;
212 struct ext4_iloc iloc; 212 struct ext4_iloc iloc;
213 unsigned int oldflags, mask, i; 213 unsigned int oldflags, mask, i;
214 unsigned int jflag; 214 unsigned int jflag;
@@ -583,6 +583,11 @@ group_extend_out:
583 "Online defrag not supported with bigalloc"); 583 "Online defrag not supported with bigalloc");
584 err = -EOPNOTSUPP; 584 err = -EOPNOTSUPP;
585 goto mext_out; 585 goto mext_out;
586 } else if (IS_DAX(inode)) {
587 ext4_msg(sb, KERN_ERR,
588 "Online defrag not supported with DAX");
589 err = -EOPNOTSUPP;
590 goto mext_out;
586 } 591 }
587 592
588 err = mnt_want_write_file(filp); 593 err = mnt_want_write_file(filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61eaf74dca37..4424b7bf8ac6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2285,7 +2285,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2285 if (group == 0) 2285 if (group == 0)
2286 seq_puts(seq, "#group: free frags first [" 2286 seq_puts(seq, "#group: free frags first ["
2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2287 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]"); 2288 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2289 2289
2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2290 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2291 sizeof(struct ext4_group_info); 2291 sizeof(struct ext4_group_info);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f11709ae6..e032a0423e35 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -265,11 +265,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
265 ext4_lblk_t orig_blk_offset, donor_blk_offset; 265 ext4_lblk_t orig_blk_offset, donor_blk_offset;
266 unsigned long blocksize = orig_inode->i_sb->s_blocksize; 266 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
267 unsigned int tmp_data_size, data_size, replaced_size; 267 unsigned int tmp_data_size, data_size, replaced_size;
268 int err2, jblocks, retries = 0; 268 int i, err2, jblocks, retries = 0;
269 int replaced_count = 0; 269 int replaced_count = 0;
270 int from = data_offset_in_page << orig_inode->i_blkbits; 270 int from = data_offset_in_page << orig_inode->i_blkbits;
271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 271 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
272 struct super_block *sb = orig_inode->i_sb; 272 struct super_block *sb = orig_inode->i_sb;
273 struct buffer_head *bh = NULL;
273 274
274 /* 275 /*
275 * It needs twice the amount of ordinary journal buffers because 276 * It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@ data_copy:
380 } 381 }
381 /* Perform all necessary steps similar write_begin()/write_end() 382 /* Perform all necessary steps similar write_begin()/write_end()
382 * but keeping in mind that i_size will not change */ 383 * but keeping in mind that i_size will not change */
383 *err = __block_write_begin(pagep[0], from, replaced_size, 384 if (!page_has_buffers(pagep[0]))
384 ext4_get_block); 385 create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
386 bh = page_buffers(pagep[0]);
387 for (i = 0; i < data_offset_in_page; i++)
388 bh = bh->b_this_page;
389 for (i = 0; i < block_len_in_page; i++) {
390 *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
391 if (*err < 0)
392 break;
393 }
385 if (!*err) 394 if (!*err)
386 *err = block_commit_write(pagep[0], from, from + replaced_size); 395 *err = block_commit_write(pagep[0], from, from + replaced_size);
387 396
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 06574dd77614..48e4b8907826 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1558,6 +1558,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1558 struct ext4_dir_entry_2 *de; 1558 struct ext4_dir_entry_2 *de;
1559 struct buffer_head *bh; 1559 struct buffer_head *bh;
1560 1560
1561 if (ext4_encrypted_inode(dir)) {
1562 int res = ext4_get_encryption_info(dir);
1563
1564 /*
1565 * This should be a properly defined flag for
1566 * dentry->d_flags when we uplift this to the VFS.
1567 * d_fsdata is set to (void *) 1 if if the dentry is
1568 * created while the directory was encrypted and we
1569 * don't have access to the key.
1570 */
1571 dentry->d_fsdata = NULL;
1572 if (ext4_encryption_info(dir))
1573 dentry->d_fsdata = (void *) 1;
1574 d_set_d_op(dentry, &ext4_encrypted_d_ops);
1575 if (res && res != -ENOKEY)
1576 return ERR_PTR(res);
1577 }
1578
1561 if (dentry->d_name.len > EXT4_NAME_LEN) 1579 if (dentry->d_name.len > EXT4_NAME_LEN)
1562 return ERR_PTR(-ENAMETOOLONG); 1580 return ERR_PTR(-ENAMETOOLONG);
1563 1581
@@ -1585,11 +1603,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1585 return ERR_PTR(-EFSCORRUPTED); 1603 return ERR_PTR(-EFSCORRUPTED);
1586 } 1604 }
1587 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) && 1605 if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
1588 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1606 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
1589 S_ISLNK(inode->i_mode)) &&
1590 !ext4_is_child_context_consistent_with_parent(dir, 1607 !ext4_is_child_context_consistent_with_parent(dir,
1591 inode)) { 1608 inode)) {
1609 int nokey = ext4_encrypted_inode(inode) &&
1610 !ext4_encryption_info(inode);
1611
1592 iput(inode); 1612 iput(inode);
1613 if (nokey)
1614 return ERR_PTR(-ENOKEY);
1593 ext4_warning(inode->i_sb, 1615 ext4_warning(inode->i_sb,
1594 "Inconsistent encryption contexts: %lu/%lu\n", 1616 "Inconsistent encryption contexts: %lu/%lu\n",
1595 (unsigned long) dir->i_ino, 1617 (unsigned long) dir->i_ino,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7acc315..34038e3598d5 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
198 if (flex_gd == NULL) 198 if (flex_gd == NULL)
199 goto out3; 199 goto out3;
200 200
201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data)) 201 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
202 goto out2; 202 goto out2;
203 flex_gd->count = flexbg_size; 203 flex_gd->count = flexbg_size;
204 204
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6915c950e6e8..1f76d8950a57 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
317 struct inode_switch_wbs_context *isw = 317 struct inode_switch_wbs_context *isw =
318 container_of(work, struct inode_switch_wbs_context, work); 318 container_of(work, struct inode_switch_wbs_context, work);
319 struct inode *inode = isw->inode; 319 struct inode *inode = isw->inode;
320 struct super_block *sb = inode->i_sb;
320 struct address_space *mapping = inode->i_mapping; 321 struct address_space *mapping = inode->i_mapping;
321 struct bdi_writeback *old_wb = inode->i_wb; 322 struct bdi_writeback *old_wb = inode->i_wb;
322 struct bdi_writeback *new_wb = isw->new_wb; 323 struct bdi_writeback *new_wb = isw->new_wb;
@@ -423,6 +424,7 @@ skip_switch:
423 wb_put(new_wb); 424 wb_put(new_wb);
424 425
425 iput(inode); 426 iput(inode);
427 deactivate_super(sb);
426 kfree(isw); 428 kfree(isw);
427} 429}
428 430
@@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
469 471
470 /* while holding I_WB_SWITCH, no one else can update the association */ 472 /* while holding I_WB_SWITCH, no one else can update the association */
471 spin_lock(&inode->i_lock); 473 spin_lock(&inode->i_lock);
474
472 if (inode->i_state & (I_WB_SWITCH | I_FREEING) || 475 if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
473 inode_to_wb(inode) == isw->new_wb) { 476 inode_to_wb(inode) == isw->new_wb)
474 spin_unlock(&inode->i_lock); 477 goto out_unlock;
475 goto out_free; 478
476 } 479 if (!atomic_inc_not_zero(&inode->i_sb->s_active))
480 goto out_unlock;
481
477 inode->i_state |= I_WB_SWITCH; 482 inode->i_state |= I_WB_SWITCH;
478 spin_unlock(&inode->i_lock); 483 spin_unlock(&inode->i_lock);
479 484
@@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
489 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn); 494 call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
490 return; 495 return;
491 496
497out_unlock:
498 spin_unlock(&inode->i_lock);
492out_free: 499out_free:
493 if (isw->new_wb) 500 if (isw->new_wb)
494 wb_put(isw->new_wb); 501 wb_put(isw->new_wb);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 506765afa1a3..bb8d67e2740a 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -376,12 +376,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
376 struct inode *inode = d_inode(dentry); 376 struct inode *inode = d_inode(dentry);
377 dnode_secno dno; 377 dnode_secno dno;
378 int r; 378 int r;
379 int rep = 0;
380 int err; 379 int err;
381 380
382 hpfs_lock(dir->i_sb); 381 hpfs_lock(dir->i_sb);
383 hpfs_adjust_length(name, &len); 382 hpfs_adjust_length(name, &len);
384again: 383
385 err = -ENOENT; 384 err = -ENOENT;
386 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh); 385 de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
387 if (!de) 386 if (!de)
@@ -401,33 +400,9 @@ again:
401 hpfs_error(dir->i_sb, "there was error when removing dirent"); 400 hpfs_error(dir->i_sb, "there was error when removing dirent");
402 err = -EFSERROR; 401 err = -EFSERROR;
403 break; 402 break;
404 case 2: /* no space for deleting, try to truncate file */ 403 case 2: /* no space for deleting */
405
406 err = -ENOSPC; 404 err = -ENOSPC;
407 if (rep++) 405 break;
408 break;
409
410 dentry_unhash(dentry);
411 if (!d_unhashed(dentry)) {
412 hpfs_unlock(dir->i_sb);
413 return -ENOSPC;
414 }
415 if (generic_permission(inode, MAY_WRITE) ||
416 !S_ISREG(inode->i_mode) ||
417 get_write_access(inode)) {
418 d_rehash(dentry);
419 } else {
420 struct iattr newattrs;
421 /*pr_info("truncating file before delete.\n");*/
422 newattrs.ia_size = 0;
423 newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
424 err = notify_change(dentry, &newattrs, NULL);
425 put_write_access(inode);
426 if (!err)
427 goto again;
428 }
429 hpfs_unlock(dir->i_sb);
430 return -ENOSPC;
431 default: 406 default:
432 drop_nlink(inode); 407 drop_nlink(inode);
433 err = 0; 408 err = 0;
diff --git a/fs/inode.c b/fs/inode.c
index 9f62db3bcc3e..69b8b526c194 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
154 inode->i_rdev = 0; 154 inode->i_rdev = 0;
155 inode->dirtied_when = 0; 155 inode->dirtied_when = 0;
156 156
157#ifdef CONFIG_CGROUP_WRITEBACK
158 inode->i_wb_frn_winner = 0;
159 inode->i_wb_frn_avg_time = 0;
160 inode->i_wb_frn_history = 0;
161#endif
162
157 if (security_inode_alloc(inode)) 163 if (security_inode_alloc(inode))
158 goto out; 164 goto out;
159 spin_lock_init(&inode->i_lock); 165 spin_lock_init(&inode->i_lock);
diff --git a/fs/namei.c b/fs/namei.c
index f624d132e01e..9c590e0f66e9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1712,6 +1712,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
1712 return 0; 1712 return 0;
1713 if (!follow) 1713 if (!follow)
1714 return 0; 1714 return 0;
1715 /* make sure that d_is_symlink above matches inode */
1716 if (nd->flags & LOOKUP_RCU) {
1717 if (read_seqcount_retry(&link->dentry->d_seq, seq))
1718 return -ECHILD;
1719 }
1715 return pick_link(nd, link, inode, seq); 1720 return pick_link(nd, link, inode, seq);
1716} 1721}
1717 1722
@@ -1743,11 +1748,11 @@ static int walk_component(struct nameidata *nd, int flags)
1743 if (err < 0) 1748 if (err < 0)
1744 return err; 1749 return err;
1745 1750
1746 inode = d_backing_inode(path.dentry);
1747 seq = 0; /* we are already out of RCU mode */ 1751 seq = 0; /* we are already out of RCU mode */
1748 err = -ENOENT; 1752 err = -ENOENT;
1749 if (d_is_negative(path.dentry)) 1753 if (d_is_negative(path.dentry))
1750 goto out_path_put; 1754 goto out_path_put;
1755 inode = d_backing_inode(path.dentry);
1751 } 1756 }
1752 1757
1753 if (flags & WALK_PUT) 1758 if (flags & WALK_PUT)
@@ -3192,12 +3197,12 @@ retry_lookup:
3192 return error; 3197 return error;
3193 3198
3194 BUG_ON(nd->flags & LOOKUP_RCU); 3199 BUG_ON(nd->flags & LOOKUP_RCU);
3195 inode = d_backing_inode(path.dentry);
3196 seq = 0; /* out of RCU mode, so the value doesn't matter */ 3200 seq = 0; /* out of RCU mode, so the value doesn't matter */
3197 if (unlikely(d_is_negative(path.dentry))) { 3201 if (unlikely(d_is_negative(path.dentry))) {
3198 path_to_nameidata(&path, nd); 3202 path_to_nameidata(&path, nd);
3199 return -ENOENT; 3203 return -ENOENT;
3200 } 3204 }
3205 inode = d_backing_inode(path.dentry);
3201finish_lookup: 3206finish_lookup:
3202 if (nd->depth) 3207 if (nd->depth)
3203 put_link(nd); 3208 put_link(nd);
@@ -3206,11 +3211,6 @@ finish_lookup:
3206 if (unlikely(error)) 3211 if (unlikely(error))
3207 return error; 3212 return error;
3208 3213
3209 if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
3210 path_to_nameidata(&path, nd);
3211 return -ELOOP;
3212 }
3213
3214 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { 3214 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
3215 path_to_nameidata(&path, nd); 3215 path_to_nameidata(&path, nd);
3216 } else { 3216 } else {
@@ -3229,6 +3229,10 @@ finish_open:
3229 return error; 3229 return error;
3230 } 3230 }
3231 audit_inode(nd->name, nd->path.dentry, 0); 3231 audit_inode(nd->name, nd->path.dentry, 0);
3232 if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
3233 error = -ELOOP;
3234 goto out;
3235 }
3232 error = -EISDIR; 3236 error = -EISDIR;
3233 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) 3237 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
3234 goto out; 3238 goto out;
@@ -3273,6 +3277,10 @@ opened:
3273 goto exit_fput; 3277 goto exit_fput;
3274 } 3278 }
3275out: 3279out:
3280 if (unlikely(error > 0)) {
3281 WARN_ON(1);
3282 error = -EINVAL;
3283 }
3276 if (got_write) 3284 if (got_write)
3277 mnt_drop_write(nd->path.mnt); 3285 mnt_drop_write(nd->path.mnt);
3278 path_put(&save_parent); 3286 path_put(&save_parent);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf114676e..7115c5d7d373 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
91#include <linux/fsnotify_backend.h> 91#include <linux/fsnotify_backend.h>
92#include "fsnotify.h" 92#include "fsnotify.h"
93 93
94#define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */
95
94struct srcu_struct fsnotify_mark_srcu; 96struct srcu_struct fsnotify_mark_srcu;
97static DEFINE_SPINLOCK(destroy_lock);
98static LIST_HEAD(destroy_list);
99
100static void fsnotify_mark_destroy(struct work_struct *work);
101static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
95 102
96void fsnotify_get_mark(struct fsnotify_mark *mark) 103void fsnotify_get_mark(struct fsnotify_mark *mark)
97{ 104{
@@ -165,19 +172,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
165 atomic_dec(&group->num_marks); 172 atomic_dec(&group->num_marks);
166} 173}
167 174
168static void
169fsnotify_mark_free_rcu(struct rcu_head *rcu)
170{
171 struct fsnotify_mark *mark;
172
173 mark = container_of(rcu, struct fsnotify_mark, g_rcu);
174 fsnotify_put_mark(mark);
175}
176
177/* 175/*
178 * Free fsnotify mark. The freeing is actually happening from a call_srcu 176 * Free fsnotify mark. The freeing is actually happening from a kthread which
179 * callback. Caller must have a reference to the mark or be protected by 177 * first waits for srcu period end. Caller must have a reference to the mark
180 * fsnotify_mark_srcu. 178 * or be protected by fsnotify_mark_srcu.
181 */ 179 */
182void fsnotify_free_mark(struct fsnotify_mark *mark) 180void fsnotify_free_mark(struct fsnotify_mark *mark)
183{ 181{
@@ -192,7 +190,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
192 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; 190 mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
193 spin_unlock(&mark->lock); 191 spin_unlock(&mark->lock);
194 192
195 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 193 spin_lock(&destroy_lock);
194 list_add(&mark->g_list, &destroy_list);
195 spin_unlock(&destroy_lock);
196 queue_delayed_work(system_unbound_wq, &reaper_work,
197 FSNOTIFY_REAPER_DELAY);
196 198
197 /* 199 /*
198 * Some groups like to know that marks are being freed. This is a 200 * Some groups like to know that marks are being freed. This is a
@@ -388,7 +390,12 @@ err:
388 390
389 spin_unlock(&mark->lock); 391 spin_unlock(&mark->lock);
390 392
391 call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu); 393 spin_lock(&destroy_lock);
394 list_add(&mark->g_list, &destroy_list);
395 spin_unlock(&destroy_lock);
396 queue_delayed_work(system_unbound_wq, &reaper_work,
397 FSNOTIFY_REAPER_DELAY);
398
392 return ret; 399 return ret;
393} 400}
394 401
@@ -491,3 +498,21 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
491 atomic_set(&mark->refcnt, 1); 498 atomic_set(&mark->refcnt, 1);
492 mark->free_mark = free_mark; 499 mark->free_mark = free_mark;
493} 500}
501
502static void fsnotify_mark_destroy(struct work_struct *work)
503{
504 struct fsnotify_mark *mark, *next;
505 struct list_head private_destroy_list;
506
507 spin_lock(&destroy_lock);
508 /* exchange the list head */
509 list_replace_init(&destroy_list, &private_destroy_list);
510 spin_unlock(&destroy_lock);
511
512 synchronize_srcu(&fsnotify_mark_srcu);
513
514 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
515 list_del_init(&mark->g_list);
516 fsnotify_put_mark(mark);
517 }
518}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 794fd1587f34..cda0361e95a4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -956,6 +956,7 @@ clean_orphan:
956 tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, 956 tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
957 update_isize, end); 957 update_isize, end);
958 if (tmp_ret < 0) { 958 if (tmp_ret < 0) {
959 ocfs2_inode_unlock(inode, 1);
959 ret = tmp_ret; 960 ret = tmp_ret;
960 mlog_errno(ret); 961 mlog_errno(ret);
961 brelse(di_bh); 962 brelse(di_bh);
diff --git a/fs/pnode.c b/fs/pnode.c
index 6367e1e435c6..c524fdddc7fb 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -202,6 +202,11 @@ static struct mount *last_dest, *last_source, *dest_master;
202static struct mountpoint *mp; 202static struct mountpoint *mp;
203static struct hlist_head *list; 203static struct hlist_head *list;
204 204
205static inline bool peers(struct mount *m1, struct mount *m2)
206{
207 return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
208}
209
205static int propagate_one(struct mount *m) 210static int propagate_one(struct mount *m)
206{ 211{
207 struct mount *child; 212 struct mount *child;
@@ -212,7 +217,7 @@ static int propagate_one(struct mount *m)
212 /* skip if mountpoint isn't covered by it */ 217 /* skip if mountpoint isn't covered by it */
213 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) 218 if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
214 return 0; 219 return 0;
215 if (m->mnt_group_id == last_dest->mnt_group_id) { 220 if (peers(m, last_dest)) {
216 type = CL_MAKE_SHARED; 221 type = CL_MAKE_SHARED;
217 } else { 222 } else {
218 struct mount *n, *p; 223 struct mount *n, *p;
@@ -223,7 +228,7 @@ static int propagate_one(struct mount *m)
223 last_source = last_source->mnt_master; 228 last_source = last_source->mnt_master;
224 last_dest = last_source->mnt_parent; 229 last_dest = last_source->mnt_parent;
225 } 230 }
226 if (n->mnt_group_id != last_dest->mnt_group_id) { 231 if (!peers(n, last_dest)) {
227 last_source = last_source->mnt_master; 232 last_source = last_source->mnt_master;
228 last_dest = last_source->mnt_parent; 233 last_dest = last_source->mnt_parent;
229 } 234 }
diff --git a/fs/read_write.c b/fs/read_write.c
index 324ec271cc4e..dadf24e5c95b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
17#include <linux/splice.h> 17#include <linux/splice.h>
18#include <linux/compat.h> 18#include <linux/compat.h>
19#include <linux/mount.h> 19#include <linux/mount.h>
20#include <linux/fs.h>
20#include "internal.h" 21#include "internal.h"
21 22
22#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -183,7 +184,7 @@ loff_t no_seek_end_llseek(struct file *file, loff_t offset, int whence)
183 switch (whence) { 184 switch (whence) {
184 case SEEK_SET: case SEEK_CUR: 185 case SEEK_SET: case SEEK_CUR:
185 return generic_file_llseek_size(file, offset, whence, 186 return generic_file_llseek_size(file, offset, whence,
186 ~0ULL, 0); 187 OFFSET_MAX, 0);
187 default: 188 default:
188 return -EINVAL; 189 return -EINVAL;
189 } 190 }
@@ -1532,10 +1533,12 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1532 1533
1533 if (!(file_in->f_mode & FMODE_READ) || 1534 if (!(file_in->f_mode & FMODE_READ) ||
1534 !(file_out->f_mode & FMODE_WRITE) || 1535 !(file_out->f_mode & FMODE_WRITE) ||
1535 (file_out->f_flags & O_APPEND) || 1536 (file_out->f_flags & O_APPEND))
1536 !file_in->f_op->clone_file_range)
1537 return -EBADF; 1537 return -EBADF;
1538 1538
1539 if (!file_in->f_op->clone_file_range)
1540 return -EOPNOTSUPP;
1541
1539 ret = clone_verify_area(file_in, pos_in, len, false); 1542 ret = clone_verify_area(file_in, pos_in, len, false);
1540 if (ret) 1543 if (ret)
1541 return ret; 1544 return ret;
diff --git a/fs/xattr.c b/fs/xattr.c
index 07d0e47f6a7f..4861322e28e8 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -940,7 +940,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
940 bool trusted = capable(CAP_SYS_ADMIN); 940 bool trusted = capable(CAP_SYS_ADMIN);
941 struct simple_xattr *xattr; 941 struct simple_xattr *xattr;
942 ssize_t remaining_size = size; 942 ssize_t remaining_size = size;
943 int err; 943 int err = 0;
944 944
945#ifdef CONFIG_FS_POSIX_ACL 945#ifdef CONFIG_FS_POSIX_ACL
946 if (inode->i_acl) { 946 if (inode->i_acl) {
@@ -965,11 +965,11 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
965 965
966 err = xattr_list_one(&buffer, &remaining_size, xattr->name); 966 err = xattr_list_one(&buffer, &remaining_size, xattr->name);
967 if (err) 967 if (err)
968 return err; 968 break;
969 } 969 }
970 spin_unlock(&xattrs->lock); 970 spin_unlock(&xattrs->lock);
971 971
972 return size - remaining_size; 972 return err ? err : size - remaining_size;
973} 973}
974 974
975/* 975/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 379c089fb051..a9ebabfe7587 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -55,7 +55,7 @@ xfs_count_page_state(
55 } while ((bh = bh->b_this_page) != head); 55 } while ((bh = bh->b_this_page) != head);
56} 56}
57 57
58STATIC struct block_device * 58struct block_device *
59xfs_find_bdev_for_inode( 59xfs_find_bdev_for_inode(
60 struct inode *inode) 60 struct inode *inode)
61{ 61{
@@ -1208,6 +1208,10 @@ xfs_vm_writepages(
1208 struct writeback_control *wbc) 1208 struct writeback_control *wbc)
1209{ 1209{
1210 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); 1210 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1211 if (dax_mapping(mapping))
1212 return dax_writeback_mapping_range(mapping,
1213 xfs_find_bdev_for_inode(mapping->host), wbc);
1214
1211 return generic_writepages(mapping, wbc); 1215 return generic_writepages(mapping, wbc);
1212} 1216}
1213 1217
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f6ffc9ae5ceb..a4343c63fb38 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,5 +62,6 @@ int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
62 struct buffer_head *map_bh, int create); 62 struct buffer_head *map_bh, int create);
63 63
64extern void xfs_count_page_state(struct page *, int *, int *); 64extern void xfs_count_page_state(struct page *, int *, int *);
65extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
65 66
66#endif /* __XFS_AOPS_H__ */ 67#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 45ec9e40150c..6c876012b2e5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -75,7 +75,8 @@ xfs_zero_extent(
75 ssize_t size = XFS_FSB_TO_B(mp, count_fsb); 75 ssize_t size = XFS_FSB_TO_B(mp, count_fsb);
76 76
77 if (IS_DAX(VFS_I(ip))) 77 if (IS_DAX(VFS_I(ip)))
78 return dax_clear_blocks(VFS_I(ip), block, size); 78 return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
79 sector, size);
79 80
80 /* 81 /*
81 * let the block layer decide on the fastest method of 82 * let the block layer decide on the fastest method of
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index da37beb76f6e..594f7e63b432 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -4491,7 +4491,7 @@ xlog_recover_process(
4491 * know precisely what failed. 4491 * know precisely what failed.
4492 */ 4492 */
4493 if (pass == XLOG_RECOVER_CRCPASS) { 4493 if (pass == XLOG_RECOVER_CRCPASS) {
4494 if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc)) 4494 if (rhead->h_crc && crc != rhead->h_crc)
4495 return -EFSBADCRC; 4495 return -EFSBADCRC;
4496 return 0; 4496 return 0;
4497 } 4497 }
@@ -4502,7 +4502,7 @@ xlog_recover_process(
4502 * zero CRC check prevents warnings from being emitted when upgrading 4502 * zero CRC check prevents warnings from being emitted when upgrading
4503 * the kernel from one that does not add CRCs by default. 4503 * the kernel from one that does not add CRCs by default.
4504 */ 4504 */
4505 if (crc != le32_to_cpu(rhead->h_crc)) { 4505 if (crc != rhead->h_crc) {
4506 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { 4506 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4507 xfs_alert(log->l_mp, 4507 xfs_alert(log->l_mp,
4508 "log record CRC mismatch: found 0x%x, expected 0x%x.", 4508 "log record CRC mismatch: found 0x%x, expected 0x%x.",
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0419485891f2..0f1c6f315cdc 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
75 */ 75 */
76static inline cputime_t timespec_to_cputime(const struct timespec *val) 76static inline cputime_t timespec_to_cputime(const struct timespec *val)
77{ 77{
78 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; 78 u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
79 return (__force cputime_t) ret; 79 return (__force cputime_t) ret;
80} 80}
81static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) 81static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
91 */ 91 */
92static inline cputime_t timeval_to_cputime(const struct timeval *val) 92static inline cputime_t timeval_to_cputime(const struct timeval *val)
93{ 93{
94 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; 94 u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
95 val->tv_usec * NSEC_PER_USEC;
95 return (__force cputime_t) ret; 96 return (__force cputime_t) ret;
96} 97}
97static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) 98static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0b3c0d39ef75..c370b261c720 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
239 pmd_t *pmdp); 239 pmd_t *pmdp);
240#endif 240#endif
241 241
242#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
243static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
244 unsigned long address, pmd_t *pmdp)
245{
246
247}
248#endif
249
242#ifndef __HAVE_ARCH_PTE_SAME 250#ifndef __HAVE_ARCH_PTE_SAME
243static inline int pte_same(pte_t pte_a, pte_t pte_b) 251static inline int pte_same(pte_t pte_a, pte_t pte_b)
244{ 252{
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c65a212db77e..c5b4b81a831b 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1166,6 +1166,7 @@ struct drm_connector {
1166 struct drm_mode_object base; 1166 struct drm_mode_object base;
1167 1167
1168 char *name; 1168 char *name;
1169 int connector_id;
1169 int connector_type; 1170 int connector_type;
1170 int connector_type_id; 1171 int connector_type_id;
1171 bool interlace_allowed; 1172 bool interlace_allowed;
@@ -2047,6 +2048,7 @@ struct drm_mode_config {
2047 struct list_head fb_list; 2048 struct list_head fb_list;
2048 2049
2049 int num_connector; 2050 int num_connector;
2051 struct ida connector_ida;
2050 struct list_head connector_list; 2052 struct list_head connector_list;
2051 int num_encoder; 2053 int num_encoder;
2052 struct list_head encoder_list; 2054 struct list_head encoder_list;
@@ -2200,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector);
2200void drm_connector_unregister(struct drm_connector *connector); 2202void drm_connector_unregister(struct drm_connector *connector);
2201 2203
2202extern void drm_connector_cleanup(struct drm_connector *connector); 2204extern void drm_connector_cleanup(struct drm_connector *connector);
2203extern unsigned int drm_connector_index(struct drm_connector *connector); 2205static inline unsigned drm_connector_index(struct drm_connector *connector)
2206{
2207 return connector->connector_id;
2208}
2209
2204/* helper to unplug all connectors from sysfs for device */ 2210/* helper to unplug all connectors from sysfs for device */
2205extern void drm_connector_unplug_all(struct drm_device *dev); 2211extern void drm_connector_unplug_all(struct drm_device *dev);
2206 2212
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6f45aea49e4f..0a05b0d36ae7 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -126,7 +126,7 @@
126/* 104 */ 126/* 104 */
127/* 105 */ 127/* 105 */
128#define TEGRA210_CLK_D_AUDIO 106 128#define TEGRA210_CLK_D_AUDIO 106
129/* 107 ( affects abp -> ape) */ 129#define TEGRA210_CLK_APB2APE 107
130/* 108 */ 130/* 108 */
131/* 109 */ 131/* 109 */
132/* 110 */ 132/* 110 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 29189aeace19..4571ef1a12a9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
682/* 682/*
683 * q->prep_rq_fn return values 683 * q->prep_rq_fn return values
684 */ 684 */
685#define BLKPREP_OK 0 /* serve it */ 685enum {
686#define BLKPREP_KILL 1 /* fatal error, kill */ 686 BLKPREP_OK, /* serve it */
687#define BLKPREP_DEFER 2 /* leave on queue */ 687 BLKPREP_KILL, /* fatal error, kill, return -EIO */
688 BLKPREP_DEFER, /* leave on queue */
689 BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
690};
688 691
689extern unsigned long blk_max_low_pfn, blk_max_pfn; 692extern unsigned long blk_max_low_pfn, blk_max_pfn;
690 693
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 7f540f7f588d..789471dba6fb 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
127 */ 127 */
128 u64 serial_nr; 128 u64 serial_nr;
129 129
130 /*
131 * Incremented by online self and children. Used to guarantee that
132 * parents are not offlined before their children.
133 */
134 atomic_t online_cnt;
135
130 /* percpu_ref killing and RCU release */ 136 /* percpu_ref killing and RCU release */
131 struct rcu_head rcu_head; 137 struct rcu_head rcu_head;
132 struct work_struct destroy_work; 138 struct work_struct destroy_work;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c49ccd..48f5aab117ae 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
144 */ 144 */
145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) 145#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
146#define __trace_if(cond) \ 146#define __trace_if(cond) \
147 if (__builtin_constant_p((cond)) ? !!(cond) : \ 147 if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
148 ({ \ 148 ({ \
149 int ______r; \ 149 int ______r; \
150 static struct ftrace_branch_data \ 150 static struct ftrace_branch_data \
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868ccb493..fea160ee5803 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
137 task_unlock(current); 137 task_unlock(current);
138} 138}
139 139
140extern void cpuset_post_attach_flush(void);
141
140#else /* !CONFIG_CPUSETS */ 142#else /* !CONFIG_CPUSETS */
141 143
142static inline bool cpusets_enabled(void) { return false; } 144static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
243 return false; 245 return false;
244} 246}
245 247
248static inline void cpuset_post_attach_flush(void)
249{
250}
251
246#endif /* !CONFIG_CPUSETS */ 252#endif /* !CONFIG_CPUSETS */
247 253
248#endif /* _LINUX_CPUSET_H */ 254#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 818e45078929..636dd59ab505 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,7 +7,7 @@
7 7
8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, 8ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
9 get_block_t, dio_iodone_t, int flags); 9 get_block_t, dio_iodone_t, int flags);
10int dax_clear_blocks(struct inode *, sector_t block, long size); 10int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); 11int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
12int dax_truncate_page(struct inode *, loff_t from, get_block_t); 12int dax_truncate_page(struct inode *, loff_t from, get_block_t);
13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, 13int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@ static inline bool dax_mapping(struct address_space *mapping)
52{ 52{
53 return mapping->host && IS_DAX(mapping->host); 53 return mapping->host && IS_DAX(mapping->host);
54} 54}
55int dax_writeback_mapping_range(struct address_space *mapping, loff_t start, 55
56 loff_t end); 56struct writeback_control;
57int dax_writeback_mapping_range(struct address_space *mapping,
58 struct block_device *bdev, struct writeback_control *wbc);
57#endif 59#endif
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 251a2090a554..e0ee0b3000b2 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
19 19
20int devpts_new_index(struct inode *ptmx_inode); 20int devpts_new_index(struct inode *ptmx_inode);
21void devpts_kill_index(struct inode *ptmx_inode, int idx); 21void devpts_kill_index(struct inode *ptmx_inode, int idx);
22void devpts_add_ref(struct inode *ptmx_inode);
23void devpts_del_ref(struct inode *ptmx_inode);
22/* mknod in devpts */ 24/* mknod in devpts */
23struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 25struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
24 void *priv); 26 void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
32/* Dummy stubs in the no-pty case */ 34/* Dummy stubs in the no-pty case */
33static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; } 35static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
34static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { } 36static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
37static inline void devpts_add_ref(struct inode *ptmx_inode) { }
38static inline void devpts_del_ref(struct inode *ptmx_inode) { }
35static inline struct inode *devpts_pty_new(struct inode *ptmx_inode, 39static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
36 dev_t device, int index, void *priv) 40 dev_t device, int index, void *priv)
37{ 41{
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 569b5a866bb1..47be3ad7d3e5 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
1199struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, 1199struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
1200 struct list_head *head, bool remove); 1200 struct list_head *head, bool remove);
1201 1201
1202bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len); 1202bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
1203 unsigned long data_size);
1204bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
1205 size_t len);
1203 1206
1204extern struct work_struct efivar_work; 1207extern struct work_struct efivar_work;
1205void efivar_run_worker(void); 1208void efivar_run_worker(void);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f45aa4..533c4408529a 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@ struct fsnotify_mark {
220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing 220 /* List of marks by group->i_fsnotify_marks. Also reused for queueing
221 * mark into destroy_list when it's waiting for the end of SRCU period 221 * mark into destroy_list when it's waiting for the end of SRCU period
222 * before it can be freed. [group->mark_mutex] */ 222 * before it can be freed. [group->mark_mutex] */
223 union { 223 struct list_head g_list;
224 struct list_head g_list;
225 struct rcu_head g_rcu;
226 };
227 /* Protects inode / mnt pointers, flags, masks */ 224 /* Protects inode / mnt pointers, flags, masks */
228 spinlock_t lock; 225 spinlock_t lock;
229 /* List of marks for inode / vfsmount [obj_lock] */ 226 /* List of marks for inode / vfsmount [obj_lock] */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 81de7123959d..c2b340e23f62 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,6 +603,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
603 603
604extern int skip_trace(unsigned long ip); 604extern int skip_trace(unsigned long ip);
605extern void ftrace_module_init(struct module *mod); 605extern void ftrace_module_init(struct module *mod);
606extern void ftrace_module_enable(struct module *mod);
606extern void ftrace_release_mod(struct module *mod); 607extern void ftrace_release_mod(struct module *mod);
607 608
608extern void ftrace_disable_daemon(void); 609extern void ftrace_disable_daemon(void);
@@ -612,8 +613,9 @@ static inline int skip_trace(unsigned long ip) { return 0; }
612static inline int ftrace_force_update(void) { return 0; } 613static inline int ftrace_force_update(void) { return 0; }
613static inline void ftrace_disable_daemon(void) { } 614static inline void ftrace_disable_daemon(void) { }
614static inline void ftrace_enable_daemon(void) { } 615static inline void ftrace_enable_daemon(void) { }
615static inline void ftrace_release_mod(struct module *mod) {} 616static inline void ftrace_module_init(struct module *mod) { }
616static inline void ftrace_module_init(struct module *mod) {} 617static inline void ftrace_module_enable(struct module *mod) { }
618static inline void ftrace_release_mod(struct module *mod) { }
617static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 619static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
618{ 620{
619 return -EINVAL; 621 return -EINVAL;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273ca4873..2d9b650047a5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
235/* low 64 bit */ 235/* low 64 bit */
236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) 236#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
237 237
238/* PRS_REG */
239#define DMA_PRS_PPR ((u32)1)
240
238#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ 241#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
239do { \ 242do { \
240 cycles_t start_time = get_cycles(); \ 243 cycles_t start_time = get_cycles(); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 851821bfd553..bec2abbd7ab2 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -526,6 +526,7 @@ enum ata_lpm_policy {
526enum ata_lpm_hints { 526enum ata_lpm_hints {
527 ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ 527 ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
528 ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ 528 ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
529 ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */
529}; 530};
530 531
531/* forward declarations */ 532/* forward declarations */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index bed40dff0e86..141ffdd59960 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -26,9 +26,8 @@ enum {
26 26
27 /* need to set a limit somewhere, but yes, this is likely overkill */ 27 /* need to set a limit somewhere, but yes, this is likely overkill */
28 ND_IOCTL_MAX_BUFLEN = SZ_4M, 28 ND_IOCTL_MAX_BUFLEN = SZ_4M,
29 ND_CMD_MAX_ELEM = 4, 29 ND_CMD_MAX_ELEM = 5,
30 ND_CMD_MAX_ENVELOPE = 16, 30 ND_CMD_MAX_ENVELOPE = 16,
31 ND_CMD_ARS_STATUS_MAX = SZ_4K,
32 ND_MAX_MAPPINGS = 32, 31 ND_MAX_MAPPINGS = 32,
33 32
34 /* region flag indicating to direct-map persistent memory by default */ 33 /* region flag indicating to direct-map persistent memory by default */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d6750111e48e..2190419bdf0a 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -135,6 +135,10 @@ enum {
135 /* Memory types */ 135 /* Memory types */
136 NVM_ID_FMTYPE_SLC = 0, 136 NVM_ID_FMTYPE_SLC = 0,
137 NVM_ID_FMTYPE_MLC = 1, 137 NVM_ID_FMTYPE_MLC = 1,
138
139 /* Device capabilities */
140 NVM_ID_DCAP_BBLKMGMT = 0x1,
141 NVM_UD_DCAP_ECC = 0x2,
138}; 142};
139 143
140struct nvm_id_lp_mlc { 144struct nvm_id_lp_mlc {
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c57e424d914b..4dca42fd32f5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,7 +66,7 @@ struct lock_class {
66 /* 66 /*
67 * class-hash: 67 * class-hash:
68 */ 68 */
69 struct list_head hash_entry; 69 struct hlist_node hash_entry;
70 70
71 /* 71 /*
72 * global list of all lock-classes: 72 * global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
199 u8 irq_context; 199 u8 irq_context;
200 u8 depth; 200 u8 depth;
201 u16 base; 201 u16 base;
202 struct list_head entry; 202 struct hlist_node entry;
203 u64 chain_key; 203 u64 chain_key;
204}; 204};
205 205
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 430a929f048b..a0e8cc8dcc67 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -44,6 +44,8 @@
44 44
45#include <linux/timecounter.h> 45#include <linux/timecounter.h>
46 46
47#define DEFAULT_UAR_PAGE_SHIFT 12
48
47#define MAX_MSIX_P_PORT 17 49#define MAX_MSIX_P_PORT 17
48#define MAX_MSIX 64 50#define MAX_MSIX 64
49#define MIN_MSIX_P_PORT 5 51#define MIN_MSIX_P_PORT 5
@@ -856,6 +858,7 @@ struct mlx4_dev {
856 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 858 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
857 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 859 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
858 struct mlx4_vf_dev *dev_vfs; 860 struct mlx4_vf_dev *dev_vfs;
861 u8 uar_page_shift;
859}; 862};
860 863
861struct mlx4_clock_params { 864struct mlx4_clock_params {
@@ -1528,4 +1531,14 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1528int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1531int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1529 struct mlx4_clock_params *params); 1532 struct mlx4_clock_params *params);
1530 1533
1534static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
1535{
1536 return (index << (PAGE_SHIFT - dev->uar_page_shift));
1537}
1538
1539static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
1540{
1541 /* The first 128 UARs are used for EQ doorbells */
1542 return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
1543}
1531#endif /* MLX4_DEVICE_H */ 1544#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 231ab6bcea76..51f1e540fc2b 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -207,15 +207,15 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
207 u8 outer_dmac[0x1]; 207 u8 outer_dmac[0x1];
208 u8 outer_smac[0x1]; 208 u8 outer_smac[0x1];
209 u8 outer_ether_type[0x1]; 209 u8 outer_ether_type[0x1];
210 u8 reserved_0[0x1]; 210 u8 reserved_at_3[0x1];
211 u8 outer_first_prio[0x1]; 211 u8 outer_first_prio[0x1];
212 u8 outer_first_cfi[0x1]; 212 u8 outer_first_cfi[0x1];
213 u8 outer_first_vid[0x1]; 213 u8 outer_first_vid[0x1];
214 u8 reserved_1[0x1]; 214 u8 reserved_at_7[0x1];
215 u8 outer_second_prio[0x1]; 215 u8 outer_second_prio[0x1];
216 u8 outer_second_cfi[0x1]; 216 u8 outer_second_cfi[0x1];
217 u8 outer_second_vid[0x1]; 217 u8 outer_second_vid[0x1];
218 u8 reserved_2[0x1]; 218 u8 reserved_at_b[0x1];
219 u8 outer_sip[0x1]; 219 u8 outer_sip[0x1];
220 u8 outer_dip[0x1]; 220 u8 outer_dip[0x1];
221 u8 outer_frag[0x1]; 221 u8 outer_frag[0x1];
@@ -230,21 +230,21 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
230 u8 outer_gre_protocol[0x1]; 230 u8 outer_gre_protocol[0x1];
231 u8 outer_gre_key[0x1]; 231 u8 outer_gre_key[0x1];
232 u8 outer_vxlan_vni[0x1]; 232 u8 outer_vxlan_vni[0x1];
233 u8 reserved_3[0x5]; 233 u8 reserved_at_1a[0x5];
234 u8 source_eswitch_port[0x1]; 234 u8 source_eswitch_port[0x1];
235 235
236 u8 inner_dmac[0x1]; 236 u8 inner_dmac[0x1];
237 u8 inner_smac[0x1]; 237 u8 inner_smac[0x1];
238 u8 inner_ether_type[0x1]; 238 u8 inner_ether_type[0x1];
239 u8 reserved_4[0x1]; 239 u8 reserved_at_23[0x1];
240 u8 inner_first_prio[0x1]; 240 u8 inner_first_prio[0x1];
241 u8 inner_first_cfi[0x1]; 241 u8 inner_first_cfi[0x1];
242 u8 inner_first_vid[0x1]; 242 u8 inner_first_vid[0x1];
243 u8 reserved_5[0x1]; 243 u8 reserved_at_27[0x1];
244 u8 inner_second_prio[0x1]; 244 u8 inner_second_prio[0x1];
245 u8 inner_second_cfi[0x1]; 245 u8 inner_second_cfi[0x1];
246 u8 inner_second_vid[0x1]; 246 u8 inner_second_vid[0x1];
247 u8 reserved_6[0x1]; 247 u8 reserved_at_2b[0x1];
248 u8 inner_sip[0x1]; 248 u8 inner_sip[0x1];
249 u8 inner_dip[0x1]; 249 u8 inner_dip[0x1];
250 u8 inner_frag[0x1]; 250 u8 inner_frag[0x1];
@@ -256,37 +256,37 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
256 u8 inner_tcp_sport[0x1]; 256 u8 inner_tcp_sport[0x1];
257 u8 inner_tcp_dport[0x1]; 257 u8 inner_tcp_dport[0x1];
258 u8 inner_tcp_flags[0x1]; 258 u8 inner_tcp_flags[0x1];
259 u8 reserved_7[0x9]; 259 u8 reserved_at_37[0x9];
260 260
261 u8 reserved_8[0x40]; 261 u8 reserved_at_40[0x40];
262}; 262};
263 263
264struct mlx5_ifc_flow_table_prop_layout_bits { 264struct mlx5_ifc_flow_table_prop_layout_bits {
265 u8 ft_support[0x1]; 265 u8 ft_support[0x1];
266 u8 reserved_0[0x2]; 266 u8 reserved_at_1[0x2];
267 u8 flow_modify_en[0x1]; 267 u8 flow_modify_en[0x1];
268 u8 modify_root[0x1]; 268 u8 modify_root[0x1];
269 u8 identified_miss_table_mode[0x1]; 269 u8 identified_miss_table_mode[0x1];
270 u8 flow_table_modify[0x1]; 270 u8 flow_table_modify[0x1];
271 u8 reserved_1[0x19]; 271 u8 reserved_at_7[0x19];
272 272
273 u8 reserved_2[0x2]; 273 u8 reserved_at_20[0x2];
274 u8 log_max_ft_size[0x6]; 274 u8 log_max_ft_size[0x6];
275 u8 reserved_3[0x10]; 275 u8 reserved_at_28[0x10];
276 u8 max_ft_level[0x8]; 276 u8 max_ft_level[0x8];
277 277
278 u8 reserved_4[0x20]; 278 u8 reserved_at_40[0x20];
279 279
280 u8 reserved_5[0x18]; 280 u8 reserved_at_60[0x18];
281 u8 log_max_ft_num[0x8]; 281 u8 log_max_ft_num[0x8];
282 282
283 u8 reserved_6[0x18]; 283 u8 reserved_at_80[0x18];
284 u8 log_max_destination[0x8]; 284 u8 log_max_destination[0x8];
285 285
286 u8 reserved_7[0x18]; 286 u8 reserved_at_a0[0x18];
287 u8 log_max_flow[0x8]; 287 u8 log_max_flow[0x8];
288 288
289 u8 reserved_8[0x40]; 289 u8 reserved_at_c0[0x40];
290 290
291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; 291 struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
292 292
@@ -298,13 +298,13 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
298 u8 receive[0x1]; 298 u8 receive[0x1];
299 u8 write[0x1]; 299 u8 write[0x1];
300 u8 read[0x1]; 300 u8 read[0x1];
301 u8 reserved_0[0x1]; 301 u8 reserved_at_4[0x1];
302 u8 srq_receive[0x1]; 302 u8 srq_receive[0x1];
303 u8 reserved_1[0x1a]; 303 u8 reserved_at_6[0x1a];
304}; 304};
305 305
306struct mlx5_ifc_ipv4_layout_bits { 306struct mlx5_ifc_ipv4_layout_bits {
307 u8 reserved_0[0x60]; 307 u8 reserved_at_0[0x60];
308 308
309 u8 ipv4[0x20]; 309 u8 ipv4[0x20];
310}; 310};
@@ -316,7 +316,7 @@ struct mlx5_ifc_ipv6_layout_bits {
316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 316union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 317 struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 318 struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
319 u8 reserved_0[0x80]; 319 u8 reserved_at_0[0x80];
320}; 320};
321 321
322struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 322struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
@@ -336,15 +336,15 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
336 u8 ip_dscp[0x6]; 336 u8 ip_dscp[0x6];
337 u8 ip_ecn[0x2]; 337 u8 ip_ecn[0x2];
338 u8 vlan_tag[0x1]; 338 u8 vlan_tag[0x1];
339 u8 reserved_0[0x1]; 339 u8 reserved_at_91[0x1];
340 u8 frag[0x1]; 340 u8 frag[0x1];
341 u8 reserved_1[0x4]; 341 u8 reserved_at_93[0x4];
342 u8 tcp_flags[0x9]; 342 u8 tcp_flags[0x9];
343 343
344 u8 tcp_sport[0x10]; 344 u8 tcp_sport[0x10];
345 u8 tcp_dport[0x10]; 345 u8 tcp_dport[0x10];
346 346
347 u8 reserved_2[0x20]; 347 u8 reserved_at_c0[0x20];
348 348
349 u8 udp_sport[0x10]; 349 u8 udp_sport[0x10];
350 u8 udp_dport[0x10]; 350 u8 udp_dport[0x10];
@@ -355,9 +355,9 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
355}; 355};
356 356
357struct mlx5_ifc_fte_match_set_misc_bits { 357struct mlx5_ifc_fte_match_set_misc_bits {
358 u8 reserved_0[0x20]; 358 u8 reserved_at_0[0x20];
359 359
360 u8 reserved_1[0x10]; 360 u8 reserved_at_20[0x10];
361 u8 source_port[0x10]; 361 u8 source_port[0x10];
362 362
363 u8 outer_second_prio[0x3]; 363 u8 outer_second_prio[0x3];
@@ -369,31 +369,31 @@ struct mlx5_ifc_fte_match_set_misc_bits {
369 369
370 u8 outer_second_vlan_tag[0x1]; 370 u8 outer_second_vlan_tag[0x1];
371 u8 inner_second_vlan_tag[0x1]; 371 u8 inner_second_vlan_tag[0x1];
372 u8 reserved_2[0xe]; 372 u8 reserved_at_62[0xe];
373 u8 gre_protocol[0x10]; 373 u8 gre_protocol[0x10];
374 374
375 u8 gre_key_h[0x18]; 375 u8 gre_key_h[0x18];
376 u8 gre_key_l[0x8]; 376 u8 gre_key_l[0x8];
377 377
378 u8 vxlan_vni[0x18]; 378 u8 vxlan_vni[0x18];
379 u8 reserved_3[0x8]; 379 u8 reserved_at_b8[0x8];
380 380
381 u8 reserved_4[0x20]; 381 u8 reserved_at_c0[0x20];
382 382
383 u8 reserved_5[0xc]; 383 u8 reserved_at_e0[0xc];
384 u8 outer_ipv6_flow_label[0x14]; 384 u8 outer_ipv6_flow_label[0x14];
385 385
386 u8 reserved_6[0xc]; 386 u8 reserved_at_100[0xc];
387 u8 inner_ipv6_flow_label[0x14]; 387 u8 inner_ipv6_flow_label[0x14];
388 388
389 u8 reserved_7[0xe0]; 389 u8 reserved_at_120[0xe0];
390}; 390};
391 391
392struct mlx5_ifc_cmd_pas_bits { 392struct mlx5_ifc_cmd_pas_bits {
393 u8 pa_h[0x20]; 393 u8 pa_h[0x20];
394 394
395 u8 pa_l[0x14]; 395 u8 pa_l[0x14];
396 u8 reserved_0[0xc]; 396 u8 reserved_at_34[0xc];
397}; 397};
398 398
399struct mlx5_ifc_uint64_bits { 399struct mlx5_ifc_uint64_bits {
@@ -418,31 +418,31 @@ enum {
418struct mlx5_ifc_ads_bits { 418struct mlx5_ifc_ads_bits {
419 u8 fl[0x1]; 419 u8 fl[0x1];
420 u8 free_ar[0x1]; 420 u8 free_ar[0x1];
421 u8 reserved_0[0xe]; 421 u8 reserved_at_2[0xe];
422 u8 pkey_index[0x10]; 422 u8 pkey_index[0x10];
423 423
424 u8 reserved_1[0x8]; 424 u8 reserved_at_20[0x8];
425 u8 grh[0x1]; 425 u8 grh[0x1];
426 u8 mlid[0x7]; 426 u8 mlid[0x7];
427 u8 rlid[0x10]; 427 u8 rlid[0x10];
428 428
429 u8 ack_timeout[0x5]; 429 u8 ack_timeout[0x5];
430 u8 reserved_2[0x3]; 430 u8 reserved_at_45[0x3];
431 u8 src_addr_index[0x8]; 431 u8 src_addr_index[0x8];
432 u8 reserved_3[0x4]; 432 u8 reserved_at_50[0x4];
433 u8 stat_rate[0x4]; 433 u8 stat_rate[0x4];
434 u8 hop_limit[0x8]; 434 u8 hop_limit[0x8];
435 435
436 u8 reserved_4[0x4]; 436 u8 reserved_at_60[0x4];
437 u8 tclass[0x8]; 437 u8 tclass[0x8];
438 u8 flow_label[0x14]; 438 u8 flow_label[0x14];
439 439
440 u8 rgid_rip[16][0x8]; 440 u8 rgid_rip[16][0x8];
441 441
442 u8 reserved_5[0x4]; 442 u8 reserved_at_100[0x4];
443 u8 f_dscp[0x1]; 443 u8 f_dscp[0x1];
444 u8 f_ecn[0x1]; 444 u8 f_ecn[0x1];
445 u8 reserved_6[0x1]; 445 u8 reserved_at_106[0x1];
446 u8 f_eth_prio[0x1]; 446 u8 f_eth_prio[0x1];
447 u8 ecn[0x2]; 447 u8 ecn[0x2];
448 u8 dscp[0x6]; 448 u8 dscp[0x6];
@@ -458,25 +458,25 @@ struct mlx5_ifc_ads_bits {
458}; 458};
459 459
460struct mlx5_ifc_flow_table_nic_cap_bits { 460struct mlx5_ifc_flow_table_nic_cap_bits {
461 u8 reserved_0[0x200]; 461 u8 reserved_at_0[0x200];
462 462
463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; 463 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
464 464
465 u8 reserved_1[0x200]; 465 u8 reserved_at_400[0x200];
466 466
467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; 467 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
468 468
469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; 469 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
470 470
471 u8 reserved_2[0x200]; 471 u8 reserved_at_a00[0x200];
472 472
473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; 473 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
474 474
475 u8 reserved_3[0x7200]; 475 u8 reserved_at_e00[0x7200];
476}; 476};
477 477
478struct mlx5_ifc_flow_table_eswitch_cap_bits { 478struct mlx5_ifc_flow_table_eswitch_cap_bits {
479 u8 reserved_0[0x200]; 479 u8 reserved_at_0[0x200];
480 480
481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; 481 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
482 482
@@ -484,7 +484,7 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
484 484
485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress; 485 struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
486 486
487 u8 reserved_1[0x7800]; 487 u8 reserved_at_800[0x7800];
488}; 488};
489 489
490struct mlx5_ifc_e_switch_cap_bits { 490struct mlx5_ifc_e_switch_cap_bits {
@@ -493,9 +493,9 @@ struct mlx5_ifc_e_switch_cap_bits {
493 u8 vport_svlan_insert[0x1]; 493 u8 vport_svlan_insert[0x1];
494 u8 vport_cvlan_insert_if_not_exist[0x1]; 494 u8 vport_cvlan_insert_if_not_exist[0x1];
495 u8 vport_cvlan_insert_overwrite[0x1]; 495 u8 vport_cvlan_insert_overwrite[0x1];
496 u8 reserved_0[0x1b]; 496 u8 reserved_at_5[0x1b];
497 497
498 u8 reserved_1[0x7e0]; 498 u8 reserved_at_20[0x7e0];
499}; 499};
500 500
501struct mlx5_ifc_per_protocol_networking_offload_caps_bits { 501struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -504,51 +504,51 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
504 u8 lro_cap[0x1]; 504 u8 lro_cap[0x1];
505 u8 lro_psh_flag[0x1]; 505 u8 lro_psh_flag[0x1];
506 u8 lro_time_stamp[0x1]; 506 u8 lro_time_stamp[0x1];
507 u8 reserved_0[0x3]; 507 u8 reserved_at_5[0x3];
508 u8 self_lb_en_modifiable[0x1]; 508 u8 self_lb_en_modifiable[0x1];
509 u8 reserved_1[0x2]; 509 u8 reserved_at_9[0x2];
510 u8 max_lso_cap[0x5]; 510 u8 max_lso_cap[0x5];
511 u8 reserved_2[0x4]; 511 u8 reserved_at_10[0x4];
512 u8 rss_ind_tbl_cap[0x4]; 512 u8 rss_ind_tbl_cap[0x4];
513 u8 reserved_3[0x3]; 513 u8 reserved_at_18[0x3];
514 u8 tunnel_lso_const_out_ip_id[0x1]; 514 u8 tunnel_lso_const_out_ip_id[0x1];
515 u8 reserved_4[0x2]; 515 u8 reserved_at_1c[0x2];
516 u8 tunnel_statless_gre[0x1]; 516 u8 tunnel_statless_gre[0x1];
517 u8 tunnel_stateless_vxlan[0x1]; 517 u8 tunnel_stateless_vxlan[0x1];
518 518
519 u8 reserved_5[0x20]; 519 u8 reserved_at_20[0x20];
520 520
521 u8 reserved_6[0x10]; 521 u8 reserved_at_40[0x10];
522 u8 lro_min_mss_size[0x10]; 522 u8 lro_min_mss_size[0x10];
523 523
524 u8 reserved_7[0x120]; 524 u8 reserved_at_60[0x120];
525 525
526 u8 lro_timer_supported_periods[4][0x20]; 526 u8 lro_timer_supported_periods[4][0x20];
527 527
528 u8 reserved_8[0x600]; 528 u8 reserved_at_200[0x600];
529}; 529};
530 530
531struct mlx5_ifc_roce_cap_bits { 531struct mlx5_ifc_roce_cap_bits {
532 u8 roce_apm[0x1]; 532 u8 roce_apm[0x1];
533 u8 reserved_0[0x1f]; 533 u8 reserved_at_1[0x1f];
534 534
535 u8 reserved_1[0x60]; 535 u8 reserved_at_20[0x60];
536 536
537 u8 reserved_2[0xc]; 537 u8 reserved_at_80[0xc];
538 u8 l3_type[0x4]; 538 u8 l3_type[0x4];
539 u8 reserved_3[0x8]; 539 u8 reserved_at_90[0x8];
540 u8 roce_version[0x8]; 540 u8 roce_version[0x8];
541 541
542 u8 reserved_4[0x10]; 542 u8 reserved_at_a0[0x10];
543 u8 r_roce_dest_udp_port[0x10]; 543 u8 r_roce_dest_udp_port[0x10];
544 544
545 u8 r_roce_max_src_udp_port[0x10]; 545 u8 r_roce_max_src_udp_port[0x10];
546 u8 r_roce_min_src_udp_port[0x10]; 546 u8 r_roce_min_src_udp_port[0x10];
547 547
548 u8 reserved_5[0x10]; 548 u8 reserved_at_e0[0x10];
549 u8 roce_address_table_size[0x10]; 549 u8 roce_address_table_size[0x10];
550 550
551 u8 reserved_6[0x700]; 551 u8 reserved_at_100[0x700];
552}; 552};
553 553
554enum { 554enum {
@@ -576,35 +576,35 @@ enum {
576}; 576};
577 577
578struct mlx5_ifc_atomic_caps_bits { 578struct mlx5_ifc_atomic_caps_bits {
579 u8 reserved_0[0x40]; 579 u8 reserved_at_0[0x40];
580 580
581 u8 atomic_req_8B_endianess_mode[0x2]; 581 u8 atomic_req_8B_endianess_mode[0x2];
582 u8 reserved_1[0x4]; 582 u8 reserved_at_42[0x4];
583 u8 supported_atomic_req_8B_endianess_mode_1[0x1]; 583 u8 supported_atomic_req_8B_endianess_mode_1[0x1];
584 584
585 u8 reserved_2[0x19]; 585 u8 reserved_at_47[0x19];
586 586
587 u8 reserved_3[0x20]; 587 u8 reserved_at_60[0x20];
588 588
589 u8 reserved_4[0x10]; 589 u8 reserved_at_80[0x10];
590 u8 atomic_operations[0x10]; 590 u8 atomic_operations[0x10];
591 591
592 u8 reserved_5[0x10]; 592 u8 reserved_at_a0[0x10];
593 u8 atomic_size_qp[0x10]; 593 u8 atomic_size_qp[0x10];
594 594
595 u8 reserved_6[0x10]; 595 u8 reserved_at_c0[0x10];
596 u8 atomic_size_dc[0x10]; 596 u8 atomic_size_dc[0x10];
597 597
598 u8 reserved_7[0x720]; 598 u8 reserved_at_e0[0x720];
599}; 599};
600 600
601struct mlx5_ifc_odp_cap_bits { 601struct mlx5_ifc_odp_cap_bits {
602 u8 reserved_0[0x40]; 602 u8 reserved_at_0[0x40];
603 603
604 u8 sig[0x1]; 604 u8 sig[0x1];
605 u8 reserved_1[0x1f]; 605 u8 reserved_at_41[0x1f];
606 606
607 u8 reserved_2[0x20]; 607 u8 reserved_at_60[0x20];
608 608
609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; 609 struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
610 610
@@ -612,7 +612,7 @@ struct mlx5_ifc_odp_cap_bits {
612 612
613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; 613 struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
614 614
615 u8 reserved_3[0x720]; 615 u8 reserved_at_e0[0x720];
616}; 616};
617 617
618enum { 618enum {
@@ -660,55 +660,55 @@ enum {
660}; 660};
661 661
662struct mlx5_ifc_cmd_hca_cap_bits { 662struct mlx5_ifc_cmd_hca_cap_bits {
663 u8 reserved_0[0x80]; 663 u8 reserved_at_0[0x80];
664 664
665 u8 log_max_srq_sz[0x8]; 665 u8 log_max_srq_sz[0x8];
666 u8 log_max_qp_sz[0x8]; 666 u8 log_max_qp_sz[0x8];
667 u8 reserved_1[0xb]; 667 u8 reserved_at_90[0xb];
668 u8 log_max_qp[0x5]; 668 u8 log_max_qp[0x5];
669 669
670 u8 reserved_2[0xb]; 670 u8 reserved_at_a0[0xb];
671 u8 log_max_srq[0x5]; 671 u8 log_max_srq[0x5];
672 u8 reserved_3[0x10]; 672 u8 reserved_at_b0[0x10];
673 673
674 u8 reserved_4[0x8]; 674 u8 reserved_at_c0[0x8];
675 u8 log_max_cq_sz[0x8]; 675 u8 log_max_cq_sz[0x8];
676 u8 reserved_5[0xb]; 676 u8 reserved_at_d0[0xb];
677 u8 log_max_cq[0x5]; 677 u8 log_max_cq[0x5];
678 678
679 u8 log_max_eq_sz[0x8]; 679 u8 log_max_eq_sz[0x8];
680 u8 reserved_6[0x2]; 680 u8 reserved_at_e8[0x2];
681 u8 log_max_mkey[0x6]; 681 u8 log_max_mkey[0x6];
682 u8 reserved_7[0xc]; 682 u8 reserved_at_f0[0xc];
683 u8 log_max_eq[0x4]; 683 u8 log_max_eq[0x4];
684 684
685 u8 max_indirection[0x8]; 685 u8 max_indirection[0x8];
686 u8 reserved_8[0x1]; 686 u8 reserved_at_108[0x1];
687 u8 log_max_mrw_sz[0x7]; 687 u8 log_max_mrw_sz[0x7];
688 u8 reserved_9[0x2]; 688 u8 reserved_at_110[0x2];
689 u8 log_max_bsf_list_size[0x6]; 689 u8 log_max_bsf_list_size[0x6];
690 u8 reserved_10[0x2]; 690 u8 reserved_at_118[0x2];
691 u8 log_max_klm_list_size[0x6]; 691 u8 log_max_klm_list_size[0x6];
692 692
693 u8 reserved_11[0xa]; 693 u8 reserved_at_120[0xa];
694 u8 log_max_ra_req_dc[0x6]; 694 u8 log_max_ra_req_dc[0x6];
695 u8 reserved_12[0xa]; 695 u8 reserved_at_130[0xa];
696 u8 log_max_ra_res_dc[0x6]; 696 u8 log_max_ra_res_dc[0x6];
697 697
698 u8 reserved_13[0xa]; 698 u8 reserved_at_140[0xa];
699 u8 log_max_ra_req_qp[0x6]; 699 u8 log_max_ra_req_qp[0x6];
700 u8 reserved_14[0xa]; 700 u8 reserved_at_150[0xa];
701 u8 log_max_ra_res_qp[0x6]; 701 u8 log_max_ra_res_qp[0x6];
702 702
703 u8 pad_cap[0x1]; 703 u8 pad_cap[0x1];
704 u8 cc_query_allowed[0x1]; 704 u8 cc_query_allowed[0x1];
705 u8 cc_modify_allowed[0x1]; 705 u8 cc_modify_allowed[0x1];
706 u8 reserved_15[0xd]; 706 u8 reserved_at_163[0xd];
707 u8 gid_table_size[0x10]; 707 u8 gid_table_size[0x10];
708 708
709 u8 out_of_seq_cnt[0x1]; 709 u8 out_of_seq_cnt[0x1];
710 u8 vport_counters[0x1]; 710 u8 vport_counters[0x1];
711 u8 reserved_16[0x4]; 711 u8 reserved_at_182[0x4];
712 u8 max_qp_cnt[0xa]; 712 u8 max_qp_cnt[0xa];
713 u8 pkey_table_size[0x10]; 713 u8 pkey_table_size[0x10];
714 714
@@ -716,158 +716,158 @@ struct mlx5_ifc_cmd_hca_cap_bits {
716 u8 vhca_group_manager[0x1]; 716 u8 vhca_group_manager[0x1];
717 u8 ib_virt[0x1]; 717 u8 ib_virt[0x1];
718 u8 eth_virt[0x1]; 718 u8 eth_virt[0x1];
719 u8 reserved_17[0x1]; 719 u8 reserved_at_1a4[0x1];
720 u8 ets[0x1]; 720 u8 ets[0x1];
721 u8 nic_flow_table[0x1]; 721 u8 nic_flow_table[0x1];
722 u8 eswitch_flow_table[0x1]; 722 u8 eswitch_flow_table[0x1];
723 u8 early_vf_enable; 723 u8 early_vf_enable;
724 u8 reserved_18[0x2]; 724 u8 reserved_at_1a8[0x2];
725 u8 local_ca_ack_delay[0x5]; 725 u8 local_ca_ack_delay[0x5];
726 u8 reserved_19[0x6]; 726 u8 reserved_at_1af[0x6];
727 u8 port_type[0x2]; 727 u8 port_type[0x2];
728 u8 num_ports[0x8]; 728 u8 num_ports[0x8];
729 729
730 u8 reserved_20[0x3]; 730 u8 reserved_at_1bf[0x3];
731 u8 log_max_msg[0x5]; 731 u8 log_max_msg[0x5];
732 u8 reserved_21[0x18]; 732 u8 reserved_at_1c7[0x18];
733 733
734 u8 stat_rate_support[0x10]; 734 u8 stat_rate_support[0x10];
735 u8 reserved_22[0xc]; 735 u8 reserved_at_1ef[0xc];
736 u8 cqe_version[0x4]; 736 u8 cqe_version[0x4];
737 737
738 u8 compact_address_vector[0x1]; 738 u8 compact_address_vector[0x1];
739 u8 reserved_23[0xe]; 739 u8 reserved_at_200[0xe];
740 u8 drain_sigerr[0x1]; 740 u8 drain_sigerr[0x1];
741 u8 cmdif_checksum[0x2]; 741 u8 cmdif_checksum[0x2];
742 u8 sigerr_cqe[0x1]; 742 u8 sigerr_cqe[0x1];
743 u8 reserved_24[0x1]; 743 u8 reserved_at_212[0x1];
744 u8 wq_signature[0x1]; 744 u8 wq_signature[0x1];
745 u8 sctr_data_cqe[0x1]; 745 u8 sctr_data_cqe[0x1];
746 u8 reserved_25[0x1]; 746 u8 reserved_at_215[0x1];
747 u8 sho[0x1]; 747 u8 sho[0x1];
748 u8 tph[0x1]; 748 u8 tph[0x1];
749 u8 rf[0x1]; 749 u8 rf[0x1];
750 u8 dct[0x1]; 750 u8 dct[0x1];
751 u8 reserved_26[0x1]; 751 u8 reserved_at_21a[0x1];
752 u8 eth_net_offloads[0x1]; 752 u8 eth_net_offloads[0x1];
753 u8 roce[0x1]; 753 u8 roce[0x1];
754 u8 atomic[0x1]; 754 u8 atomic[0x1];
755 u8 reserved_27[0x1]; 755 u8 reserved_at_21e[0x1];
756 756
757 u8 cq_oi[0x1]; 757 u8 cq_oi[0x1];
758 u8 cq_resize[0x1]; 758 u8 cq_resize[0x1];
759 u8 cq_moderation[0x1]; 759 u8 cq_moderation[0x1];
760 u8 reserved_28[0x3]; 760 u8 reserved_at_222[0x3];
761 u8 cq_eq_remap[0x1]; 761 u8 cq_eq_remap[0x1];
762 u8 pg[0x1]; 762 u8 pg[0x1];
763 u8 block_lb_mc[0x1]; 763 u8 block_lb_mc[0x1];
764 u8 reserved_29[0x1]; 764 u8 reserved_at_228[0x1];
765 u8 scqe_break_moderation[0x1]; 765 u8 scqe_break_moderation[0x1];
766 u8 reserved_30[0x1]; 766 u8 reserved_at_22a[0x1];
767 u8 cd[0x1]; 767 u8 cd[0x1];
768 u8 reserved_31[0x1]; 768 u8 reserved_at_22c[0x1];
769 u8 apm[0x1]; 769 u8 apm[0x1];
770 u8 reserved_32[0x7]; 770 u8 reserved_at_22e[0x7];
771 u8 qkv[0x1]; 771 u8 qkv[0x1];
772 u8 pkv[0x1]; 772 u8 pkv[0x1];
773 u8 reserved_33[0x4]; 773 u8 reserved_at_237[0x4];
774 u8 xrc[0x1]; 774 u8 xrc[0x1];
775 u8 ud[0x1]; 775 u8 ud[0x1];
776 u8 uc[0x1]; 776 u8 uc[0x1];
777 u8 rc[0x1]; 777 u8 rc[0x1];
778 778
779 u8 reserved_34[0xa]; 779 u8 reserved_at_23f[0xa];
780 u8 uar_sz[0x6]; 780 u8 uar_sz[0x6];
781 u8 reserved_35[0x8]; 781 u8 reserved_at_24f[0x8];
782 u8 log_pg_sz[0x8]; 782 u8 log_pg_sz[0x8];
783 783
784 u8 bf[0x1]; 784 u8 bf[0x1];
785 u8 reserved_36[0x1]; 785 u8 reserved_at_260[0x1];
786 u8 pad_tx_eth_packet[0x1]; 786 u8 pad_tx_eth_packet[0x1];
787 u8 reserved_37[0x8]; 787 u8 reserved_at_262[0x8];
788 u8 log_bf_reg_size[0x5]; 788 u8 log_bf_reg_size[0x5];
789 u8 reserved_38[0x10]; 789 u8 reserved_at_26f[0x10];
790 790
791 u8 reserved_39[0x10]; 791 u8 reserved_at_27f[0x10];
792 u8 max_wqe_sz_sq[0x10]; 792 u8 max_wqe_sz_sq[0x10];
793 793
794 u8 reserved_40[0x10]; 794 u8 reserved_at_29f[0x10];
795 u8 max_wqe_sz_rq[0x10]; 795 u8 max_wqe_sz_rq[0x10];
796 796
797 u8 reserved_41[0x10]; 797 u8 reserved_at_2bf[0x10];
798 u8 max_wqe_sz_sq_dc[0x10]; 798 u8 max_wqe_sz_sq_dc[0x10];
799 799
800 u8 reserved_42[0x7]; 800 u8 reserved_at_2df[0x7];
801 u8 max_qp_mcg[0x19]; 801 u8 max_qp_mcg[0x19];
802 802
803 u8 reserved_43[0x18]; 803 u8 reserved_at_2ff[0x18];
804 u8 log_max_mcg[0x8]; 804 u8 log_max_mcg[0x8];
805 805
806 u8 reserved_44[0x3]; 806 u8 reserved_at_31f[0x3];
807 u8 log_max_transport_domain[0x5]; 807 u8 log_max_transport_domain[0x5];
808 u8 reserved_45[0x3]; 808 u8 reserved_at_327[0x3];
809 u8 log_max_pd[0x5]; 809 u8 log_max_pd[0x5];
810 u8 reserved_46[0xb]; 810 u8 reserved_at_32f[0xb];
811 u8 log_max_xrcd[0x5]; 811 u8 log_max_xrcd[0x5];
812 812
813 u8 reserved_47[0x20]; 813 u8 reserved_at_33f[0x20];
814 814
815 u8 reserved_48[0x3]; 815 u8 reserved_at_35f[0x3];
816 u8 log_max_rq[0x5]; 816 u8 log_max_rq[0x5];
817 u8 reserved_49[0x3]; 817 u8 reserved_at_367[0x3];
818 u8 log_max_sq[0x5]; 818 u8 log_max_sq[0x5];
819 u8 reserved_50[0x3]; 819 u8 reserved_at_36f[0x3];
820 u8 log_max_tir[0x5]; 820 u8 log_max_tir[0x5];
821 u8 reserved_51[0x3]; 821 u8 reserved_at_377[0x3];
822 u8 log_max_tis[0x5]; 822 u8 log_max_tis[0x5];
823 823
824 u8 basic_cyclic_rcv_wqe[0x1]; 824 u8 basic_cyclic_rcv_wqe[0x1];
825 u8 reserved_52[0x2]; 825 u8 reserved_at_380[0x2];
826 u8 log_max_rmp[0x5]; 826 u8 log_max_rmp[0x5];
827 u8 reserved_53[0x3]; 827 u8 reserved_at_387[0x3];
828 u8 log_max_rqt[0x5]; 828 u8 log_max_rqt[0x5];
829 u8 reserved_54[0x3]; 829 u8 reserved_at_38f[0x3];
830 u8 log_max_rqt_size[0x5]; 830 u8 log_max_rqt_size[0x5];
831 u8 reserved_55[0x3]; 831 u8 reserved_at_397[0x3];
832 u8 log_max_tis_per_sq[0x5]; 832 u8 log_max_tis_per_sq[0x5];
833 833
834 u8 reserved_56[0x3]; 834 u8 reserved_at_39f[0x3];
835 u8 log_max_stride_sz_rq[0x5]; 835 u8 log_max_stride_sz_rq[0x5];
836 u8 reserved_57[0x3]; 836 u8 reserved_at_3a7[0x3];
837 u8 log_min_stride_sz_rq[0x5]; 837 u8 log_min_stride_sz_rq[0x5];
838 u8 reserved_58[0x3]; 838 u8 reserved_at_3af[0x3];
839 u8 log_max_stride_sz_sq[0x5]; 839 u8 log_max_stride_sz_sq[0x5];
840 u8 reserved_59[0x3]; 840 u8 reserved_at_3b7[0x3];
841 u8 log_min_stride_sz_sq[0x5]; 841 u8 log_min_stride_sz_sq[0x5];
842 842
843 u8 reserved_60[0x1b]; 843 u8 reserved_at_3bf[0x1b];
844 u8 log_max_wq_sz[0x5]; 844 u8 log_max_wq_sz[0x5];
845 845
846 u8 nic_vport_change_event[0x1]; 846 u8 nic_vport_change_event[0x1];
847 u8 reserved_61[0xa]; 847 u8 reserved_at_3e0[0xa];
848 u8 log_max_vlan_list[0x5]; 848 u8 log_max_vlan_list[0x5];
849 u8 reserved_62[0x3]; 849 u8 reserved_at_3ef[0x3];
850 u8 log_max_current_mc_list[0x5]; 850 u8 log_max_current_mc_list[0x5];
851 u8 reserved_63[0x3]; 851 u8 reserved_at_3f7[0x3];
852 u8 log_max_current_uc_list[0x5]; 852 u8 log_max_current_uc_list[0x5];
853 853
854 u8 reserved_64[0x80]; 854 u8 reserved_at_3ff[0x80];
855 855
856 u8 reserved_65[0x3]; 856 u8 reserved_at_47f[0x3];
857 u8 log_max_l2_table[0x5]; 857 u8 log_max_l2_table[0x5];
858 u8 reserved_66[0x8]; 858 u8 reserved_at_487[0x8];
859 u8 log_uar_page_sz[0x10]; 859 u8 log_uar_page_sz[0x10];
860 860
861 u8 reserved_67[0x20]; 861 u8 reserved_at_49f[0x20];
862 u8 device_frequency_mhz[0x20]; 862 u8 device_frequency_mhz[0x20];
863 u8 device_frequency_khz[0x20]; 863 u8 device_frequency_khz[0x20];
864 u8 reserved_68[0x5f]; 864 u8 reserved_at_4ff[0x5f];
865 u8 cqe_zip[0x1]; 865 u8 cqe_zip[0x1];
866 866
867 u8 cqe_zip_timeout[0x10]; 867 u8 cqe_zip_timeout[0x10];
868 u8 cqe_zip_max_num[0x10]; 868 u8 cqe_zip_max_num[0x10];
869 869
870 u8 reserved_69[0x220]; 870 u8 reserved_at_57f[0x220];
871}; 871};
872 872
873enum mlx5_flow_destination_type { 873enum mlx5_flow_destination_type {
@@ -880,7 +880,7 @@ struct mlx5_ifc_dest_format_struct_bits {
880 u8 destination_type[0x8]; 880 u8 destination_type[0x8];
881 u8 destination_id[0x18]; 881 u8 destination_id[0x18];
882 882
883 u8 reserved_0[0x20]; 883 u8 reserved_at_20[0x20];
884}; 884};
885 885
886struct mlx5_ifc_fte_match_param_bits { 886struct mlx5_ifc_fte_match_param_bits {
@@ -890,7 +890,7 @@ struct mlx5_ifc_fte_match_param_bits {
890 890
891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; 891 struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
892 892
893 u8 reserved_0[0xa00]; 893 u8 reserved_at_600[0xa00];
894}; 894};
895 895
896enum { 896enum {
@@ -922,18 +922,18 @@ struct mlx5_ifc_wq_bits {
922 u8 wq_signature[0x1]; 922 u8 wq_signature[0x1];
923 u8 end_padding_mode[0x2]; 923 u8 end_padding_mode[0x2];
924 u8 cd_slave[0x1]; 924 u8 cd_slave[0x1];
925 u8 reserved_0[0x18]; 925 u8 reserved_at_8[0x18];
926 926
927 u8 hds_skip_first_sge[0x1]; 927 u8 hds_skip_first_sge[0x1];
928 u8 log2_hds_buf_size[0x3]; 928 u8 log2_hds_buf_size[0x3];
929 u8 reserved_1[0x7]; 929 u8 reserved_at_24[0x7];
930 u8 page_offset[0x5]; 930 u8 page_offset[0x5];
931 u8 lwm[0x10]; 931 u8 lwm[0x10];
932 932
933 u8 reserved_2[0x8]; 933 u8 reserved_at_40[0x8];
934 u8 pd[0x18]; 934 u8 pd[0x18];
935 935
936 u8 reserved_3[0x8]; 936 u8 reserved_at_60[0x8];
937 u8 uar_page[0x18]; 937 u8 uar_page[0x18];
938 938
939 u8 dbr_addr[0x40]; 939 u8 dbr_addr[0x40];
@@ -942,60 +942,60 @@ struct mlx5_ifc_wq_bits {
942 942
943 u8 sw_counter[0x20]; 943 u8 sw_counter[0x20];
944 944
945 u8 reserved_4[0xc]; 945 u8 reserved_at_100[0xc];
946 u8 log_wq_stride[0x4]; 946 u8 log_wq_stride[0x4];
947 u8 reserved_5[0x3]; 947 u8 reserved_at_110[0x3];
948 u8 log_wq_pg_sz[0x5]; 948 u8 log_wq_pg_sz[0x5];
949 u8 reserved_6[0x3]; 949 u8 reserved_at_118[0x3];
950 u8 log_wq_sz[0x5]; 950 u8 log_wq_sz[0x5];
951 951
952 u8 reserved_7[0x4e0]; 952 u8 reserved_at_120[0x4e0];
953 953
954 struct mlx5_ifc_cmd_pas_bits pas[0]; 954 struct mlx5_ifc_cmd_pas_bits pas[0];
955}; 955};
956 956
957struct mlx5_ifc_rq_num_bits { 957struct mlx5_ifc_rq_num_bits {
958 u8 reserved_0[0x8]; 958 u8 reserved_at_0[0x8];
959 u8 rq_num[0x18]; 959 u8 rq_num[0x18];
960}; 960};
961 961
962struct mlx5_ifc_mac_address_layout_bits { 962struct mlx5_ifc_mac_address_layout_bits {
963 u8 reserved_0[0x10]; 963 u8 reserved_at_0[0x10];
964 u8 mac_addr_47_32[0x10]; 964 u8 mac_addr_47_32[0x10];
965 965
966 u8 mac_addr_31_0[0x20]; 966 u8 mac_addr_31_0[0x20];
967}; 967};
968 968
969struct mlx5_ifc_vlan_layout_bits { 969struct mlx5_ifc_vlan_layout_bits {
970 u8 reserved_0[0x14]; 970 u8 reserved_at_0[0x14];
971 u8 vlan[0x0c]; 971 u8 vlan[0x0c];
972 972
973 u8 reserved_1[0x20]; 973 u8 reserved_at_20[0x20];
974}; 974};
975 975
976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { 976struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
977 u8 reserved_0[0xa0]; 977 u8 reserved_at_0[0xa0];
978 978
979 u8 min_time_between_cnps[0x20]; 979 u8 min_time_between_cnps[0x20];
980 980
981 u8 reserved_1[0x12]; 981 u8 reserved_at_c0[0x12];
982 u8 cnp_dscp[0x6]; 982 u8 cnp_dscp[0x6];
983 u8 reserved_2[0x5]; 983 u8 reserved_at_d8[0x5];
984 u8 cnp_802p_prio[0x3]; 984 u8 cnp_802p_prio[0x3];
985 985
986 u8 reserved_3[0x720]; 986 u8 reserved_at_e0[0x720];
987}; 987};
988 988
989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { 989struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
990 u8 reserved_0[0x60]; 990 u8 reserved_at_0[0x60];
991 991
992 u8 reserved_1[0x4]; 992 u8 reserved_at_60[0x4];
993 u8 clamp_tgt_rate[0x1]; 993 u8 clamp_tgt_rate[0x1];
994 u8 reserved_2[0x3]; 994 u8 reserved_at_65[0x3];
995 u8 clamp_tgt_rate_after_time_inc[0x1]; 995 u8 clamp_tgt_rate_after_time_inc[0x1];
996 u8 reserved_3[0x17]; 996 u8 reserved_at_69[0x17];
997 997
998 u8 reserved_4[0x20]; 998 u8 reserved_at_80[0x20];
999 999
1000 u8 rpg_time_reset[0x20]; 1000 u8 rpg_time_reset[0x20];
1001 1001
@@ -1015,7 +1015,7 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1015 1015
1016 u8 rpg_min_rate[0x20]; 1016 u8 rpg_min_rate[0x20];
1017 1017
1018 u8 reserved_5[0xe0]; 1018 u8 reserved_at_1c0[0xe0];
1019 1019
1020 u8 rate_to_set_on_first_cnp[0x20]; 1020 u8 rate_to_set_on_first_cnp[0x20];
1021 1021
@@ -1025,15 +1025,15 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
1025 1025
1026 u8 rate_reduce_monitor_period[0x20]; 1026 u8 rate_reduce_monitor_period[0x20];
1027 1027
1028 u8 reserved_6[0x20]; 1028 u8 reserved_at_320[0x20];
1029 1029
1030 u8 initial_alpha_value[0x20]; 1030 u8 initial_alpha_value[0x20];
1031 1031
1032 u8 reserved_7[0x4a0]; 1032 u8 reserved_at_360[0x4a0];
1033}; 1033};
1034 1034
1035struct mlx5_ifc_cong_control_802_1qau_rp_bits { 1035struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1036 u8 reserved_0[0x80]; 1036 u8 reserved_at_0[0x80];
1037 1037
1038 u8 rppp_max_rps[0x20]; 1038 u8 rppp_max_rps[0x20];
1039 1039
@@ -1055,7 +1055,7 @@ struct mlx5_ifc_cong_control_802_1qau_rp_bits {
1055 1055
1056 u8 rpg_min_rate[0x20]; 1056 u8 rpg_min_rate[0x20];
1057 1057
1058 u8 reserved_1[0x640]; 1058 u8 reserved_at_1c0[0x640];
1059}; 1059};
1060 1060
1061enum { 1061enum {
@@ -1205,7 +1205,7 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
1205 1205
1206 u8 successful_recovery_events[0x20]; 1206 u8 successful_recovery_events[0x20];
1207 1207
1208 u8 reserved_0[0x180]; 1208 u8 reserved_at_640[0x180];
1209}; 1209};
1210 1210
1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { 1211struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -1213,7 +1213,7 @@ struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
1213 1213
1214 u8 transmit_queue_low[0x20]; 1214 u8 transmit_queue_low[0x20];
1215 1215
1216 u8 reserved_0[0x780]; 1216 u8 reserved_at_40[0x780];
1217}; 1217};
1218 1218
1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { 1219struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1221,7 +1221,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1221 1221
1222 u8 rx_octets_low[0x20]; 1222 u8 rx_octets_low[0x20];
1223 1223
1224 u8 reserved_0[0xc0]; 1224 u8 reserved_at_40[0xc0];
1225 1225
1226 u8 rx_frames_high[0x20]; 1226 u8 rx_frames_high[0x20];
1227 1227
@@ -1231,7 +1231,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1231 1231
1232 u8 tx_octets_low[0x20]; 1232 u8 tx_octets_low[0x20];
1233 1233
1234 u8 reserved_1[0xc0]; 1234 u8 reserved_at_180[0xc0];
1235 1235
1236 u8 tx_frames_high[0x20]; 1236 u8 tx_frames_high[0x20];
1237 1237
@@ -1257,7 +1257,7 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
1257 1257
1258 u8 rx_pause_transition_low[0x20]; 1258 u8 rx_pause_transition_low[0x20];
1259 1259
1260 u8 reserved_2[0x400]; 1260 u8 reserved_at_3c0[0x400];
1261}; 1261};
1262 1262
1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { 1263struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1265,7 +1265,7 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
1265 1265
1266 u8 port_transmit_wait_low[0x20]; 1266 u8 port_transmit_wait_low[0x20];
1267 1267
1268 u8 reserved_0[0x780]; 1268 u8 reserved_at_40[0x780];
1269}; 1269};
1270 1270
1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { 1271struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1333,7 +1333,7 @@ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
1333 1333
1334 u8 dot3out_pause_frames_low[0x20]; 1334 u8 dot3out_pause_frames_low[0x20];
1335 1335
1336 u8 reserved_0[0x3c0]; 1336 u8 reserved_at_400[0x3c0];
1337}; 1337};
1338 1338
1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { 1339struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
@@ -1421,7 +1421,7 @@ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
1421 1421
1422 u8 ether_stats_pkts8192to10239octets_low[0x20]; 1422 u8 ether_stats_pkts8192to10239octets_low[0x20];
1423 1423
1424 u8 reserved_0[0x280]; 1424 u8 reserved_at_540[0x280];
1425}; 1425};
1426 1426
1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { 1427struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
@@ -1477,7 +1477,7 @@ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
1477 1477
1478 u8 if_out_broadcast_pkts_low[0x20]; 1478 u8 if_out_broadcast_pkts_low[0x20];
1479 1479
1480 u8 reserved_0[0x480]; 1480 u8 reserved_at_340[0x480];
1481}; 1481};
1482 1482
1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { 1483struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
@@ -1557,54 +1557,54 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
1557 1557
1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; 1558 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
1559 1559
1560 u8 reserved_0[0x300]; 1560 u8 reserved_at_4c0[0x300];
1561}; 1561};
1562 1562
1563struct mlx5_ifc_cmd_inter_comp_event_bits { 1563struct mlx5_ifc_cmd_inter_comp_event_bits {
1564 u8 command_completion_vector[0x20]; 1564 u8 command_completion_vector[0x20];
1565 1565
1566 u8 reserved_0[0xc0]; 1566 u8 reserved_at_20[0xc0];
1567}; 1567};
1568 1568
1569struct mlx5_ifc_stall_vl_event_bits { 1569struct mlx5_ifc_stall_vl_event_bits {
1570 u8 reserved_0[0x18]; 1570 u8 reserved_at_0[0x18];
1571 u8 port_num[0x1]; 1571 u8 port_num[0x1];
1572 u8 reserved_1[0x3]; 1572 u8 reserved_at_19[0x3];
1573 u8 vl[0x4]; 1573 u8 vl[0x4];
1574 1574
1575 u8 reserved_2[0xa0]; 1575 u8 reserved_at_20[0xa0];
1576}; 1576};
1577 1577
1578struct mlx5_ifc_db_bf_congestion_event_bits { 1578struct mlx5_ifc_db_bf_congestion_event_bits {
1579 u8 event_subtype[0x8]; 1579 u8 event_subtype[0x8];
1580 u8 reserved_0[0x8]; 1580 u8 reserved_at_8[0x8];
1581 u8 congestion_level[0x8]; 1581 u8 congestion_level[0x8];
1582 u8 reserved_1[0x8]; 1582 u8 reserved_at_18[0x8];
1583 1583
1584 u8 reserved_2[0xa0]; 1584 u8 reserved_at_20[0xa0];
1585}; 1585};
1586 1586
1587struct mlx5_ifc_gpio_event_bits { 1587struct mlx5_ifc_gpio_event_bits {
1588 u8 reserved_0[0x60]; 1588 u8 reserved_at_0[0x60];
1589 1589
1590 u8 gpio_event_hi[0x20]; 1590 u8 gpio_event_hi[0x20];
1591 1591
1592 u8 gpio_event_lo[0x20]; 1592 u8 gpio_event_lo[0x20];
1593 1593
1594 u8 reserved_1[0x40]; 1594 u8 reserved_at_a0[0x40];
1595}; 1595};
1596 1596
1597struct mlx5_ifc_port_state_change_event_bits { 1597struct mlx5_ifc_port_state_change_event_bits {
1598 u8 reserved_0[0x40]; 1598 u8 reserved_at_0[0x40];
1599 1599
1600 u8 port_num[0x4]; 1600 u8 port_num[0x4];
1601 u8 reserved_1[0x1c]; 1601 u8 reserved_at_44[0x1c];
1602 1602
1603 u8 reserved_2[0x80]; 1603 u8 reserved_at_60[0x80];
1604}; 1604};
1605 1605
1606struct mlx5_ifc_dropped_packet_logged_bits { 1606struct mlx5_ifc_dropped_packet_logged_bits {
1607 u8 reserved_0[0xe0]; 1607 u8 reserved_at_0[0xe0];
1608}; 1608};
1609 1609
1610enum { 1610enum {
@@ -1613,15 +1613,15 @@ enum {
1613}; 1613};
1614 1614
1615struct mlx5_ifc_cq_error_bits { 1615struct mlx5_ifc_cq_error_bits {
1616 u8 reserved_0[0x8]; 1616 u8 reserved_at_0[0x8];
1617 u8 cqn[0x18]; 1617 u8 cqn[0x18];
1618 1618
1619 u8 reserved_1[0x20]; 1619 u8 reserved_at_20[0x20];
1620 1620
1621 u8 reserved_2[0x18]; 1621 u8 reserved_at_40[0x18];
1622 u8 syndrome[0x8]; 1622 u8 syndrome[0x8];
1623 1623
1624 u8 reserved_3[0x80]; 1624 u8 reserved_at_60[0x80];
1625}; 1625};
1626 1626
1627struct mlx5_ifc_rdma_page_fault_event_bits { 1627struct mlx5_ifc_rdma_page_fault_event_bits {
@@ -1629,14 +1629,14 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1629 1629
1630 u8 r_key[0x20]; 1630 u8 r_key[0x20];
1631 1631
1632 u8 reserved_0[0x10]; 1632 u8 reserved_at_40[0x10];
1633 u8 packet_len[0x10]; 1633 u8 packet_len[0x10];
1634 1634
1635 u8 rdma_op_len[0x20]; 1635 u8 rdma_op_len[0x20];
1636 1636
1637 u8 rdma_va[0x40]; 1637 u8 rdma_va[0x40];
1638 1638
1639 u8 reserved_1[0x5]; 1639 u8 reserved_at_c0[0x5];
1640 u8 rdma[0x1]; 1640 u8 rdma[0x1];
1641 u8 write[0x1]; 1641 u8 write[0x1];
1642 u8 requestor[0x1]; 1642 u8 requestor[0x1];
@@ -1646,15 +1646,15 @@ struct mlx5_ifc_rdma_page_fault_event_bits {
1646struct mlx5_ifc_wqe_associated_page_fault_event_bits { 1646struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1647 u8 bytes_committed[0x20]; 1647 u8 bytes_committed[0x20];
1648 1648
1649 u8 reserved_0[0x10]; 1649 u8 reserved_at_20[0x10];
1650 u8 wqe_index[0x10]; 1650 u8 wqe_index[0x10];
1651 1651
1652 u8 reserved_1[0x10]; 1652 u8 reserved_at_40[0x10];
1653 u8 len[0x10]; 1653 u8 len[0x10];
1654 1654
1655 u8 reserved_2[0x60]; 1655 u8 reserved_at_60[0x60];
1656 1656
1657 u8 reserved_3[0x5]; 1657 u8 reserved_at_c0[0x5];
1658 u8 rdma[0x1]; 1658 u8 rdma[0x1];
1659 u8 write_read[0x1]; 1659 u8 write_read[0x1];
1660 u8 requestor[0x1]; 1660 u8 requestor[0x1];
@@ -1662,26 +1662,26 @@ struct mlx5_ifc_wqe_associated_page_fault_event_bits {
1662}; 1662};
1663 1663
1664struct mlx5_ifc_qp_events_bits { 1664struct mlx5_ifc_qp_events_bits {
1665 u8 reserved_0[0xa0]; 1665 u8 reserved_at_0[0xa0];
1666 1666
1667 u8 type[0x8]; 1667 u8 type[0x8];
1668 u8 reserved_1[0x18]; 1668 u8 reserved_at_a8[0x18];
1669 1669
1670 u8 reserved_2[0x8]; 1670 u8 reserved_at_c0[0x8];
1671 u8 qpn_rqn_sqn[0x18]; 1671 u8 qpn_rqn_sqn[0x18];
1672}; 1672};
1673 1673
1674struct mlx5_ifc_dct_events_bits { 1674struct mlx5_ifc_dct_events_bits {
1675 u8 reserved_0[0xc0]; 1675 u8 reserved_at_0[0xc0];
1676 1676
1677 u8 reserved_1[0x8]; 1677 u8 reserved_at_c0[0x8];
1678 u8 dct_number[0x18]; 1678 u8 dct_number[0x18];
1679}; 1679};
1680 1680
1681struct mlx5_ifc_comp_event_bits { 1681struct mlx5_ifc_comp_event_bits {
1682 u8 reserved_0[0xc0]; 1682 u8 reserved_at_0[0xc0];
1683 1683
1684 u8 reserved_1[0x8]; 1684 u8 reserved_at_c0[0x8];
1685 u8 cq_number[0x18]; 1685 u8 cq_number[0x18];
1686}; 1686};
1687 1687
@@ -1754,41 +1754,41 @@ enum {
1754 1754
1755struct mlx5_ifc_qpc_bits { 1755struct mlx5_ifc_qpc_bits {
1756 u8 state[0x4]; 1756 u8 state[0x4];
1757 u8 reserved_0[0x4]; 1757 u8 reserved_at_4[0x4];
1758 u8 st[0x8]; 1758 u8 st[0x8];
1759 u8 reserved_1[0x3]; 1759 u8 reserved_at_10[0x3];
1760 u8 pm_state[0x2]; 1760 u8 pm_state[0x2];
1761 u8 reserved_2[0x7]; 1761 u8 reserved_at_15[0x7];
1762 u8 end_padding_mode[0x2]; 1762 u8 end_padding_mode[0x2];
1763 u8 reserved_3[0x2]; 1763 u8 reserved_at_1e[0x2];
1764 1764
1765 u8 wq_signature[0x1]; 1765 u8 wq_signature[0x1];
1766 u8 block_lb_mc[0x1]; 1766 u8 block_lb_mc[0x1];
1767 u8 atomic_like_write_en[0x1]; 1767 u8 atomic_like_write_en[0x1];
1768 u8 latency_sensitive[0x1]; 1768 u8 latency_sensitive[0x1];
1769 u8 reserved_4[0x1]; 1769 u8 reserved_at_24[0x1];
1770 u8 drain_sigerr[0x1]; 1770 u8 drain_sigerr[0x1];
1771 u8 reserved_5[0x2]; 1771 u8 reserved_at_26[0x2];
1772 u8 pd[0x18]; 1772 u8 pd[0x18];
1773 1773
1774 u8 mtu[0x3]; 1774 u8 mtu[0x3];
1775 u8 log_msg_max[0x5]; 1775 u8 log_msg_max[0x5];
1776 u8 reserved_6[0x1]; 1776 u8 reserved_at_48[0x1];
1777 u8 log_rq_size[0x4]; 1777 u8 log_rq_size[0x4];
1778 u8 log_rq_stride[0x3]; 1778 u8 log_rq_stride[0x3];
1779 u8 no_sq[0x1]; 1779 u8 no_sq[0x1];
1780 u8 log_sq_size[0x4]; 1780 u8 log_sq_size[0x4];
1781 u8 reserved_7[0x6]; 1781 u8 reserved_at_55[0x6];
1782 u8 rlky[0x1]; 1782 u8 rlky[0x1];
1783 u8 reserved_8[0x4]; 1783 u8 reserved_at_5c[0x4];
1784 1784
1785 u8 counter_set_id[0x8]; 1785 u8 counter_set_id[0x8];
1786 u8 uar_page[0x18]; 1786 u8 uar_page[0x18];
1787 1787
1788 u8 reserved_9[0x8]; 1788 u8 reserved_at_80[0x8];
1789 u8 user_index[0x18]; 1789 u8 user_index[0x18];
1790 1790
1791 u8 reserved_10[0x3]; 1791 u8 reserved_at_a0[0x3];
1792 u8 log_page_size[0x5]; 1792 u8 log_page_size[0x5];
1793 u8 remote_qpn[0x18]; 1793 u8 remote_qpn[0x18];
1794 1794
@@ -1797,66 +1797,66 @@ struct mlx5_ifc_qpc_bits {
1797 struct mlx5_ifc_ads_bits secondary_address_path; 1797 struct mlx5_ifc_ads_bits secondary_address_path;
1798 1798
1799 u8 log_ack_req_freq[0x4]; 1799 u8 log_ack_req_freq[0x4];
1800 u8 reserved_11[0x4]; 1800 u8 reserved_at_384[0x4];
1801 u8 log_sra_max[0x3]; 1801 u8 log_sra_max[0x3];
1802 u8 reserved_12[0x2]; 1802 u8 reserved_at_38b[0x2];
1803 u8 retry_count[0x3]; 1803 u8 retry_count[0x3];
1804 u8 rnr_retry[0x3]; 1804 u8 rnr_retry[0x3];
1805 u8 reserved_13[0x1]; 1805 u8 reserved_at_393[0x1];
1806 u8 fre[0x1]; 1806 u8 fre[0x1];
1807 u8 cur_rnr_retry[0x3]; 1807 u8 cur_rnr_retry[0x3];
1808 u8 cur_retry_count[0x3]; 1808 u8 cur_retry_count[0x3];
1809 u8 reserved_14[0x5]; 1809 u8 reserved_at_39b[0x5];
1810 1810
1811 u8 reserved_15[0x20]; 1811 u8 reserved_at_3a0[0x20];
1812 1812
1813 u8 reserved_16[0x8]; 1813 u8 reserved_at_3c0[0x8];
1814 u8 next_send_psn[0x18]; 1814 u8 next_send_psn[0x18];
1815 1815
1816 u8 reserved_17[0x8]; 1816 u8 reserved_at_3e0[0x8];
1817 u8 cqn_snd[0x18]; 1817 u8 cqn_snd[0x18];
1818 1818
1819 u8 reserved_18[0x40]; 1819 u8 reserved_at_400[0x40];
1820 1820
1821 u8 reserved_19[0x8]; 1821 u8 reserved_at_440[0x8];
1822 u8 last_acked_psn[0x18]; 1822 u8 last_acked_psn[0x18];
1823 1823
1824 u8 reserved_20[0x8]; 1824 u8 reserved_at_460[0x8];
1825 u8 ssn[0x18]; 1825 u8 ssn[0x18];
1826 1826
1827 u8 reserved_21[0x8]; 1827 u8 reserved_at_480[0x8];
1828 u8 log_rra_max[0x3]; 1828 u8 log_rra_max[0x3];
1829 u8 reserved_22[0x1]; 1829 u8 reserved_at_48b[0x1];
1830 u8 atomic_mode[0x4]; 1830 u8 atomic_mode[0x4];
1831 u8 rre[0x1]; 1831 u8 rre[0x1];
1832 u8 rwe[0x1]; 1832 u8 rwe[0x1];
1833 u8 rae[0x1]; 1833 u8 rae[0x1];
1834 u8 reserved_23[0x1]; 1834 u8 reserved_at_493[0x1];
1835 u8 page_offset[0x6]; 1835 u8 page_offset[0x6];
1836 u8 reserved_24[0x3]; 1836 u8 reserved_at_49a[0x3];
1837 u8 cd_slave_receive[0x1]; 1837 u8 cd_slave_receive[0x1];
1838 u8 cd_slave_send[0x1]; 1838 u8 cd_slave_send[0x1];
1839 u8 cd_master[0x1]; 1839 u8 cd_master[0x1];
1840 1840
1841 u8 reserved_25[0x3]; 1841 u8 reserved_at_4a0[0x3];
1842 u8 min_rnr_nak[0x5]; 1842 u8 min_rnr_nak[0x5];
1843 u8 next_rcv_psn[0x18]; 1843 u8 next_rcv_psn[0x18];
1844 1844
1845 u8 reserved_26[0x8]; 1845 u8 reserved_at_4c0[0x8];
1846 u8 xrcd[0x18]; 1846 u8 xrcd[0x18];
1847 1847
1848 u8 reserved_27[0x8]; 1848 u8 reserved_at_4e0[0x8];
1849 u8 cqn_rcv[0x18]; 1849 u8 cqn_rcv[0x18];
1850 1850
1851 u8 dbr_addr[0x40]; 1851 u8 dbr_addr[0x40];
1852 1852
1853 u8 q_key[0x20]; 1853 u8 q_key[0x20];
1854 1854
1855 u8 reserved_28[0x5]; 1855 u8 reserved_at_560[0x5];
1856 u8 rq_type[0x3]; 1856 u8 rq_type[0x3];
1857 u8 srqn_rmpn[0x18]; 1857 u8 srqn_rmpn[0x18];
1858 1858
1859 u8 reserved_29[0x8]; 1859 u8 reserved_at_580[0x8];
1860 u8 rmsn[0x18]; 1860 u8 rmsn[0x18];
1861 1861
1862 u8 hw_sq_wqebb_counter[0x10]; 1862 u8 hw_sq_wqebb_counter[0x10];
@@ -1866,33 +1866,33 @@ struct mlx5_ifc_qpc_bits {
1866 1866
1867 u8 sw_rq_counter[0x20]; 1867 u8 sw_rq_counter[0x20];
1868 1868
1869 u8 reserved_30[0x20]; 1869 u8 reserved_at_600[0x20];
1870 1870
1871 u8 reserved_31[0xf]; 1871 u8 reserved_at_620[0xf];
1872 u8 cgs[0x1]; 1872 u8 cgs[0x1];
1873 u8 cs_req[0x8]; 1873 u8 cs_req[0x8];
1874 u8 cs_res[0x8]; 1874 u8 cs_res[0x8];
1875 1875
1876 u8 dc_access_key[0x40]; 1876 u8 dc_access_key[0x40];
1877 1877
1878 u8 reserved_32[0xc0]; 1878 u8 reserved_at_680[0xc0];
1879}; 1879};
1880 1880
1881struct mlx5_ifc_roce_addr_layout_bits { 1881struct mlx5_ifc_roce_addr_layout_bits {
1882 u8 source_l3_address[16][0x8]; 1882 u8 source_l3_address[16][0x8];
1883 1883
1884 u8 reserved_0[0x3]; 1884 u8 reserved_at_80[0x3];
1885 u8 vlan_valid[0x1]; 1885 u8 vlan_valid[0x1];
1886 u8 vlan_id[0xc]; 1886 u8 vlan_id[0xc];
1887 u8 source_mac_47_32[0x10]; 1887 u8 source_mac_47_32[0x10];
1888 1888
1889 u8 source_mac_31_0[0x20]; 1889 u8 source_mac_31_0[0x20];
1890 1890
1891 u8 reserved_1[0x14]; 1891 u8 reserved_at_c0[0x14];
1892 u8 roce_l3_type[0x4]; 1892 u8 roce_l3_type[0x4];
1893 u8 roce_version[0x8]; 1893 u8 roce_version[0x8];
1894 1894
1895 u8 reserved_2[0x20]; 1895 u8 reserved_at_e0[0x20];
1896}; 1896};
1897 1897
1898union mlx5_ifc_hca_cap_union_bits { 1898union mlx5_ifc_hca_cap_union_bits {
@@ -1904,7 +1904,7 @@ union mlx5_ifc_hca_cap_union_bits {
1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; 1904 struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; 1905 struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap; 1906 struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
1907 u8 reserved_0[0x8000]; 1907 u8 reserved_at_0[0x8000];
1908}; 1908};
1909 1909
1910enum { 1910enum {
@@ -1914,24 +1914,24 @@ enum {
1914}; 1914};
1915 1915
1916struct mlx5_ifc_flow_context_bits { 1916struct mlx5_ifc_flow_context_bits {
1917 u8 reserved_0[0x20]; 1917 u8 reserved_at_0[0x20];
1918 1918
1919 u8 group_id[0x20]; 1919 u8 group_id[0x20];
1920 1920
1921 u8 reserved_1[0x8]; 1921 u8 reserved_at_40[0x8];
1922 u8 flow_tag[0x18]; 1922 u8 flow_tag[0x18];
1923 1923
1924 u8 reserved_2[0x10]; 1924 u8 reserved_at_60[0x10];
1925 u8 action[0x10]; 1925 u8 action[0x10];
1926 1926
1927 u8 reserved_3[0x8]; 1927 u8 reserved_at_80[0x8];
1928 u8 destination_list_size[0x18]; 1928 u8 destination_list_size[0x18];
1929 1929
1930 u8 reserved_4[0x160]; 1930 u8 reserved_at_a0[0x160];
1931 1931
1932 struct mlx5_ifc_fte_match_param_bits match_value; 1932 struct mlx5_ifc_fte_match_param_bits match_value;
1933 1933
1934 u8 reserved_5[0x600]; 1934 u8 reserved_at_1200[0x600];
1935 1935
1936 struct mlx5_ifc_dest_format_struct_bits destination[0]; 1936 struct mlx5_ifc_dest_format_struct_bits destination[0];
1937}; 1937};
@@ -1944,43 +1944,43 @@ enum {
1944struct mlx5_ifc_xrc_srqc_bits { 1944struct mlx5_ifc_xrc_srqc_bits {
1945 u8 state[0x4]; 1945 u8 state[0x4];
1946 u8 log_xrc_srq_size[0x4]; 1946 u8 log_xrc_srq_size[0x4];
1947 u8 reserved_0[0x18]; 1947 u8 reserved_at_8[0x18];
1948 1948
1949 u8 wq_signature[0x1]; 1949 u8 wq_signature[0x1];
1950 u8 cont_srq[0x1]; 1950 u8 cont_srq[0x1];
1951 u8 reserved_1[0x1]; 1951 u8 reserved_at_22[0x1];
1952 u8 rlky[0x1]; 1952 u8 rlky[0x1];
1953 u8 basic_cyclic_rcv_wqe[0x1]; 1953 u8 basic_cyclic_rcv_wqe[0x1];
1954 u8 log_rq_stride[0x3]; 1954 u8 log_rq_stride[0x3];
1955 u8 xrcd[0x18]; 1955 u8 xrcd[0x18];
1956 1956
1957 u8 page_offset[0x6]; 1957 u8 page_offset[0x6];
1958 u8 reserved_2[0x2]; 1958 u8 reserved_at_46[0x2];
1959 u8 cqn[0x18]; 1959 u8 cqn[0x18];
1960 1960
1961 u8 reserved_3[0x20]; 1961 u8 reserved_at_60[0x20];
1962 1962
1963 u8 user_index_equal_xrc_srqn[0x1]; 1963 u8 user_index_equal_xrc_srqn[0x1];
1964 u8 reserved_4[0x1]; 1964 u8 reserved_at_81[0x1];
1965 u8 log_page_size[0x6]; 1965 u8 log_page_size[0x6];
1966 u8 user_index[0x18]; 1966 u8 user_index[0x18];
1967 1967
1968 u8 reserved_5[0x20]; 1968 u8 reserved_at_a0[0x20];
1969 1969
1970 u8 reserved_6[0x8]; 1970 u8 reserved_at_c0[0x8];
1971 u8 pd[0x18]; 1971 u8 pd[0x18];
1972 1972
1973 u8 lwm[0x10]; 1973 u8 lwm[0x10];
1974 u8 wqe_cnt[0x10]; 1974 u8 wqe_cnt[0x10];
1975 1975
1976 u8 reserved_7[0x40]; 1976 u8 reserved_at_100[0x40];
1977 1977
1978 u8 db_record_addr_h[0x20]; 1978 u8 db_record_addr_h[0x20];
1979 1979
1980 u8 db_record_addr_l[0x1e]; 1980 u8 db_record_addr_l[0x1e];
1981 u8 reserved_8[0x2]; 1981 u8 reserved_at_17e[0x2];
1982 1982
1983 u8 reserved_9[0x80]; 1983 u8 reserved_at_180[0x80];
1984}; 1984};
1985 1985
1986struct mlx5_ifc_traffic_counter_bits { 1986struct mlx5_ifc_traffic_counter_bits {
@@ -1990,16 +1990,16 @@ struct mlx5_ifc_traffic_counter_bits {
1990}; 1990};
1991 1991
1992struct mlx5_ifc_tisc_bits { 1992struct mlx5_ifc_tisc_bits {
1993 u8 reserved_0[0xc]; 1993 u8 reserved_at_0[0xc];
1994 u8 prio[0x4]; 1994 u8 prio[0x4];
1995 u8 reserved_1[0x10]; 1995 u8 reserved_at_10[0x10];
1996 1996
1997 u8 reserved_2[0x100]; 1997 u8 reserved_at_20[0x100];
1998 1998
1999 u8 reserved_3[0x8]; 1999 u8 reserved_at_120[0x8];
2000 u8 transport_domain[0x18]; 2000 u8 transport_domain[0x18];
2001 2001
2002 u8 reserved_4[0x3c0]; 2002 u8 reserved_at_140[0x3c0];
2003}; 2003};
2004 2004
2005enum { 2005enum {
@@ -2024,31 +2024,31 @@ enum {
2024}; 2024};
2025 2025
2026struct mlx5_ifc_tirc_bits { 2026struct mlx5_ifc_tirc_bits {
2027 u8 reserved_0[0x20]; 2027 u8 reserved_at_0[0x20];
2028 2028
2029 u8 disp_type[0x4]; 2029 u8 disp_type[0x4];
2030 u8 reserved_1[0x1c]; 2030 u8 reserved_at_24[0x1c];
2031 2031
2032 u8 reserved_2[0x40]; 2032 u8 reserved_at_40[0x40];
2033 2033
2034 u8 reserved_3[0x4]; 2034 u8 reserved_at_80[0x4];
2035 u8 lro_timeout_period_usecs[0x10]; 2035 u8 lro_timeout_period_usecs[0x10];
2036 u8 lro_enable_mask[0x4]; 2036 u8 lro_enable_mask[0x4];
2037 u8 lro_max_ip_payload_size[0x8]; 2037 u8 lro_max_ip_payload_size[0x8];
2038 2038
2039 u8 reserved_4[0x40]; 2039 u8 reserved_at_a0[0x40];
2040 2040
2041 u8 reserved_5[0x8]; 2041 u8 reserved_at_e0[0x8];
2042 u8 inline_rqn[0x18]; 2042 u8 inline_rqn[0x18];
2043 2043
2044 u8 rx_hash_symmetric[0x1]; 2044 u8 rx_hash_symmetric[0x1];
2045 u8 reserved_6[0x1]; 2045 u8 reserved_at_101[0x1];
2046 u8 tunneled_offload_en[0x1]; 2046 u8 tunneled_offload_en[0x1];
2047 u8 reserved_7[0x5]; 2047 u8 reserved_at_103[0x5];
2048 u8 indirect_table[0x18]; 2048 u8 indirect_table[0x18];
2049 2049
2050 u8 rx_hash_fn[0x4]; 2050 u8 rx_hash_fn[0x4];
2051 u8 reserved_8[0x2]; 2051 u8 reserved_at_124[0x2];
2052 u8 self_lb_block[0x2]; 2052 u8 self_lb_block[0x2];
2053 u8 transport_domain[0x18]; 2053 u8 transport_domain[0x18];
2054 2054
@@ -2058,7 +2058,7 @@ struct mlx5_ifc_tirc_bits {
2058 2058
2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; 2059 struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
2060 2060
2061 u8 reserved_9[0x4c0]; 2061 u8 reserved_at_2c0[0x4c0];
2062}; 2062};
2063 2063
2064enum { 2064enum {
@@ -2069,39 +2069,39 @@ enum {
2069struct mlx5_ifc_srqc_bits { 2069struct mlx5_ifc_srqc_bits {
2070 u8 state[0x4]; 2070 u8 state[0x4];
2071 u8 log_srq_size[0x4]; 2071 u8 log_srq_size[0x4];
2072 u8 reserved_0[0x18]; 2072 u8 reserved_at_8[0x18];
2073 2073
2074 u8 wq_signature[0x1]; 2074 u8 wq_signature[0x1];
2075 u8 cont_srq[0x1]; 2075 u8 cont_srq[0x1];
2076 u8 reserved_1[0x1]; 2076 u8 reserved_at_22[0x1];
2077 u8 rlky[0x1]; 2077 u8 rlky[0x1];
2078 u8 reserved_2[0x1]; 2078 u8 reserved_at_24[0x1];
2079 u8 log_rq_stride[0x3]; 2079 u8 log_rq_stride[0x3];
2080 u8 xrcd[0x18]; 2080 u8 xrcd[0x18];
2081 2081
2082 u8 page_offset[0x6]; 2082 u8 page_offset[0x6];
2083 u8 reserved_3[0x2]; 2083 u8 reserved_at_46[0x2];
2084 u8 cqn[0x18]; 2084 u8 cqn[0x18];
2085 2085
2086 u8 reserved_4[0x20]; 2086 u8 reserved_at_60[0x20];
2087 2087
2088 u8 reserved_5[0x2]; 2088 u8 reserved_at_80[0x2];
2089 u8 log_page_size[0x6]; 2089 u8 log_page_size[0x6];
2090 u8 reserved_6[0x18]; 2090 u8 reserved_at_88[0x18];
2091 2091
2092 u8 reserved_7[0x20]; 2092 u8 reserved_at_a0[0x20];
2093 2093
2094 u8 reserved_8[0x8]; 2094 u8 reserved_at_c0[0x8];
2095 u8 pd[0x18]; 2095 u8 pd[0x18];
2096 2096
2097 u8 lwm[0x10]; 2097 u8 lwm[0x10];
2098 u8 wqe_cnt[0x10]; 2098 u8 wqe_cnt[0x10];
2099 2099
2100 u8 reserved_9[0x40]; 2100 u8 reserved_at_100[0x40];
2101 2101
2102 u8 dbr_addr[0x40]; 2102 u8 dbr_addr[0x40];
2103 2103
2104 u8 reserved_10[0x80]; 2104 u8 reserved_at_180[0x80];
2105}; 2105};
2106 2106
2107enum { 2107enum {
@@ -2115,39 +2115,39 @@ struct mlx5_ifc_sqc_bits {
2115 u8 cd_master[0x1]; 2115 u8 cd_master[0x1];
2116 u8 fre[0x1]; 2116 u8 fre[0x1];
2117 u8 flush_in_error_en[0x1]; 2117 u8 flush_in_error_en[0x1];
2118 u8 reserved_0[0x4]; 2118 u8 reserved_at_4[0x4];
2119 u8 state[0x4]; 2119 u8 state[0x4];
2120 u8 reserved_1[0x14]; 2120 u8 reserved_at_c[0x14];
2121 2121
2122 u8 reserved_2[0x8]; 2122 u8 reserved_at_20[0x8];
2123 u8 user_index[0x18]; 2123 u8 user_index[0x18];
2124 2124
2125 u8 reserved_3[0x8]; 2125 u8 reserved_at_40[0x8];
2126 u8 cqn[0x18]; 2126 u8 cqn[0x18];
2127 2127
2128 u8 reserved_4[0xa0]; 2128 u8 reserved_at_60[0xa0];
2129 2129
2130 u8 tis_lst_sz[0x10]; 2130 u8 tis_lst_sz[0x10];
2131 u8 reserved_5[0x10]; 2131 u8 reserved_at_110[0x10];
2132 2132
2133 u8 reserved_6[0x40]; 2133 u8 reserved_at_120[0x40];
2134 2134
2135 u8 reserved_7[0x8]; 2135 u8 reserved_at_160[0x8];
2136 u8 tis_num_0[0x18]; 2136 u8 tis_num_0[0x18];
2137 2137
2138 struct mlx5_ifc_wq_bits wq; 2138 struct mlx5_ifc_wq_bits wq;
2139}; 2139};
2140 2140
2141struct mlx5_ifc_rqtc_bits { 2141struct mlx5_ifc_rqtc_bits {
2142 u8 reserved_0[0xa0]; 2142 u8 reserved_at_0[0xa0];
2143 2143
2144 u8 reserved_1[0x10]; 2144 u8 reserved_at_a0[0x10];
2145 u8 rqt_max_size[0x10]; 2145 u8 rqt_max_size[0x10];
2146 2146
2147 u8 reserved_2[0x10]; 2147 u8 reserved_at_c0[0x10];
2148 u8 rqt_actual_size[0x10]; 2148 u8 rqt_actual_size[0x10];
2149 2149
2150 u8 reserved_3[0x6a0]; 2150 u8 reserved_at_e0[0x6a0];
2151 2151
2152 struct mlx5_ifc_rq_num_bits rq_num[0]; 2152 struct mlx5_ifc_rq_num_bits rq_num[0];
2153}; 2153};
@@ -2165,27 +2165,27 @@ enum {
2165 2165
2166struct mlx5_ifc_rqc_bits { 2166struct mlx5_ifc_rqc_bits {
2167 u8 rlky[0x1]; 2167 u8 rlky[0x1];
2168 u8 reserved_0[0x2]; 2168 u8 reserved_at_1[0x2];
2169 u8 vsd[0x1]; 2169 u8 vsd[0x1];
2170 u8 mem_rq_type[0x4]; 2170 u8 mem_rq_type[0x4];
2171 u8 state[0x4]; 2171 u8 state[0x4];
2172 u8 reserved_1[0x1]; 2172 u8 reserved_at_c[0x1];
2173 u8 flush_in_error_en[0x1]; 2173 u8 flush_in_error_en[0x1];
2174 u8 reserved_2[0x12]; 2174 u8 reserved_at_e[0x12];
2175 2175
2176 u8 reserved_3[0x8]; 2176 u8 reserved_at_20[0x8];
2177 u8 user_index[0x18]; 2177 u8 user_index[0x18];
2178 2178
2179 u8 reserved_4[0x8]; 2179 u8 reserved_at_40[0x8];
2180 u8 cqn[0x18]; 2180 u8 cqn[0x18];
2181 2181
2182 u8 counter_set_id[0x8]; 2182 u8 counter_set_id[0x8];
2183 u8 reserved_5[0x18]; 2183 u8 reserved_at_68[0x18];
2184 2184
2185 u8 reserved_6[0x8]; 2185 u8 reserved_at_80[0x8];
2186 u8 rmpn[0x18]; 2186 u8 rmpn[0x18];
2187 2187
2188 u8 reserved_7[0xe0]; 2188 u8 reserved_at_a0[0xe0];
2189 2189
2190 struct mlx5_ifc_wq_bits wq; 2190 struct mlx5_ifc_wq_bits wq;
2191}; 2191};
@@ -2196,31 +2196,31 @@ enum {
2196}; 2196};
2197 2197
2198struct mlx5_ifc_rmpc_bits { 2198struct mlx5_ifc_rmpc_bits {
2199 u8 reserved_0[0x8]; 2199 u8 reserved_at_0[0x8];
2200 u8 state[0x4]; 2200 u8 state[0x4];
2201 u8 reserved_1[0x14]; 2201 u8 reserved_at_c[0x14];
2202 2202
2203 u8 basic_cyclic_rcv_wqe[0x1]; 2203 u8 basic_cyclic_rcv_wqe[0x1];
2204 u8 reserved_2[0x1f]; 2204 u8 reserved_at_21[0x1f];
2205 2205
2206 u8 reserved_3[0x140]; 2206 u8 reserved_at_40[0x140];
2207 2207
2208 struct mlx5_ifc_wq_bits wq; 2208 struct mlx5_ifc_wq_bits wq;
2209}; 2209};
2210 2210
2211struct mlx5_ifc_nic_vport_context_bits { 2211struct mlx5_ifc_nic_vport_context_bits {
2212 u8 reserved_0[0x1f]; 2212 u8 reserved_at_0[0x1f];
2213 u8 roce_en[0x1]; 2213 u8 roce_en[0x1];
2214 2214
2215 u8 arm_change_event[0x1]; 2215 u8 arm_change_event[0x1];
2216 u8 reserved_1[0x1a]; 2216 u8 reserved_at_21[0x1a];
2217 u8 event_on_mtu[0x1]; 2217 u8 event_on_mtu[0x1];
2218 u8 event_on_promisc_change[0x1]; 2218 u8 event_on_promisc_change[0x1];
2219 u8 event_on_vlan_change[0x1]; 2219 u8 event_on_vlan_change[0x1];
2220 u8 event_on_mc_address_change[0x1]; 2220 u8 event_on_mc_address_change[0x1];
2221 u8 event_on_uc_address_change[0x1]; 2221 u8 event_on_uc_address_change[0x1];
2222 2222
2223 u8 reserved_2[0xf0]; 2223 u8 reserved_at_40[0xf0];
2224 2224
2225 u8 mtu[0x10]; 2225 u8 mtu[0x10];
2226 2226
@@ -2228,21 +2228,21 @@ struct mlx5_ifc_nic_vport_context_bits {
2228 u8 port_guid[0x40]; 2228 u8 port_guid[0x40];
2229 u8 node_guid[0x40]; 2229 u8 node_guid[0x40];
2230 2230
2231 u8 reserved_3[0x140]; 2231 u8 reserved_at_200[0x140];
2232 u8 qkey_violation_counter[0x10]; 2232 u8 qkey_violation_counter[0x10];
2233 u8 reserved_4[0x430]; 2233 u8 reserved_at_350[0x430];
2234 2234
2235 u8 promisc_uc[0x1]; 2235 u8 promisc_uc[0x1];
2236 u8 promisc_mc[0x1]; 2236 u8 promisc_mc[0x1];
2237 u8 promisc_all[0x1]; 2237 u8 promisc_all[0x1];
2238 u8 reserved_5[0x2]; 2238 u8 reserved_at_783[0x2];
2239 u8 allowed_list_type[0x3]; 2239 u8 allowed_list_type[0x3];
2240 u8 reserved_6[0xc]; 2240 u8 reserved_at_788[0xc];
2241 u8 allowed_list_size[0xc]; 2241 u8 allowed_list_size[0xc];
2242 2242
2243 struct mlx5_ifc_mac_address_layout_bits permanent_address; 2243 struct mlx5_ifc_mac_address_layout_bits permanent_address;
2244 2244
2245 u8 reserved_7[0x20]; 2245 u8 reserved_at_7e0[0x20];
2246 2246
2247 u8 current_uc_mac_address[0][0x40]; 2247 u8 current_uc_mac_address[0][0x40];
2248}; 2248};
@@ -2254,9 +2254,9 @@ enum {
2254}; 2254};
2255 2255
2256struct mlx5_ifc_mkc_bits { 2256struct mlx5_ifc_mkc_bits {
2257 u8 reserved_0[0x1]; 2257 u8 reserved_at_0[0x1];
2258 u8 free[0x1]; 2258 u8 free[0x1];
2259 u8 reserved_1[0xd]; 2259 u8 reserved_at_2[0xd];
2260 u8 small_fence_on_rdma_read_response[0x1]; 2260 u8 small_fence_on_rdma_read_response[0x1];
2261 u8 umr_en[0x1]; 2261 u8 umr_en[0x1];
2262 u8 a[0x1]; 2262 u8 a[0x1];
@@ -2265,19 +2265,19 @@ struct mlx5_ifc_mkc_bits {
2265 u8 lw[0x1]; 2265 u8 lw[0x1];
2266 u8 lr[0x1]; 2266 u8 lr[0x1];
2267 u8 access_mode[0x2]; 2267 u8 access_mode[0x2];
2268 u8 reserved_2[0x8]; 2268 u8 reserved_at_18[0x8];
2269 2269
2270 u8 qpn[0x18]; 2270 u8 qpn[0x18];
2271 u8 mkey_7_0[0x8]; 2271 u8 mkey_7_0[0x8];
2272 2272
2273 u8 reserved_3[0x20]; 2273 u8 reserved_at_40[0x20];
2274 2274
2275 u8 length64[0x1]; 2275 u8 length64[0x1];
2276 u8 bsf_en[0x1]; 2276 u8 bsf_en[0x1];
2277 u8 sync_umr[0x1]; 2277 u8 sync_umr[0x1];
2278 u8 reserved_4[0x2]; 2278 u8 reserved_at_63[0x2];
2279 u8 expected_sigerr_count[0x1]; 2279 u8 expected_sigerr_count[0x1];
2280 u8 reserved_5[0x1]; 2280 u8 reserved_at_66[0x1];
2281 u8 en_rinval[0x1]; 2281 u8 en_rinval[0x1];
2282 u8 pd[0x18]; 2282 u8 pd[0x18];
2283 2283
@@ -2287,18 +2287,18 @@ struct mlx5_ifc_mkc_bits {
2287 2287
2288 u8 bsf_octword_size[0x20]; 2288 u8 bsf_octword_size[0x20];
2289 2289
2290 u8 reserved_6[0x80]; 2290 u8 reserved_at_120[0x80];
2291 2291
2292 u8 translations_octword_size[0x20]; 2292 u8 translations_octword_size[0x20];
2293 2293
2294 u8 reserved_7[0x1b]; 2294 u8 reserved_at_1c0[0x1b];
2295 u8 log_page_size[0x5]; 2295 u8 log_page_size[0x5];
2296 2296
2297 u8 reserved_8[0x20]; 2297 u8 reserved_at_1e0[0x20];
2298}; 2298};
2299 2299
2300struct mlx5_ifc_pkey_bits { 2300struct mlx5_ifc_pkey_bits {
2301 u8 reserved_0[0x10]; 2301 u8 reserved_at_0[0x10];
2302 u8 pkey[0x10]; 2302 u8 pkey[0x10];
2303}; 2303};
2304 2304
@@ -2309,19 +2309,19 @@ struct mlx5_ifc_array128_auto_bits {
2309struct mlx5_ifc_hca_vport_context_bits { 2309struct mlx5_ifc_hca_vport_context_bits {
2310 u8 field_select[0x20]; 2310 u8 field_select[0x20];
2311 2311
2312 u8 reserved_0[0xe0]; 2312 u8 reserved_at_20[0xe0];
2313 2313
2314 u8 sm_virt_aware[0x1]; 2314 u8 sm_virt_aware[0x1];
2315 u8 has_smi[0x1]; 2315 u8 has_smi[0x1];
2316 u8 has_raw[0x1]; 2316 u8 has_raw[0x1];
2317 u8 grh_required[0x1]; 2317 u8 grh_required[0x1];
2318 u8 reserved_1[0xc]; 2318 u8 reserved_at_104[0xc];
2319 u8 port_physical_state[0x4]; 2319 u8 port_physical_state[0x4];
2320 u8 vport_state_policy[0x4]; 2320 u8 vport_state_policy[0x4];
2321 u8 port_state[0x4]; 2321 u8 port_state[0x4];
2322 u8 vport_state[0x4]; 2322 u8 vport_state[0x4];
2323 2323
2324 u8 reserved_2[0x20]; 2324 u8 reserved_at_120[0x20];
2325 2325
2326 u8 system_image_guid[0x40]; 2326 u8 system_image_guid[0x40];
2327 2327
@@ -2337,33 +2337,33 @@ struct mlx5_ifc_hca_vport_context_bits {
2337 2337
2338 u8 cap_mask2_field_select[0x20]; 2338 u8 cap_mask2_field_select[0x20];
2339 2339
2340 u8 reserved_3[0x80]; 2340 u8 reserved_at_280[0x80];
2341 2341
2342 u8 lid[0x10]; 2342 u8 lid[0x10];
2343 u8 reserved_4[0x4]; 2343 u8 reserved_at_310[0x4];
2344 u8 init_type_reply[0x4]; 2344 u8 init_type_reply[0x4];
2345 u8 lmc[0x3]; 2345 u8 lmc[0x3];
2346 u8 subnet_timeout[0x5]; 2346 u8 subnet_timeout[0x5];
2347 2347
2348 u8 sm_lid[0x10]; 2348 u8 sm_lid[0x10];
2349 u8 sm_sl[0x4]; 2349 u8 sm_sl[0x4];
2350 u8 reserved_5[0xc]; 2350 u8 reserved_at_334[0xc];
2351 2351
2352 u8 qkey_violation_counter[0x10]; 2352 u8 qkey_violation_counter[0x10];
2353 u8 pkey_violation_counter[0x10]; 2353 u8 pkey_violation_counter[0x10];
2354 2354
2355 u8 reserved_6[0xca0]; 2355 u8 reserved_at_360[0xca0];
2356}; 2356};
2357 2357
2358struct mlx5_ifc_esw_vport_context_bits { 2358struct mlx5_ifc_esw_vport_context_bits {
2359 u8 reserved_0[0x3]; 2359 u8 reserved_at_0[0x3];
2360 u8 vport_svlan_strip[0x1]; 2360 u8 vport_svlan_strip[0x1];
2361 u8 vport_cvlan_strip[0x1]; 2361 u8 vport_cvlan_strip[0x1];
2362 u8 vport_svlan_insert[0x1]; 2362 u8 vport_svlan_insert[0x1];
2363 u8 vport_cvlan_insert[0x2]; 2363 u8 vport_cvlan_insert[0x2];
2364 u8 reserved_1[0x18]; 2364 u8 reserved_at_8[0x18];
2365 2365
2366 u8 reserved_2[0x20]; 2366 u8 reserved_at_20[0x20];
2367 2367
2368 u8 svlan_cfi[0x1]; 2368 u8 svlan_cfi[0x1];
2369 u8 svlan_pcp[0x3]; 2369 u8 svlan_pcp[0x3];
@@ -2372,7 +2372,7 @@ struct mlx5_ifc_esw_vport_context_bits {
2372 u8 cvlan_pcp[0x3]; 2372 u8 cvlan_pcp[0x3];
2373 u8 cvlan_id[0xc]; 2373 u8 cvlan_id[0xc];
2374 2374
2375 u8 reserved_3[0x7a0]; 2375 u8 reserved_at_60[0x7a0];
2376}; 2376};
2377 2377
2378enum { 2378enum {
@@ -2387,41 +2387,41 @@ enum {
2387 2387
2388struct mlx5_ifc_eqc_bits { 2388struct mlx5_ifc_eqc_bits {
2389 u8 status[0x4]; 2389 u8 status[0x4];
2390 u8 reserved_0[0x9]; 2390 u8 reserved_at_4[0x9];
2391 u8 ec[0x1]; 2391 u8 ec[0x1];
2392 u8 oi[0x1]; 2392 u8 oi[0x1];
2393 u8 reserved_1[0x5]; 2393 u8 reserved_at_f[0x5];
2394 u8 st[0x4]; 2394 u8 st[0x4];
2395 u8 reserved_2[0x8]; 2395 u8 reserved_at_18[0x8];
2396 2396
2397 u8 reserved_3[0x20]; 2397 u8 reserved_at_20[0x20];
2398 2398
2399 u8 reserved_4[0x14]; 2399 u8 reserved_at_40[0x14];
2400 u8 page_offset[0x6]; 2400 u8 page_offset[0x6];
2401 u8 reserved_5[0x6]; 2401 u8 reserved_at_5a[0x6];
2402 2402
2403 u8 reserved_6[0x3]; 2403 u8 reserved_at_60[0x3];
2404 u8 log_eq_size[0x5]; 2404 u8 log_eq_size[0x5];
2405 u8 uar_page[0x18]; 2405 u8 uar_page[0x18];
2406 2406
2407 u8 reserved_7[0x20]; 2407 u8 reserved_at_80[0x20];
2408 2408
2409 u8 reserved_8[0x18]; 2409 u8 reserved_at_a0[0x18];
2410 u8 intr[0x8]; 2410 u8 intr[0x8];
2411 2411
2412 u8 reserved_9[0x3]; 2412 u8 reserved_at_c0[0x3];
2413 u8 log_page_size[0x5]; 2413 u8 log_page_size[0x5];
2414 u8 reserved_10[0x18]; 2414 u8 reserved_at_c8[0x18];
2415 2415
2416 u8 reserved_11[0x60]; 2416 u8 reserved_at_e0[0x60];
2417 2417
2418 u8 reserved_12[0x8]; 2418 u8 reserved_at_140[0x8];
2419 u8 consumer_counter[0x18]; 2419 u8 consumer_counter[0x18];
2420 2420
2421 u8 reserved_13[0x8]; 2421 u8 reserved_at_160[0x8];
2422 u8 producer_counter[0x18]; 2422 u8 producer_counter[0x18];
2423 2423
2424 u8 reserved_14[0x80]; 2424 u8 reserved_at_180[0x80];
2425}; 2425};
2426 2426
2427enum { 2427enum {
@@ -2445,14 +2445,14 @@ enum {
2445}; 2445};
2446 2446
2447struct mlx5_ifc_dctc_bits { 2447struct mlx5_ifc_dctc_bits {
2448 u8 reserved_0[0x4]; 2448 u8 reserved_at_0[0x4];
2449 u8 state[0x4]; 2449 u8 state[0x4];
2450 u8 reserved_1[0x18]; 2450 u8 reserved_at_8[0x18];
2451 2451
2452 u8 reserved_2[0x8]; 2452 u8 reserved_at_20[0x8];
2453 u8 user_index[0x18]; 2453 u8 user_index[0x18];
2454 2454
2455 u8 reserved_3[0x8]; 2455 u8 reserved_at_40[0x8];
2456 u8 cqn[0x18]; 2456 u8 cqn[0x18];
2457 2457
2458 u8 counter_set_id[0x8]; 2458 u8 counter_set_id[0x8];
@@ -2464,45 +2464,45 @@ struct mlx5_ifc_dctc_bits {
2464 u8 latency_sensitive[0x1]; 2464 u8 latency_sensitive[0x1];
2465 u8 rlky[0x1]; 2465 u8 rlky[0x1];
2466 u8 free_ar[0x1]; 2466 u8 free_ar[0x1];
2467 u8 reserved_4[0xd]; 2467 u8 reserved_at_73[0xd];
2468 2468
2469 u8 reserved_5[0x8]; 2469 u8 reserved_at_80[0x8];
2470 u8 cs_res[0x8]; 2470 u8 cs_res[0x8];
2471 u8 reserved_6[0x3]; 2471 u8 reserved_at_90[0x3];
2472 u8 min_rnr_nak[0x5]; 2472 u8 min_rnr_nak[0x5];
2473 u8 reserved_7[0x8]; 2473 u8 reserved_at_98[0x8];
2474 2474
2475 u8 reserved_8[0x8]; 2475 u8 reserved_at_a0[0x8];
2476 u8 srqn[0x18]; 2476 u8 srqn[0x18];
2477 2477
2478 u8 reserved_9[0x8]; 2478 u8 reserved_at_c0[0x8];
2479 u8 pd[0x18]; 2479 u8 pd[0x18];
2480 2480
2481 u8 tclass[0x8]; 2481 u8 tclass[0x8];
2482 u8 reserved_10[0x4]; 2482 u8 reserved_at_e8[0x4];
2483 u8 flow_label[0x14]; 2483 u8 flow_label[0x14];
2484 2484
2485 u8 dc_access_key[0x40]; 2485 u8 dc_access_key[0x40];
2486 2486
2487 u8 reserved_11[0x5]; 2487 u8 reserved_at_140[0x5];
2488 u8 mtu[0x3]; 2488 u8 mtu[0x3];
2489 u8 port[0x8]; 2489 u8 port[0x8];
2490 u8 pkey_index[0x10]; 2490 u8 pkey_index[0x10];
2491 2491
2492 u8 reserved_12[0x8]; 2492 u8 reserved_at_160[0x8];
2493 u8 my_addr_index[0x8]; 2493 u8 my_addr_index[0x8];
2494 u8 reserved_13[0x8]; 2494 u8 reserved_at_170[0x8];
2495 u8 hop_limit[0x8]; 2495 u8 hop_limit[0x8];
2496 2496
2497 u8 dc_access_key_violation_count[0x20]; 2497 u8 dc_access_key_violation_count[0x20];
2498 2498
2499 u8 reserved_14[0x14]; 2499 u8 reserved_at_1a0[0x14];
2500 u8 dei_cfi[0x1]; 2500 u8 dei_cfi[0x1];
2501 u8 eth_prio[0x3]; 2501 u8 eth_prio[0x3];
2502 u8 ecn[0x2]; 2502 u8 ecn[0x2];
2503 u8 dscp[0x6]; 2503 u8 dscp[0x6];
2504 2504
2505 u8 reserved_15[0x40]; 2505 u8 reserved_at_1c0[0x40];
2506}; 2506};
2507 2507
2508enum { 2508enum {
@@ -2524,54 +2524,54 @@ enum {
2524 2524
2525struct mlx5_ifc_cqc_bits { 2525struct mlx5_ifc_cqc_bits {
2526 u8 status[0x4]; 2526 u8 status[0x4];
2527 u8 reserved_0[0x4]; 2527 u8 reserved_at_4[0x4];
2528 u8 cqe_sz[0x3]; 2528 u8 cqe_sz[0x3];
2529 u8 cc[0x1]; 2529 u8 cc[0x1];
2530 u8 reserved_1[0x1]; 2530 u8 reserved_at_c[0x1];
2531 u8 scqe_break_moderation_en[0x1]; 2531 u8 scqe_break_moderation_en[0x1];
2532 u8 oi[0x1]; 2532 u8 oi[0x1];
2533 u8 reserved_2[0x2]; 2533 u8 reserved_at_f[0x2];
2534 u8 cqe_zip_en[0x1]; 2534 u8 cqe_zip_en[0x1];
2535 u8 mini_cqe_res_format[0x2]; 2535 u8 mini_cqe_res_format[0x2];
2536 u8 st[0x4]; 2536 u8 st[0x4];
2537 u8 reserved_3[0x8]; 2537 u8 reserved_at_18[0x8];
2538 2538
2539 u8 reserved_4[0x20]; 2539 u8 reserved_at_20[0x20];
2540 2540
2541 u8 reserved_5[0x14]; 2541 u8 reserved_at_40[0x14];
2542 u8 page_offset[0x6]; 2542 u8 page_offset[0x6];
2543 u8 reserved_6[0x6]; 2543 u8 reserved_at_5a[0x6];
2544 2544
2545 u8 reserved_7[0x3]; 2545 u8 reserved_at_60[0x3];
2546 u8 log_cq_size[0x5]; 2546 u8 log_cq_size[0x5];
2547 u8 uar_page[0x18]; 2547 u8 uar_page[0x18];
2548 2548
2549 u8 reserved_8[0x4]; 2549 u8 reserved_at_80[0x4];
2550 u8 cq_period[0xc]; 2550 u8 cq_period[0xc];
2551 u8 cq_max_count[0x10]; 2551 u8 cq_max_count[0x10];
2552 2552
2553 u8 reserved_9[0x18]; 2553 u8 reserved_at_a0[0x18];
2554 u8 c_eqn[0x8]; 2554 u8 c_eqn[0x8];
2555 2555
2556 u8 reserved_10[0x3]; 2556 u8 reserved_at_c0[0x3];
2557 u8 log_page_size[0x5]; 2557 u8 log_page_size[0x5];
2558 u8 reserved_11[0x18]; 2558 u8 reserved_at_c8[0x18];
2559 2559
2560 u8 reserved_12[0x20]; 2560 u8 reserved_at_e0[0x20];
2561 2561
2562 u8 reserved_13[0x8]; 2562 u8 reserved_at_100[0x8];
2563 u8 last_notified_index[0x18]; 2563 u8 last_notified_index[0x18];
2564 2564
2565 u8 reserved_14[0x8]; 2565 u8 reserved_at_120[0x8];
2566 u8 last_solicit_index[0x18]; 2566 u8 last_solicit_index[0x18];
2567 2567
2568 u8 reserved_15[0x8]; 2568 u8 reserved_at_140[0x8];
2569 u8 consumer_counter[0x18]; 2569 u8 consumer_counter[0x18];
2570 2570
2571 u8 reserved_16[0x8]; 2571 u8 reserved_at_160[0x8];
2572 u8 producer_counter[0x18]; 2572 u8 producer_counter[0x18];
2573 2573
2574 u8 reserved_17[0x40]; 2574 u8 reserved_at_180[0x40];
2575 2575
2576 u8 dbr_addr[0x40]; 2576 u8 dbr_addr[0x40];
2577}; 2577};
@@ -2580,16 +2580,16 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; 2580 struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; 2581 struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; 2582 struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
2583 u8 reserved_0[0x800]; 2583 u8 reserved_at_0[0x800];
2584}; 2584};
2585 2585
2586struct mlx5_ifc_query_adapter_param_block_bits { 2586struct mlx5_ifc_query_adapter_param_block_bits {
2587 u8 reserved_0[0xc0]; 2587 u8 reserved_at_0[0xc0];
2588 2588
2589 u8 reserved_1[0x8]; 2589 u8 reserved_at_c0[0x8];
2590 u8 ieee_vendor_id[0x18]; 2590 u8 ieee_vendor_id[0x18];
2591 2591
2592 u8 reserved_2[0x10]; 2592 u8 reserved_at_e0[0x10];
2593 u8 vsd_vendor_id[0x10]; 2593 u8 vsd_vendor_id[0x10];
2594 2594
2595 u8 vsd[208][0x8]; 2595 u8 vsd[208][0x8];
@@ -2600,14 +2600,14 @@ struct mlx5_ifc_query_adapter_param_block_bits {
2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { 2600union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
2601 struct mlx5_ifc_modify_field_select_bits modify_field_select; 2601 struct mlx5_ifc_modify_field_select_bits modify_field_select;
2602 struct mlx5_ifc_resize_field_select_bits resize_field_select; 2602 struct mlx5_ifc_resize_field_select_bits resize_field_select;
2603 u8 reserved_0[0x20]; 2603 u8 reserved_at_0[0x20];
2604}; 2604};
2605 2605
2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits { 2606union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp; 2607 struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; 2608 struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; 2609 struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
2610 u8 reserved_0[0x20]; 2610 u8 reserved_at_0[0x20];
2611}; 2611};
2612 2612
2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { 2613union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
@@ -2619,7 +2619,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; 2619 struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; 2620 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; 2621 struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
2622 u8 reserved_0[0x7c0]; 2622 u8 reserved_at_0[0x7c0];
2623}; 2623};
2624 2624
2625union mlx5_ifc_event_auto_bits { 2625union mlx5_ifc_event_auto_bits {
@@ -2635,23 +2635,23 @@ union mlx5_ifc_event_auto_bits {
2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; 2635 struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event; 2636 struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; 2637 struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
2638 u8 reserved_0[0xe0]; 2638 u8 reserved_at_0[0xe0];
2639}; 2639};
2640 2640
2641struct mlx5_ifc_health_buffer_bits { 2641struct mlx5_ifc_health_buffer_bits {
2642 u8 reserved_0[0x100]; 2642 u8 reserved_at_0[0x100];
2643 2643
2644 u8 assert_existptr[0x20]; 2644 u8 assert_existptr[0x20];
2645 2645
2646 u8 assert_callra[0x20]; 2646 u8 assert_callra[0x20];
2647 2647
2648 u8 reserved_1[0x40]; 2648 u8 reserved_at_140[0x40];
2649 2649
2650 u8 fw_version[0x20]; 2650 u8 fw_version[0x20];
2651 2651
2652 u8 hw_id[0x20]; 2652 u8 hw_id[0x20];
2653 2653
2654 u8 reserved_2[0x20]; 2654 u8 reserved_at_1c0[0x20];
2655 2655
2656 u8 irisc_index[0x8]; 2656 u8 irisc_index[0x8];
2657 u8 synd[0x8]; 2657 u8 synd[0x8];
@@ -2660,20 +2660,20 @@ struct mlx5_ifc_health_buffer_bits {
2660 2660
2661struct mlx5_ifc_register_loopback_control_bits { 2661struct mlx5_ifc_register_loopback_control_bits {
2662 u8 no_lb[0x1]; 2662 u8 no_lb[0x1];
2663 u8 reserved_0[0x7]; 2663 u8 reserved_at_1[0x7];
2664 u8 port[0x8]; 2664 u8 port[0x8];
2665 u8 reserved_1[0x10]; 2665 u8 reserved_at_10[0x10];
2666 2666
2667 u8 reserved_2[0x60]; 2667 u8 reserved_at_20[0x60];
2668}; 2668};
2669 2669
2670struct mlx5_ifc_teardown_hca_out_bits { 2670struct mlx5_ifc_teardown_hca_out_bits {
2671 u8 status[0x8]; 2671 u8 status[0x8];
2672 u8 reserved_0[0x18]; 2672 u8 reserved_at_8[0x18];
2673 2673
2674 u8 syndrome[0x20]; 2674 u8 syndrome[0x20];
2675 2675
2676 u8 reserved_1[0x40]; 2676 u8 reserved_at_40[0x40];
2677}; 2677};
2678 2678
2679enum { 2679enum {
@@ -2683,108 +2683,108 @@ enum {
2683 2683
2684struct mlx5_ifc_teardown_hca_in_bits { 2684struct mlx5_ifc_teardown_hca_in_bits {
2685 u8 opcode[0x10]; 2685 u8 opcode[0x10];
2686 u8 reserved_0[0x10]; 2686 u8 reserved_at_10[0x10];
2687 2687
2688 u8 reserved_1[0x10]; 2688 u8 reserved_at_20[0x10];
2689 u8 op_mod[0x10]; 2689 u8 op_mod[0x10];
2690 2690
2691 u8 reserved_2[0x10]; 2691 u8 reserved_at_40[0x10];
2692 u8 profile[0x10]; 2692 u8 profile[0x10];
2693 2693
2694 u8 reserved_3[0x20]; 2694 u8 reserved_at_60[0x20];
2695}; 2695};
2696 2696
2697struct mlx5_ifc_sqerr2rts_qp_out_bits { 2697struct mlx5_ifc_sqerr2rts_qp_out_bits {
2698 u8 status[0x8]; 2698 u8 status[0x8];
2699 u8 reserved_0[0x18]; 2699 u8 reserved_at_8[0x18];
2700 2700
2701 u8 syndrome[0x20]; 2701 u8 syndrome[0x20];
2702 2702
2703 u8 reserved_1[0x40]; 2703 u8 reserved_at_40[0x40];
2704}; 2704};
2705 2705
2706struct mlx5_ifc_sqerr2rts_qp_in_bits { 2706struct mlx5_ifc_sqerr2rts_qp_in_bits {
2707 u8 opcode[0x10]; 2707 u8 opcode[0x10];
2708 u8 reserved_0[0x10]; 2708 u8 reserved_at_10[0x10];
2709 2709
2710 u8 reserved_1[0x10]; 2710 u8 reserved_at_20[0x10];
2711 u8 op_mod[0x10]; 2711 u8 op_mod[0x10];
2712 2712
2713 u8 reserved_2[0x8]; 2713 u8 reserved_at_40[0x8];
2714 u8 qpn[0x18]; 2714 u8 qpn[0x18];
2715 2715
2716 u8 reserved_3[0x20]; 2716 u8 reserved_at_60[0x20];
2717 2717
2718 u8 opt_param_mask[0x20]; 2718 u8 opt_param_mask[0x20];
2719 2719
2720 u8 reserved_4[0x20]; 2720 u8 reserved_at_a0[0x20];
2721 2721
2722 struct mlx5_ifc_qpc_bits qpc; 2722 struct mlx5_ifc_qpc_bits qpc;
2723 2723
2724 u8 reserved_5[0x80]; 2724 u8 reserved_at_800[0x80];
2725}; 2725};
2726 2726
2727struct mlx5_ifc_sqd2rts_qp_out_bits { 2727struct mlx5_ifc_sqd2rts_qp_out_bits {
2728 u8 status[0x8]; 2728 u8 status[0x8];
2729 u8 reserved_0[0x18]; 2729 u8 reserved_at_8[0x18];
2730 2730
2731 u8 syndrome[0x20]; 2731 u8 syndrome[0x20];
2732 2732
2733 u8 reserved_1[0x40]; 2733 u8 reserved_at_40[0x40];
2734}; 2734};
2735 2735
2736struct mlx5_ifc_sqd2rts_qp_in_bits { 2736struct mlx5_ifc_sqd2rts_qp_in_bits {
2737 u8 opcode[0x10]; 2737 u8 opcode[0x10];
2738 u8 reserved_0[0x10]; 2738 u8 reserved_at_10[0x10];
2739 2739
2740 u8 reserved_1[0x10]; 2740 u8 reserved_at_20[0x10];
2741 u8 op_mod[0x10]; 2741 u8 op_mod[0x10];
2742 2742
2743 u8 reserved_2[0x8]; 2743 u8 reserved_at_40[0x8];
2744 u8 qpn[0x18]; 2744 u8 qpn[0x18];
2745 2745
2746 u8 reserved_3[0x20]; 2746 u8 reserved_at_60[0x20];
2747 2747
2748 u8 opt_param_mask[0x20]; 2748 u8 opt_param_mask[0x20];
2749 2749
2750 u8 reserved_4[0x20]; 2750 u8 reserved_at_a0[0x20];
2751 2751
2752 struct mlx5_ifc_qpc_bits qpc; 2752 struct mlx5_ifc_qpc_bits qpc;
2753 2753
2754 u8 reserved_5[0x80]; 2754 u8 reserved_at_800[0x80];
2755}; 2755};
2756 2756
2757struct mlx5_ifc_set_roce_address_out_bits { 2757struct mlx5_ifc_set_roce_address_out_bits {
2758 u8 status[0x8]; 2758 u8 status[0x8];
2759 u8 reserved_0[0x18]; 2759 u8 reserved_at_8[0x18];
2760 2760
2761 u8 syndrome[0x20]; 2761 u8 syndrome[0x20];
2762 2762
2763 u8 reserved_1[0x40]; 2763 u8 reserved_at_40[0x40];
2764}; 2764};
2765 2765
2766struct mlx5_ifc_set_roce_address_in_bits { 2766struct mlx5_ifc_set_roce_address_in_bits {
2767 u8 opcode[0x10]; 2767 u8 opcode[0x10];
2768 u8 reserved_0[0x10]; 2768 u8 reserved_at_10[0x10];
2769 2769
2770 u8 reserved_1[0x10]; 2770 u8 reserved_at_20[0x10];
2771 u8 op_mod[0x10]; 2771 u8 op_mod[0x10];
2772 2772
2773 u8 roce_address_index[0x10]; 2773 u8 roce_address_index[0x10];
2774 u8 reserved_2[0x10]; 2774 u8 reserved_at_50[0x10];
2775 2775
2776 u8 reserved_3[0x20]; 2776 u8 reserved_at_60[0x20];
2777 2777
2778 struct mlx5_ifc_roce_addr_layout_bits roce_address; 2778 struct mlx5_ifc_roce_addr_layout_bits roce_address;
2779}; 2779};
2780 2780
2781struct mlx5_ifc_set_mad_demux_out_bits { 2781struct mlx5_ifc_set_mad_demux_out_bits {
2782 u8 status[0x8]; 2782 u8 status[0x8];
2783 u8 reserved_0[0x18]; 2783 u8 reserved_at_8[0x18];
2784 2784
2785 u8 syndrome[0x20]; 2785 u8 syndrome[0x20];
2786 2786
2787 u8 reserved_1[0x40]; 2787 u8 reserved_at_40[0x40];
2788}; 2788};
2789 2789
2790enum { 2790enum {
@@ -2794,89 +2794,89 @@ enum {
2794 2794
2795struct mlx5_ifc_set_mad_demux_in_bits { 2795struct mlx5_ifc_set_mad_demux_in_bits {
2796 u8 opcode[0x10]; 2796 u8 opcode[0x10];
2797 u8 reserved_0[0x10]; 2797 u8 reserved_at_10[0x10];
2798 2798
2799 u8 reserved_1[0x10]; 2799 u8 reserved_at_20[0x10];
2800 u8 op_mod[0x10]; 2800 u8 op_mod[0x10];
2801 2801
2802 u8 reserved_2[0x20]; 2802 u8 reserved_at_40[0x20];
2803 2803
2804 u8 reserved_3[0x6]; 2804 u8 reserved_at_60[0x6];
2805 u8 demux_mode[0x2]; 2805 u8 demux_mode[0x2];
2806 u8 reserved_4[0x18]; 2806 u8 reserved_at_68[0x18];
2807}; 2807};
2808 2808
2809struct mlx5_ifc_set_l2_table_entry_out_bits { 2809struct mlx5_ifc_set_l2_table_entry_out_bits {
2810 u8 status[0x8]; 2810 u8 status[0x8];
2811 u8 reserved_0[0x18]; 2811 u8 reserved_at_8[0x18];
2812 2812
2813 u8 syndrome[0x20]; 2813 u8 syndrome[0x20];
2814 2814
2815 u8 reserved_1[0x40]; 2815 u8 reserved_at_40[0x40];
2816}; 2816};
2817 2817
2818struct mlx5_ifc_set_l2_table_entry_in_bits { 2818struct mlx5_ifc_set_l2_table_entry_in_bits {
2819 u8 opcode[0x10]; 2819 u8 opcode[0x10];
2820 u8 reserved_0[0x10]; 2820 u8 reserved_at_10[0x10];
2821 2821
2822 u8 reserved_1[0x10]; 2822 u8 reserved_at_20[0x10];
2823 u8 op_mod[0x10]; 2823 u8 op_mod[0x10];
2824 2824
2825 u8 reserved_2[0x60]; 2825 u8 reserved_at_40[0x60];
2826 2826
2827 u8 reserved_3[0x8]; 2827 u8 reserved_at_a0[0x8];
2828 u8 table_index[0x18]; 2828 u8 table_index[0x18];
2829 2829
2830 u8 reserved_4[0x20]; 2830 u8 reserved_at_c0[0x20];
2831 2831
2832 u8 reserved_5[0x13]; 2832 u8 reserved_at_e0[0x13];
2833 u8 vlan_valid[0x1]; 2833 u8 vlan_valid[0x1];
2834 u8 vlan[0xc]; 2834 u8 vlan[0xc];
2835 2835
2836 struct mlx5_ifc_mac_address_layout_bits mac_address; 2836 struct mlx5_ifc_mac_address_layout_bits mac_address;
2837 2837
2838 u8 reserved_6[0xc0]; 2838 u8 reserved_at_140[0xc0];
2839}; 2839};
2840 2840
2841struct mlx5_ifc_set_issi_out_bits { 2841struct mlx5_ifc_set_issi_out_bits {
2842 u8 status[0x8]; 2842 u8 status[0x8];
2843 u8 reserved_0[0x18]; 2843 u8 reserved_at_8[0x18];
2844 2844
2845 u8 syndrome[0x20]; 2845 u8 syndrome[0x20];
2846 2846
2847 u8 reserved_1[0x40]; 2847 u8 reserved_at_40[0x40];
2848}; 2848};
2849 2849
2850struct mlx5_ifc_set_issi_in_bits { 2850struct mlx5_ifc_set_issi_in_bits {
2851 u8 opcode[0x10]; 2851 u8 opcode[0x10];
2852 u8 reserved_0[0x10]; 2852 u8 reserved_at_10[0x10];
2853 2853
2854 u8 reserved_1[0x10]; 2854 u8 reserved_at_20[0x10];
2855 u8 op_mod[0x10]; 2855 u8 op_mod[0x10];
2856 2856
2857 u8 reserved_2[0x10]; 2857 u8 reserved_at_40[0x10];
2858 u8 current_issi[0x10]; 2858 u8 current_issi[0x10];
2859 2859
2860 u8 reserved_3[0x20]; 2860 u8 reserved_at_60[0x20];
2861}; 2861};
2862 2862
2863struct mlx5_ifc_set_hca_cap_out_bits { 2863struct mlx5_ifc_set_hca_cap_out_bits {
2864 u8 status[0x8]; 2864 u8 status[0x8];
2865 u8 reserved_0[0x18]; 2865 u8 reserved_at_8[0x18];
2866 2866
2867 u8 syndrome[0x20]; 2867 u8 syndrome[0x20];
2868 2868
2869 u8 reserved_1[0x40]; 2869 u8 reserved_at_40[0x40];
2870}; 2870};
2871 2871
2872struct mlx5_ifc_set_hca_cap_in_bits { 2872struct mlx5_ifc_set_hca_cap_in_bits {
2873 u8 opcode[0x10]; 2873 u8 opcode[0x10];
2874 u8 reserved_0[0x10]; 2874 u8 reserved_at_10[0x10];
2875 2875
2876 u8 reserved_1[0x10]; 2876 u8 reserved_at_20[0x10];
2877 u8 op_mod[0x10]; 2877 u8 op_mod[0x10];
2878 2878
2879 u8 reserved_2[0x40]; 2879 u8 reserved_at_40[0x40];
2880 2880
2881 union mlx5_ifc_hca_cap_union_bits capability; 2881 union mlx5_ifc_hca_cap_union_bits capability;
2882}; 2882};
@@ -2890,156 +2890,156 @@ enum {
2890 2890
2891struct mlx5_ifc_set_fte_out_bits { 2891struct mlx5_ifc_set_fte_out_bits {
2892 u8 status[0x8]; 2892 u8 status[0x8];
2893 u8 reserved_0[0x18]; 2893 u8 reserved_at_8[0x18];
2894 2894
2895 u8 syndrome[0x20]; 2895 u8 syndrome[0x20];
2896 2896
2897 u8 reserved_1[0x40]; 2897 u8 reserved_at_40[0x40];
2898}; 2898};
2899 2899
2900struct mlx5_ifc_set_fte_in_bits { 2900struct mlx5_ifc_set_fte_in_bits {
2901 u8 opcode[0x10]; 2901 u8 opcode[0x10];
2902 u8 reserved_0[0x10]; 2902 u8 reserved_at_10[0x10];
2903 2903
2904 u8 reserved_1[0x10]; 2904 u8 reserved_at_20[0x10];
2905 u8 op_mod[0x10]; 2905 u8 op_mod[0x10];
2906 2906
2907 u8 reserved_2[0x40]; 2907 u8 reserved_at_40[0x40];
2908 2908
2909 u8 table_type[0x8]; 2909 u8 table_type[0x8];
2910 u8 reserved_3[0x18]; 2910 u8 reserved_at_88[0x18];
2911 2911
2912 u8 reserved_4[0x8]; 2912 u8 reserved_at_a0[0x8];
2913 u8 table_id[0x18]; 2913 u8 table_id[0x18];
2914 2914
2915 u8 reserved_5[0x18]; 2915 u8 reserved_at_c0[0x18];
2916 u8 modify_enable_mask[0x8]; 2916 u8 modify_enable_mask[0x8];
2917 2917
2918 u8 reserved_6[0x20]; 2918 u8 reserved_at_e0[0x20];
2919 2919
2920 u8 flow_index[0x20]; 2920 u8 flow_index[0x20];
2921 2921
2922 u8 reserved_7[0xe0]; 2922 u8 reserved_at_120[0xe0];
2923 2923
2924 struct mlx5_ifc_flow_context_bits flow_context; 2924 struct mlx5_ifc_flow_context_bits flow_context;
2925}; 2925};
2926 2926
2927struct mlx5_ifc_rts2rts_qp_out_bits { 2927struct mlx5_ifc_rts2rts_qp_out_bits {
2928 u8 status[0x8]; 2928 u8 status[0x8];
2929 u8 reserved_0[0x18]; 2929 u8 reserved_at_8[0x18];
2930 2930
2931 u8 syndrome[0x20]; 2931 u8 syndrome[0x20];
2932 2932
2933 u8 reserved_1[0x40]; 2933 u8 reserved_at_40[0x40];
2934}; 2934};
2935 2935
2936struct mlx5_ifc_rts2rts_qp_in_bits { 2936struct mlx5_ifc_rts2rts_qp_in_bits {
2937 u8 opcode[0x10]; 2937 u8 opcode[0x10];
2938 u8 reserved_0[0x10]; 2938 u8 reserved_at_10[0x10];
2939 2939
2940 u8 reserved_1[0x10]; 2940 u8 reserved_at_20[0x10];
2941 u8 op_mod[0x10]; 2941 u8 op_mod[0x10];
2942 2942
2943 u8 reserved_2[0x8]; 2943 u8 reserved_at_40[0x8];
2944 u8 qpn[0x18]; 2944 u8 qpn[0x18];
2945 2945
2946 u8 reserved_3[0x20]; 2946 u8 reserved_at_60[0x20];
2947 2947
2948 u8 opt_param_mask[0x20]; 2948 u8 opt_param_mask[0x20];
2949 2949
2950 u8 reserved_4[0x20]; 2950 u8 reserved_at_a0[0x20];
2951 2951
2952 struct mlx5_ifc_qpc_bits qpc; 2952 struct mlx5_ifc_qpc_bits qpc;
2953 2953
2954 u8 reserved_5[0x80]; 2954 u8 reserved_at_800[0x80];
2955}; 2955};
2956 2956
2957struct mlx5_ifc_rtr2rts_qp_out_bits { 2957struct mlx5_ifc_rtr2rts_qp_out_bits {
2958 u8 status[0x8]; 2958 u8 status[0x8];
2959 u8 reserved_0[0x18]; 2959 u8 reserved_at_8[0x18];
2960 2960
2961 u8 syndrome[0x20]; 2961 u8 syndrome[0x20];
2962 2962
2963 u8 reserved_1[0x40]; 2963 u8 reserved_at_40[0x40];
2964}; 2964};
2965 2965
2966struct mlx5_ifc_rtr2rts_qp_in_bits { 2966struct mlx5_ifc_rtr2rts_qp_in_bits {
2967 u8 opcode[0x10]; 2967 u8 opcode[0x10];
2968 u8 reserved_0[0x10]; 2968 u8 reserved_at_10[0x10];
2969 2969
2970 u8 reserved_1[0x10]; 2970 u8 reserved_at_20[0x10];
2971 u8 op_mod[0x10]; 2971 u8 op_mod[0x10];
2972 2972
2973 u8 reserved_2[0x8]; 2973 u8 reserved_at_40[0x8];
2974 u8 qpn[0x18]; 2974 u8 qpn[0x18];
2975 2975
2976 u8 reserved_3[0x20]; 2976 u8 reserved_at_60[0x20];
2977 2977
2978 u8 opt_param_mask[0x20]; 2978 u8 opt_param_mask[0x20];
2979 2979
2980 u8 reserved_4[0x20]; 2980 u8 reserved_at_a0[0x20];
2981 2981
2982 struct mlx5_ifc_qpc_bits qpc; 2982 struct mlx5_ifc_qpc_bits qpc;
2983 2983
2984 u8 reserved_5[0x80]; 2984 u8 reserved_at_800[0x80];
2985}; 2985};
2986 2986
2987struct mlx5_ifc_rst2init_qp_out_bits { 2987struct mlx5_ifc_rst2init_qp_out_bits {
2988 u8 status[0x8]; 2988 u8 status[0x8];
2989 u8 reserved_0[0x18]; 2989 u8 reserved_at_8[0x18];
2990 2990
2991 u8 syndrome[0x20]; 2991 u8 syndrome[0x20];
2992 2992
2993 u8 reserved_1[0x40]; 2993 u8 reserved_at_40[0x40];
2994}; 2994};
2995 2995
2996struct mlx5_ifc_rst2init_qp_in_bits { 2996struct mlx5_ifc_rst2init_qp_in_bits {
2997 u8 opcode[0x10]; 2997 u8 opcode[0x10];
2998 u8 reserved_0[0x10]; 2998 u8 reserved_at_10[0x10];
2999 2999
3000 u8 reserved_1[0x10]; 3000 u8 reserved_at_20[0x10];
3001 u8 op_mod[0x10]; 3001 u8 op_mod[0x10];
3002 3002
3003 u8 reserved_2[0x8]; 3003 u8 reserved_at_40[0x8];
3004 u8 qpn[0x18]; 3004 u8 qpn[0x18];
3005 3005
3006 u8 reserved_3[0x20]; 3006 u8 reserved_at_60[0x20];
3007 3007
3008 u8 opt_param_mask[0x20]; 3008 u8 opt_param_mask[0x20];
3009 3009
3010 u8 reserved_4[0x20]; 3010 u8 reserved_at_a0[0x20];
3011 3011
3012 struct mlx5_ifc_qpc_bits qpc; 3012 struct mlx5_ifc_qpc_bits qpc;
3013 3013
3014 u8 reserved_5[0x80]; 3014 u8 reserved_at_800[0x80];
3015}; 3015};
3016 3016
3017struct mlx5_ifc_query_xrc_srq_out_bits { 3017struct mlx5_ifc_query_xrc_srq_out_bits {
3018 u8 status[0x8]; 3018 u8 status[0x8];
3019 u8 reserved_0[0x18]; 3019 u8 reserved_at_8[0x18];
3020 3020
3021 u8 syndrome[0x20]; 3021 u8 syndrome[0x20];
3022 3022
3023 u8 reserved_1[0x40]; 3023 u8 reserved_at_40[0x40];
3024 3024
3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 3025 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
3026 3026
3027 u8 reserved_2[0x600]; 3027 u8 reserved_at_280[0x600];
3028 3028
3029 u8 pas[0][0x40]; 3029 u8 pas[0][0x40];
3030}; 3030};
3031 3031
3032struct mlx5_ifc_query_xrc_srq_in_bits { 3032struct mlx5_ifc_query_xrc_srq_in_bits {
3033 u8 opcode[0x10]; 3033 u8 opcode[0x10];
3034 u8 reserved_0[0x10]; 3034 u8 reserved_at_10[0x10];
3035 3035
3036 u8 reserved_1[0x10]; 3036 u8 reserved_at_20[0x10];
3037 u8 op_mod[0x10]; 3037 u8 op_mod[0x10];
3038 3038
3039 u8 reserved_2[0x8]; 3039 u8 reserved_at_40[0x8];
3040 u8 xrc_srqn[0x18]; 3040 u8 xrc_srqn[0x18];
3041 3041
3042 u8 reserved_3[0x20]; 3042 u8 reserved_at_60[0x20];
3043}; 3043};
3044 3044
3045enum { 3045enum {
@@ -3049,13 +3049,13 @@ enum {
3049 3049
3050struct mlx5_ifc_query_vport_state_out_bits { 3050struct mlx5_ifc_query_vport_state_out_bits {
3051 u8 status[0x8]; 3051 u8 status[0x8];
3052 u8 reserved_0[0x18]; 3052 u8 reserved_at_8[0x18];
3053 3053
3054 u8 syndrome[0x20]; 3054 u8 syndrome[0x20];
3055 3055
3056 u8 reserved_1[0x20]; 3056 u8 reserved_at_40[0x20];
3057 3057
3058 u8 reserved_2[0x18]; 3058 u8 reserved_at_60[0x18];
3059 u8 admin_state[0x4]; 3059 u8 admin_state[0x4];
3060 u8 state[0x4]; 3060 u8 state[0x4];
3061}; 3061};
@@ -3067,25 +3067,25 @@ enum {
3067 3067
3068struct mlx5_ifc_query_vport_state_in_bits { 3068struct mlx5_ifc_query_vport_state_in_bits {
3069 u8 opcode[0x10]; 3069 u8 opcode[0x10];
3070 u8 reserved_0[0x10]; 3070 u8 reserved_at_10[0x10];
3071 3071
3072 u8 reserved_1[0x10]; 3072 u8 reserved_at_20[0x10];
3073 u8 op_mod[0x10]; 3073 u8 op_mod[0x10];
3074 3074
3075 u8 other_vport[0x1]; 3075 u8 other_vport[0x1];
3076 u8 reserved_2[0xf]; 3076 u8 reserved_at_41[0xf];
3077 u8 vport_number[0x10]; 3077 u8 vport_number[0x10];
3078 3078
3079 u8 reserved_3[0x20]; 3079 u8 reserved_at_60[0x20];
3080}; 3080};
3081 3081
3082struct mlx5_ifc_query_vport_counter_out_bits { 3082struct mlx5_ifc_query_vport_counter_out_bits {
3083 u8 status[0x8]; 3083 u8 status[0x8];
3084 u8 reserved_0[0x18]; 3084 u8 reserved_at_8[0x18];
3085 3085
3086 u8 syndrome[0x20]; 3086 u8 syndrome[0x20];
3087 3087
3088 u8 reserved_1[0x40]; 3088 u8 reserved_at_40[0x40];
3089 3089
3090 struct mlx5_ifc_traffic_counter_bits received_errors; 3090 struct mlx5_ifc_traffic_counter_bits received_errors;
3091 3091
@@ -3111,7 +3111,7 @@ struct mlx5_ifc_query_vport_counter_out_bits {
3111 3111
3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; 3112 struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
3113 3113
3114 u8 reserved_2[0xa00]; 3114 u8 reserved_at_680[0xa00];
3115}; 3115};
3116 3116
3117enum { 3117enum {
@@ -3120,328 +3120,328 @@ enum {
3120 3120
3121struct mlx5_ifc_query_vport_counter_in_bits { 3121struct mlx5_ifc_query_vport_counter_in_bits {
3122 u8 opcode[0x10]; 3122 u8 opcode[0x10];
3123 u8 reserved_0[0x10]; 3123 u8 reserved_at_10[0x10];
3124 3124
3125 u8 reserved_1[0x10]; 3125 u8 reserved_at_20[0x10];
3126 u8 op_mod[0x10]; 3126 u8 op_mod[0x10];
3127 3127
3128 u8 other_vport[0x1]; 3128 u8 other_vport[0x1];
3129 u8 reserved_2[0xf]; 3129 u8 reserved_at_41[0xf];
3130 u8 vport_number[0x10]; 3130 u8 vport_number[0x10];
3131 3131
3132 u8 reserved_3[0x60]; 3132 u8 reserved_at_60[0x60];
3133 3133
3134 u8 clear[0x1]; 3134 u8 clear[0x1];
3135 u8 reserved_4[0x1f]; 3135 u8 reserved_at_c1[0x1f];
3136 3136
3137 u8 reserved_5[0x20]; 3137 u8 reserved_at_e0[0x20];
3138}; 3138};
3139 3139
3140struct mlx5_ifc_query_tis_out_bits { 3140struct mlx5_ifc_query_tis_out_bits {
3141 u8 status[0x8]; 3141 u8 status[0x8];
3142 u8 reserved_0[0x18]; 3142 u8 reserved_at_8[0x18];
3143 3143
3144 u8 syndrome[0x20]; 3144 u8 syndrome[0x20];
3145 3145
3146 u8 reserved_1[0x40]; 3146 u8 reserved_at_40[0x40];
3147 3147
3148 struct mlx5_ifc_tisc_bits tis_context; 3148 struct mlx5_ifc_tisc_bits tis_context;
3149}; 3149};
3150 3150
3151struct mlx5_ifc_query_tis_in_bits { 3151struct mlx5_ifc_query_tis_in_bits {
3152 u8 opcode[0x10]; 3152 u8 opcode[0x10];
3153 u8 reserved_0[0x10]; 3153 u8 reserved_at_10[0x10];
3154 3154
3155 u8 reserved_1[0x10]; 3155 u8 reserved_at_20[0x10];
3156 u8 op_mod[0x10]; 3156 u8 op_mod[0x10];
3157 3157
3158 u8 reserved_2[0x8]; 3158 u8 reserved_at_40[0x8];
3159 u8 tisn[0x18]; 3159 u8 tisn[0x18];
3160 3160
3161 u8 reserved_3[0x20]; 3161 u8 reserved_at_60[0x20];
3162}; 3162};
3163 3163
3164struct mlx5_ifc_query_tir_out_bits { 3164struct mlx5_ifc_query_tir_out_bits {
3165 u8 status[0x8]; 3165 u8 status[0x8];
3166 u8 reserved_0[0x18]; 3166 u8 reserved_at_8[0x18];
3167 3167
3168 u8 syndrome[0x20]; 3168 u8 syndrome[0x20];
3169 3169
3170 u8 reserved_1[0xc0]; 3170 u8 reserved_at_40[0xc0];
3171 3171
3172 struct mlx5_ifc_tirc_bits tir_context; 3172 struct mlx5_ifc_tirc_bits tir_context;
3173}; 3173};
3174 3174
3175struct mlx5_ifc_query_tir_in_bits { 3175struct mlx5_ifc_query_tir_in_bits {
3176 u8 opcode[0x10]; 3176 u8 opcode[0x10];
3177 u8 reserved_0[0x10]; 3177 u8 reserved_at_10[0x10];
3178 3178
3179 u8 reserved_1[0x10]; 3179 u8 reserved_at_20[0x10];
3180 u8 op_mod[0x10]; 3180 u8 op_mod[0x10];
3181 3181
3182 u8 reserved_2[0x8]; 3182 u8 reserved_at_40[0x8];
3183 u8 tirn[0x18]; 3183 u8 tirn[0x18];
3184 3184
3185 u8 reserved_3[0x20]; 3185 u8 reserved_at_60[0x20];
3186}; 3186};
3187 3187
3188struct mlx5_ifc_query_srq_out_bits { 3188struct mlx5_ifc_query_srq_out_bits {
3189 u8 status[0x8]; 3189 u8 status[0x8];
3190 u8 reserved_0[0x18]; 3190 u8 reserved_at_8[0x18];
3191 3191
3192 u8 syndrome[0x20]; 3192 u8 syndrome[0x20];
3193 3193
3194 u8 reserved_1[0x40]; 3194 u8 reserved_at_40[0x40];
3195 3195
3196 struct mlx5_ifc_srqc_bits srq_context_entry; 3196 struct mlx5_ifc_srqc_bits srq_context_entry;
3197 3197
3198 u8 reserved_2[0x600]; 3198 u8 reserved_at_280[0x600];
3199 3199
3200 u8 pas[0][0x40]; 3200 u8 pas[0][0x40];
3201}; 3201};
3202 3202
3203struct mlx5_ifc_query_srq_in_bits { 3203struct mlx5_ifc_query_srq_in_bits {
3204 u8 opcode[0x10]; 3204 u8 opcode[0x10];
3205 u8 reserved_0[0x10]; 3205 u8 reserved_at_10[0x10];
3206 3206
3207 u8 reserved_1[0x10]; 3207 u8 reserved_at_20[0x10];
3208 u8 op_mod[0x10]; 3208 u8 op_mod[0x10];
3209 3209
3210 u8 reserved_2[0x8]; 3210 u8 reserved_at_40[0x8];
3211 u8 srqn[0x18]; 3211 u8 srqn[0x18];
3212 3212
3213 u8 reserved_3[0x20]; 3213 u8 reserved_at_60[0x20];
3214}; 3214};
3215 3215
3216struct mlx5_ifc_query_sq_out_bits { 3216struct mlx5_ifc_query_sq_out_bits {
3217 u8 status[0x8]; 3217 u8 status[0x8];
3218 u8 reserved_0[0x18]; 3218 u8 reserved_at_8[0x18];
3219 3219
3220 u8 syndrome[0x20]; 3220 u8 syndrome[0x20];
3221 3221
3222 u8 reserved_1[0xc0]; 3222 u8 reserved_at_40[0xc0];
3223 3223
3224 struct mlx5_ifc_sqc_bits sq_context; 3224 struct mlx5_ifc_sqc_bits sq_context;
3225}; 3225};
3226 3226
3227struct mlx5_ifc_query_sq_in_bits { 3227struct mlx5_ifc_query_sq_in_bits {
3228 u8 opcode[0x10]; 3228 u8 opcode[0x10];
3229 u8 reserved_0[0x10]; 3229 u8 reserved_at_10[0x10];
3230 3230
3231 u8 reserved_1[0x10]; 3231 u8 reserved_at_20[0x10];
3232 u8 op_mod[0x10]; 3232 u8 op_mod[0x10];
3233 3233
3234 u8 reserved_2[0x8]; 3234 u8 reserved_at_40[0x8];
3235 u8 sqn[0x18]; 3235 u8 sqn[0x18];
3236 3236
3237 u8 reserved_3[0x20]; 3237 u8 reserved_at_60[0x20];
3238}; 3238};
3239 3239
3240struct mlx5_ifc_query_special_contexts_out_bits { 3240struct mlx5_ifc_query_special_contexts_out_bits {
3241 u8 status[0x8]; 3241 u8 status[0x8];
3242 u8 reserved_0[0x18]; 3242 u8 reserved_at_8[0x18];
3243 3243
3244 u8 syndrome[0x20]; 3244 u8 syndrome[0x20];
3245 3245
3246 u8 reserved_1[0x20]; 3246 u8 reserved_at_40[0x20];
3247 3247
3248 u8 resd_lkey[0x20]; 3248 u8 resd_lkey[0x20];
3249}; 3249};
3250 3250
3251struct mlx5_ifc_query_special_contexts_in_bits { 3251struct mlx5_ifc_query_special_contexts_in_bits {
3252 u8 opcode[0x10]; 3252 u8 opcode[0x10];
3253 u8 reserved_0[0x10]; 3253 u8 reserved_at_10[0x10];
3254 3254
3255 u8 reserved_1[0x10]; 3255 u8 reserved_at_20[0x10];
3256 u8 op_mod[0x10]; 3256 u8 op_mod[0x10];
3257 3257
3258 u8 reserved_2[0x40]; 3258 u8 reserved_at_40[0x40];
3259}; 3259};
3260 3260
3261struct mlx5_ifc_query_rqt_out_bits { 3261struct mlx5_ifc_query_rqt_out_bits {
3262 u8 status[0x8]; 3262 u8 status[0x8];
3263 u8 reserved_0[0x18]; 3263 u8 reserved_at_8[0x18];
3264 3264
3265 u8 syndrome[0x20]; 3265 u8 syndrome[0x20];
3266 3266
3267 u8 reserved_1[0xc0]; 3267 u8 reserved_at_40[0xc0];
3268 3268
3269 struct mlx5_ifc_rqtc_bits rqt_context; 3269 struct mlx5_ifc_rqtc_bits rqt_context;
3270}; 3270};
3271 3271
3272struct mlx5_ifc_query_rqt_in_bits { 3272struct mlx5_ifc_query_rqt_in_bits {
3273 u8 opcode[0x10]; 3273 u8 opcode[0x10];
3274 u8 reserved_0[0x10]; 3274 u8 reserved_at_10[0x10];
3275 3275
3276 u8 reserved_1[0x10]; 3276 u8 reserved_at_20[0x10];
3277 u8 op_mod[0x10]; 3277 u8 op_mod[0x10];
3278 3278
3279 u8 reserved_2[0x8]; 3279 u8 reserved_at_40[0x8];
3280 u8 rqtn[0x18]; 3280 u8 rqtn[0x18];
3281 3281
3282 u8 reserved_3[0x20]; 3282 u8 reserved_at_60[0x20];
3283}; 3283};
3284 3284
3285struct mlx5_ifc_query_rq_out_bits { 3285struct mlx5_ifc_query_rq_out_bits {
3286 u8 status[0x8]; 3286 u8 status[0x8];
3287 u8 reserved_0[0x18]; 3287 u8 reserved_at_8[0x18];
3288 3288
3289 u8 syndrome[0x20]; 3289 u8 syndrome[0x20];
3290 3290
3291 u8 reserved_1[0xc0]; 3291 u8 reserved_at_40[0xc0];
3292 3292
3293 struct mlx5_ifc_rqc_bits rq_context; 3293 struct mlx5_ifc_rqc_bits rq_context;
3294}; 3294};
3295 3295
3296struct mlx5_ifc_query_rq_in_bits { 3296struct mlx5_ifc_query_rq_in_bits {
3297 u8 opcode[0x10]; 3297 u8 opcode[0x10];
3298 u8 reserved_0[0x10]; 3298 u8 reserved_at_10[0x10];
3299 3299
3300 u8 reserved_1[0x10]; 3300 u8 reserved_at_20[0x10];
3301 u8 op_mod[0x10]; 3301 u8 op_mod[0x10];
3302 3302
3303 u8 reserved_2[0x8]; 3303 u8 reserved_at_40[0x8];
3304 u8 rqn[0x18]; 3304 u8 rqn[0x18];
3305 3305
3306 u8 reserved_3[0x20]; 3306 u8 reserved_at_60[0x20];
3307}; 3307};
3308 3308
3309struct mlx5_ifc_query_roce_address_out_bits { 3309struct mlx5_ifc_query_roce_address_out_bits {
3310 u8 status[0x8]; 3310 u8 status[0x8];
3311 u8 reserved_0[0x18]; 3311 u8 reserved_at_8[0x18];
3312 3312
3313 u8 syndrome[0x20]; 3313 u8 syndrome[0x20];
3314 3314
3315 u8 reserved_1[0x40]; 3315 u8 reserved_at_40[0x40];
3316 3316
3317 struct mlx5_ifc_roce_addr_layout_bits roce_address; 3317 struct mlx5_ifc_roce_addr_layout_bits roce_address;
3318}; 3318};
3319 3319
3320struct mlx5_ifc_query_roce_address_in_bits { 3320struct mlx5_ifc_query_roce_address_in_bits {
3321 u8 opcode[0x10]; 3321 u8 opcode[0x10];
3322 u8 reserved_0[0x10]; 3322 u8 reserved_at_10[0x10];
3323 3323
3324 u8 reserved_1[0x10]; 3324 u8 reserved_at_20[0x10];
3325 u8 op_mod[0x10]; 3325 u8 op_mod[0x10];
3326 3326
3327 u8 roce_address_index[0x10]; 3327 u8 roce_address_index[0x10];
3328 u8 reserved_2[0x10]; 3328 u8 reserved_at_50[0x10];
3329 3329
3330 u8 reserved_3[0x20]; 3330 u8 reserved_at_60[0x20];
3331}; 3331};
3332 3332
3333struct mlx5_ifc_query_rmp_out_bits { 3333struct mlx5_ifc_query_rmp_out_bits {
3334 u8 status[0x8]; 3334 u8 status[0x8];
3335 u8 reserved_0[0x18]; 3335 u8 reserved_at_8[0x18];
3336 3336
3337 u8 syndrome[0x20]; 3337 u8 syndrome[0x20];
3338 3338
3339 u8 reserved_1[0xc0]; 3339 u8 reserved_at_40[0xc0];
3340 3340
3341 struct mlx5_ifc_rmpc_bits rmp_context; 3341 struct mlx5_ifc_rmpc_bits rmp_context;
3342}; 3342};
3343 3343
3344struct mlx5_ifc_query_rmp_in_bits { 3344struct mlx5_ifc_query_rmp_in_bits {
3345 u8 opcode[0x10]; 3345 u8 opcode[0x10];
3346 u8 reserved_0[0x10]; 3346 u8 reserved_at_10[0x10];
3347 3347
3348 u8 reserved_1[0x10]; 3348 u8 reserved_at_20[0x10];
3349 u8 op_mod[0x10]; 3349 u8 op_mod[0x10];
3350 3350
3351 u8 reserved_2[0x8]; 3351 u8 reserved_at_40[0x8];
3352 u8 rmpn[0x18]; 3352 u8 rmpn[0x18];
3353 3353
3354 u8 reserved_3[0x20]; 3354 u8 reserved_at_60[0x20];
3355}; 3355};
3356 3356
3357struct mlx5_ifc_query_qp_out_bits { 3357struct mlx5_ifc_query_qp_out_bits {
3358 u8 status[0x8]; 3358 u8 status[0x8];
3359 u8 reserved_0[0x18]; 3359 u8 reserved_at_8[0x18];
3360 3360
3361 u8 syndrome[0x20]; 3361 u8 syndrome[0x20];
3362 3362
3363 u8 reserved_1[0x40]; 3363 u8 reserved_at_40[0x40];
3364 3364
3365 u8 opt_param_mask[0x20]; 3365 u8 opt_param_mask[0x20];
3366 3366
3367 u8 reserved_2[0x20]; 3367 u8 reserved_at_a0[0x20];
3368 3368
3369 struct mlx5_ifc_qpc_bits qpc; 3369 struct mlx5_ifc_qpc_bits qpc;
3370 3370
3371 u8 reserved_3[0x80]; 3371 u8 reserved_at_800[0x80];
3372 3372
3373 u8 pas[0][0x40]; 3373 u8 pas[0][0x40];
3374}; 3374};
3375 3375
3376struct mlx5_ifc_query_qp_in_bits { 3376struct mlx5_ifc_query_qp_in_bits {
3377 u8 opcode[0x10]; 3377 u8 opcode[0x10];
3378 u8 reserved_0[0x10]; 3378 u8 reserved_at_10[0x10];
3379 3379
3380 u8 reserved_1[0x10]; 3380 u8 reserved_at_20[0x10];
3381 u8 op_mod[0x10]; 3381 u8 op_mod[0x10];
3382 3382
3383 u8 reserved_2[0x8]; 3383 u8 reserved_at_40[0x8];
3384 u8 qpn[0x18]; 3384 u8 qpn[0x18];
3385 3385
3386 u8 reserved_3[0x20]; 3386 u8 reserved_at_60[0x20];
3387}; 3387};
3388 3388
3389struct mlx5_ifc_query_q_counter_out_bits { 3389struct mlx5_ifc_query_q_counter_out_bits {
3390 u8 status[0x8]; 3390 u8 status[0x8];
3391 u8 reserved_0[0x18]; 3391 u8 reserved_at_8[0x18];
3392 3392
3393 u8 syndrome[0x20]; 3393 u8 syndrome[0x20];
3394 3394
3395 u8 reserved_1[0x40]; 3395 u8 reserved_at_40[0x40];
3396 3396
3397 u8 rx_write_requests[0x20]; 3397 u8 rx_write_requests[0x20];
3398 3398
3399 u8 reserved_2[0x20]; 3399 u8 reserved_at_a0[0x20];
3400 3400
3401 u8 rx_read_requests[0x20]; 3401 u8 rx_read_requests[0x20];
3402 3402
3403 u8 reserved_3[0x20]; 3403 u8 reserved_at_e0[0x20];
3404 3404
3405 u8 rx_atomic_requests[0x20]; 3405 u8 rx_atomic_requests[0x20];
3406 3406
3407 u8 reserved_4[0x20]; 3407 u8 reserved_at_120[0x20];
3408 3408
3409 u8 rx_dct_connect[0x20]; 3409 u8 rx_dct_connect[0x20];
3410 3410
3411 u8 reserved_5[0x20]; 3411 u8 reserved_at_160[0x20];
3412 3412
3413 u8 out_of_buffer[0x20]; 3413 u8 out_of_buffer[0x20];
3414 3414
3415 u8 reserved_6[0x20]; 3415 u8 reserved_at_1a0[0x20];
3416 3416
3417 u8 out_of_sequence[0x20]; 3417 u8 out_of_sequence[0x20];
3418 3418
3419 u8 reserved_7[0x620]; 3419 u8 reserved_at_1e0[0x620];
3420}; 3420};
3421 3421
3422struct mlx5_ifc_query_q_counter_in_bits { 3422struct mlx5_ifc_query_q_counter_in_bits {
3423 u8 opcode[0x10]; 3423 u8 opcode[0x10];
3424 u8 reserved_0[0x10]; 3424 u8 reserved_at_10[0x10];
3425 3425
3426 u8 reserved_1[0x10]; 3426 u8 reserved_at_20[0x10];
3427 u8 op_mod[0x10]; 3427 u8 op_mod[0x10];
3428 3428
3429 u8 reserved_2[0x80]; 3429 u8 reserved_at_40[0x80];
3430 3430
3431 u8 clear[0x1]; 3431 u8 clear[0x1];
3432 u8 reserved_3[0x1f]; 3432 u8 reserved_at_c1[0x1f];
3433 3433
3434 u8 reserved_4[0x18]; 3434 u8 reserved_at_e0[0x18];
3435 u8 counter_set_id[0x8]; 3435 u8 counter_set_id[0x8];
3436}; 3436};
3437 3437
3438struct mlx5_ifc_query_pages_out_bits { 3438struct mlx5_ifc_query_pages_out_bits {
3439 u8 status[0x8]; 3439 u8 status[0x8];
3440 u8 reserved_0[0x18]; 3440 u8 reserved_at_8[0x18];
3441 3441
3442 u8 syndrome[0x20]; 3442 u8 syndrome[0x20];
3443 3443
3444 u8 reserved_1[0x10]; 3444 u8 reserved_at_40[0x10];
3445 u8 function_id[0x10]; 3445 u8 function_id[0x10];
3446 3446
3447 u8 num_pages[0x20]; 3447 u8 num_pages[0x20];
@@ -3455,55 +3455,55 @@ enum {
3455 3455
3456struct mlx5_ifc_query_pages_in_bits { 3456struct mlx5_ifc_query_pages_in_bits {
3457 u8 opcode[0x10]; 3457 u8 opcode[0x10];
3458 u8 reserved_0[0x10]; 3458 u8 reserved_at_10[0x10];
3459 3459
3460 u8 reserved_1[0x10]; 3460 u8 reserved_at_20[0x10];
3461 u8 op_mod[0x10]; 3461 u8 op_mod[0x10];
3462 3462
3463 u8 reserved_2[0x10]; 3463 u8 reserved_at_40[0x10];
3464 u8 function_id[0x10]; 3464 u8 function_id[0x10];
3465 3465
3466 u8 reserved_3[0x20]; 3466 u8 reserved_at_60[0x20];
3467}; 3467};
3468 3468
3469struct mlx5_ifc_query_nic_vport_context_out_bits { 3469struct mlx5_ifc_query_nic_vport_context_out_bits {
3470 u8 status[0x8]; 3470 u8 status[0x8];
3471 u8 reserved_0[0x18]; 3471 u8 reserved_at_8[0x18];
3472 3472
3473 u8 syndrome[0x20]; 3473 u8 syndrome[0x20];
3474 3474
3475 u8 reserved_1[0x40]; 3475 u8 reserved_at_40[0x40];
3476 3476
3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 3477 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
3478}; 3478};
3479 3479
3480struct mlx5_ifc_query_nic_vport_context_in_bits { 3480struct mlx5_ifc_query_nic_vport_context_in_bits {
3481 u8 opcode[0x10]; 3481 u8 opcode[0x10];
3482 u8 reserved_0[0x10]; 3482 u8 reserved_at_10[0x10];
3483 3483
3484 u8 reserved_1[0x10]; 3484 u8 reserved_at_20[0x10];
3485 u8 op_mod[0x10]; 3485 u8 op_mod[0x10];
3486 3486
3487 u8 other_vport[0x1]; 3487 u8 other_vport[0x1];
3488 u8 reserved_2[0xf]; 3488 u8 reserved_at_41[0xf];
3489 u8 vport_number[0x10]; 3489 u8 vport_number[0x10];
3490 3490
3491 u8 reserved_3[0x5]; 3491 u8 reserved_at_60[0x5];
3492 u8 allowed_list_type[0x3]; 3492 u8 allowed_list_type[0x3];
3493 u8 reserved_4[0x18]; 3493 u8 reserved_at_68[0x18];
3494}; 3494};
3495 3495
3496struct mlx5_ifc_query_mkey_out_bits { 3496struct mlx5_ifc_query_mkey_out_bits {
3497 u8 status[0x8]; 3497 u8 status[0x8];
3498 u8 reserved_0[0x18]; 3498 u8 reserved_at_8[0x18];
3499 3499
3500 u8 syndrome[0x20]; 3500 u8 syndrome[0x20];
3501 3501
3502 u8 reserved_1[0x40]; 3502 u8 reserved_at_40[0x40];
3503 3503
3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 3504 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
3505 3505
3506 u8 reserved_2[0x600]; 3506 u8 reserved_at_280[0x600];
3507 3507
3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8]; 3508 u8 bsf0_klm0_pas_mtt0_1[16][0x8];
3509 3509
@@ -3512,265 +3512,265 @@ struct mlx5_ifc_query_mkey_out_bits {
3512 3512
3513struct mlx5_ifc_query_mkey_in_bits { 3513struct mlx5_ifc_query_mkey_in_bits {
3514 u8 opcode[0x10]; 3514 u8 opcode[0x10];
3515 u8 reserved_0[0x10]; 3515 u8 reserved_at_10[0x10];
3516 3516
3517 u8 reserved_1[0x10]; 3517 u8 reserved_at_20[0x10];
3518 u8 op_mod[0x10]; 3518 u8 op_mod[0x10];
3519 3519
3520 u8 reserved_2[0x8]; 3520 u8 reserved_at_40[0x8];
3521 u8 mkey_index[0x18]; 3521 u8 mkey_index[0x18];
3522 3522
3523 u8 pg_access[0x1]; 3523 u8 pg_access[0x1];
3524 u8 reserved_3[0x1f]; 3524 u8 reserved_at_61[0x1f];
3525}; 3525};
3526 3526
3527struct mlx5_ifc_query_mad_demux_out_bits { 3527struct mlx5_ifc_query_mad_demux_out_bits {
3528 u8 status[0x8]; 3528 u8 status[0x8];
3529 u8 reserved_0[0x18]; 3529 u8 reserved_at_8[0x18];
3530 3530
3531 u8 syndrome[0x20]; 3531 u8 syndrome[0x20];
3532 3532
3533 u8 reserved_1[0x40]; 3533 u8 reserved_at_40[0x40];
3534 3534
3535 u8 mad_dumux_parameters_block[0x20]; 3535 u8 mad_dumux_parameters_block[0x20];
3536}; 3536};
3537 3537
3538struct mlx5_ifc_query_mad_demux_in_bits { 3538struct mlx5_ifc_query_mad_demux_in_bits {
3539 u8 opcode[0x10]; 3539 u8 opcode[0x10];
3540 u8 reserved_0[0x10]; 3540 u8 reserved_at_10[0x10];
3541 3541
3542 u8 reserved_1[0x10]; 3542 u8 reserved_at_20[0x10];
3543 u8 op_mod[0x10]; 3543 u8 op_mod[0x10];
3544 3544
3545 u8 reserved_2[0x40]; 3545 u8 reserved_at_40[0x40];
3546}; 3546};
3547 3547
3548struct mlx5_ifc_query_l2_table_entry_out_bits { 3548struct mlx5_ifc_query_l2_table_entry_out_bits {
3549 u8 status[0x8]; 3549 u8 status[0x8];
3550 u8 reserved_0[0x18]; 3550 u8 reserved_at_8[0x18];
3551 3551
3552 u8 syndrome[0x20]; 3552 u8 syndrome[0x20];
3553 3553
3554 u8 reserved_1[0xa0]; 3554 u8 reserved_at_40[0xa0];
3555 3555
3556 u8 reserved_2[0x13]; 3556 u8 reserved_at_e0[0x13];
3557 u8 vlan_valid[0x1]; 3557 u8 vlan_valid[0x1];
3558 u8 vlan[0xc]; 3558 u8 vlan[0xc];
3559 3559
3560 struct mlx5_ifc_mac_address_layout_bits mac_address; 3560 struct mlx5_ifc_mac_address_layout_bits mac_address;
3561 3561
3562 u8 reserved_3[0xc0]; 3562 u8 reserved_at_140[0xc0];
3563}; 3563};
3564 3564
3565struct mlx5_ifc_query_l2_table_entry_in_bits { 3565struct mlx5_ifc_query_l2_table_entry_in_bits {
3566 u8 opcode[0x10]; 3566 u8 opcode[0x10];
3567 u8 reserved_0[0x10]; 3567 u8 reserved_at_10[0x10];
3568 3568
3569 u8 reserved_1[0x10]; 3569 u8 reserved_at_20[0x10];
3570 u8 op_mod[0x10]; 3570 u8 op_mod[0x10];
3571 3571
3572 u8 reserved_2[0x60]; 3572 u8 reserved_at_40[0x60];
3573 3573
3574 u8 reserved_3[0x8]; 3574 u8 reserved_at_a0[0x8];
3575 u8 table_index[0x18]; 3575 u8 table_index[0x18];
3576 3576
3577 u8 reserved_4[0x140]; 3577 u8 reserved_at_c0[0x140];
3578}; 3578};
3579 3579
3580struct mlx5_ifc_query_issi_out_bits { 3580struct mlx5_ifc_query_issi_out_bits {
3581 u8 status[0x8]; 3581 u8 status[0x8];
3582 u8 reserved_0[0x18]; 3582 u8 reserved_at_8[0x18];
3583 3583
3584 u8 syndrome[0x20]; 3584 u8 syndrome[0x20];
3585 3585
3586 u8 reserved_1[0x10]; 3586 u8 reserved_at_40[0x10];
3587 u8 current_issi[0x10]; 3587 u8 current_issi[0x10];
3588 3588
3589 u8 reserved_2[0xa0]; 3589 u8 reserved_at_60[0xa0];
3590 3590
3591 u8 supported_issi_reserved[76][0x8]; 3591 u8 reserved_at_100[76][0x8];
3592 u8 supported_issi_dw0[0x20]; 3592 u8 supported_issi_dw0[0x20];
3593}; 3593};
3594 3594
3595struct mlx5_ifc_query_issi_in_bits { 3595struct mlx5_ifc_query_issi_in_bits {
3596 u8 opcode[0x10]; 3596 u8 opcode[0x10];
3597 u8 reserved_0[0x10]; 3597 u8 reserved_at_10[0x10];
3598 3598
3599 u8 reserved_1[0x10]; 3599 u8 reserved_at_20[0x10];
3600 u8 op_mod[0x10]; 3600 u8 op_mod[0x10];
3601 3601
3602 u8 reserved_2[0x40]; 3602 u8 reserved_at_40[0x40];
3603}; 3603};
3604 3604
3605struct mlx5_ifc_query_hca_vport_pkey_out_bits { 3605struct mlx5_ifc_query_hca_vport_pkey_out_bits {
3606 u8 status[0x8]; 3606 u8 status[0x8];
3607 u8 reserved_0[0x18]; 3607 u8 reserved_at_8[0x18];
3608 3608
3609 u8 syndrome[0x20]; 3609 u8 syndrome[0x20];
3610 3610
3611 u8 reserved_1[0x40]; 3611 u8 reserved_at_40[0x40];
3612 3612
3613 struct mlx5_ifc_pkey_bits pkey[0]; 3613 struct mlx5_ifc_pkey_bits pkey[0];
3614}; 3614};
3615 3615
3616struct mlx5_ifc_query_hca_vport_pkey_in_bits { 3616struct mlx5_ifc_query_hca_vport_pkey_in_bits {
3617 u8 opcode[0x10]; 3617 u8 opcode[0x10];
3618 u8 reserved_0[0x10]; 3618 u8 reserved_at_10[0x10];
3619 3619
3620 u8 reserved_1[0x10]; 3620 u8 reserved_at_20[0x10];
3621 u8 op_mod[0x10]; 3621 u8 op_mod[0x10];
3622 3622
3623 u8 other_vport[0x1]; 3623 u8 other_vport[0x1];
3624 u8 reserved_2[0xb]; 3624 u8 reserved_at_41[0xb];
3625 u8 port_num[0x4]; 3625 u8 port_num[0x4];
3626 u8 vport_number[0x10]; 3626 u8 vport_number[0x10];
3627 3627
3628 u8 reserved_3[0x10]; 3628 u8 reserved_at_60[0x10];
3629 u8 pkey_index[0x10]; 3629 u8 pkey_index[0x10];
3630}; 3630};
3631 3631
3632struct mlx5_ifc_query_hca_vport_gid_out_bits { 3632struct mlx5_ifc_query_hca_vport_gid_out_bits {
3633 u8 status[0x8]; 3633 u8 status[0x8];
3634 u8 reserved_0[0x18]; 3634 u8 reserved_at_8[0x18];
3635 3635
3636 u8 syndrome[0x20]; 3636 u8 syndrome[0x20];
3637 3637
3638 u8 reserved_1[0x20]; 3638 u8 reserved_at_40[0x20];
3639 3639
3640 u8 gids_num[0x10]; 3640 u8 gids_num[0x10];
3641 u8 reserved_2[0x10]; 3641 u8 reserved_at_70[0x10];
3642 3642
3643 struct mlx5_ifc_array128_auto_bits gid[0]; 3643 struct mlx5_ifc_array128_auto_bits gid[0];
3644}; 3644};
3645 3645
3646struct mlx5_ifc_query_hca_vport_gid_in_bits { 3646struct mlx5_ifc_query_hca_vport_gid_in_bits {
3647 u8 opcode[0x10]; 3647 u8 opcode[0x10];
3648 u8 reserved_0[0x10]; 3648 u8 reserved_at_10[0x10];
3649 3649
3650 u8 reserved_1[0x10]; 3650 u8 reserved_at_20[0x10];
3651 u8 op_mod[0x10]; 3651 u8 op_mod[0x10];
3652 3652
3653 u8 other_vport[0x1]; 3653 u8 other_vport[0x1];
3654 u8 reserved_2[0xb]; 3654 u8 reserved_at_41[0xb];
3655 u8 port_num[0x4]; 3655 u8 port_num[0x4];
3656 u8 vport_number[0x10]; 3656 u8 vport_number[0x10];
3657 3657
3658 u8 reserved_3[0x10]; 3658 u8 reserved_at_60[0x10];
3659 u8 gid_index[0x10]; 3659 u8 gid_index[0x10];
3660}; 3660};
3661 3661
3662struct mlx5_ifc_query_hca_vport_context_out_bits { 3662struct mlx5_ifc_query_hca_vport_context_out_bits {
3663 u8 status[0x8]; 3663 u8 status[0x8];
3664 u8 reserved_0[0x18]; 3664 u8 reserved_at_8[0x18];
3665 3665
3666 u8 syndrome[0x20]; 3666 u8 syndrome[0x20];
3667 3667
3668 u8 reserved_1[0x40]; 3668 u8 reserved_at_40[0x40];
3669 3669
3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 3670 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
3671}; 3671};
3672 3672
3673struct mlx5_ifc_query_hca_vport_context_in_bits { 3673struct mlx5_ifc_query_hca_vport_context_in_bits {
3674 u8 opcode[0x10]; 3674 u8 opcode[0x10];
3675 u8 reserved_0[0x10]; 3675 u8 reserved_at_10[0x10];
3676 3676
3677 u8 reserved_1[0x10]; 3677 u8 reserved_at_20[0x10];
3678 u8 op_mod[0x10]; 3678 u8 op_mod[0x10];
3679 3679
3680 u8 other_vport[0x1]; 3680 u8 other_vport[0x1];
3681 u8 reserved_2[0xb]; 3681 u8 reserved_at_41[0xb];
3682 u8 port_num[0x4]; 3682 u8 port_num[0x4];
3683 u8 vport_number[0x10]; 3683 u8 vport_number[0x10];
3684 3684
3685 u8 reserved_3[0x20]; 3685 u8 reserved_at_60[0x20];
3686}; 3686};
3687 3687
3688struct mlx5_ifc_query_hca_cap_out_bits { 3688struct mlx5_ifc_query_hca_cap_out_bits {
3689 u8 status[0x8]; 3689 u8 status[0x8];
3690 u8 reserved_0[0x18]; 3690 u8 reserved_at_8[0x18];
3691 3691
3692 u8 syndrome[0x20]; 3692 u8 syndrome[0x20];
3693 3693
3694 u8 reserved_1[0x40]; 3694 u8 reserved_at_40[0x40];
3695 3695
3696 union mlx5_ifc_hca_cap_union_bits capability; 3696 union mlx5_ifc_hca_cap_union_bits capability;
3697}; 3697};
3698 3698
3699struct mlx5_ifc_query_hca_cap_in_bits { 3699struct mlx5_ifc_query_hca_cap_in_bits {
3700 u8 opcode[0x10]; 3700 u8 opcode[0x10];
3701 u8 reserved_0[0x10]; 3701 u8 reserved_at_10[0x10];
3702 3702
3703 u8 reserved_1[0x10]; 3703 u8 reserved_at_20[0x10];
3704 u8 op_mod[0x10]; 3704 u8 op_mod[0x10];
3705 3705
3706 u8 reserved_2[0x40]; 3706 u8 reserved_at_40[0x40];
3707}; 3707};
3708 3708
3709struct mlx5_ifc_query_flow_table_out_bits { 3709struct mlx5_ifc_query_flow_table_out_bits {
3710 u8 status[0x8]; 3710 u8 status[0x8];
3711 u8 reserved_0[0x18]; 3711 u8 reserved_at_8[0x18];
3712 3712
3713 u8 syndrome[0x20]; 3713 u8 syndrome[0x20];
3714 3714
3715 u8 reserved_1[0x80]; 3715 u8 reserved_at_40[0x80];
3716 3716
3717 u8 reserved_2[0x8]; 3717 u8 reserved_at_c0[0x8];
3718 u8 level[0x8]; 3718 u8 level[0x8];
3719 u8 reserved_3[0x8]; 3719 u8 reserved_at_d0[0x8];
3720 u8 log_size[0x8]; 3720 u8 log_size[0x8];
3721 3721
3722 u8 reserved_4[0x120]; 3722 u8 reserved_at_e0[0x120];
3723}; 3723};
3724 3724
3725struct mlx5_ifc_query_flow_table_in_bits { 3725struct mlx5_ifc_query_flow_table_in_bits {
3726 u8 opcode[0x10]; 3726 u8 opcode[0x10];
3727 u8 reserved_0[0x10]; 3727 u8 reserved_at_10[0x10];
3728 3728
3729 u8 reserved_1[0x10]; 3729 u8 reserved_at_20[0x10];
3730 u8 op_mod[0x10]; 3730 u8 op_mod[0x10];
3731 3731
3732 u8 reserved_2[0x40]; 3732 u8 reserved_at_40[0x40];
3733 3733
3734 u8 table_type[0x8]; 3734 u8 table_type[0x8];
3735 u8 reserved_3[0x18]; 3735 u8 reserved_at_88[0x18];
3736 3736
3737 u8 reserved_4[0x8]; 3737 u8 reserved_at_a0[0x8];
3738 u8 table_id[0x18]; 3738 u8 table_id[0x18];
3739 3739
3740 u8 reserved_5[0x140]; 3740 u8 reserved_at_c0[0x140];
3741}; 3741};
3742 3742
3743struct mlx5_ifc_query_fte_out_bits { 3743struct mlx5_ifc_query_fte_out_bits {
3744 u8 status[0x8]; 3744 u8 status[0x8];
3745 u8 reserved_0[0x18]; 3745 u8 reserved_at_8[0x18];
3746 3746
3747 u8 syndrome[0x20]; 3747 u8 syndrome[0x20];
3748 3748
3749 u8 reserved_1[0x1c0]; 3749 u8 reserved_at_40[0x1c0];
3750 3750
3751 struct mlx5_ifc_flow_context_bits flow_context; 3751 struct mlx5_ifc_flow_context_bits flow_context;
3752}; 3752};
3753 3753
3754struct mlx5_ifc_query_fte_in_bits { 3754struct mlx5_ifc_query_fte_in_bits {
3755 u8 opcode[0x10]; 3755 u8 opcode[0x10];
3756 u8 reserved_0[0x10]; 3756 u8 reserved_at_10[0x10];
3757 3757
3758 u8 reserved_1[0x10]; 3758 u8 reserved_at_20[0x10];
3759 u8 op_mod[0x10]; 3759 u8 op_mod[0x10];
3760 3760
3761 u8 reserved_2[0x40]; 3761 u8 reserved_at_40[0x40];
3762 3762
3763 u8 table_type[0x8]; 3763 u8 table_type[0x8];
3764 u8 reserved_3[0x18]; 3764 u8 reserved_at_88[0x18];
3765 3765
3766 u8 reserved_4[0x8]; 3766 u8 reserved_at_a0[0x8];
3767 u8 table_id[0x18]; 3767 u8 table_id[0x18];
3768 3768
3769 u8 reserved_5[0x40]; 3769 u8 reserved_at_c0[0x40];
3770 3770
3771 u8 flow_index[0x20]; 3771 u8 flow_index[0x20];
3772 3772
3773 u8 reserved_6[0xe0]; 3773 u8 reserved_at_120[0xe0];
3774}; 3774};
3775 3775
3776enum { 3776enum {
@@ -3781,84 +3781,84 @@ enum {
3781 3781
3782struct mlx5_ifc_query_flow_group_out_bits { 3782struct mlx5_ifc_query_flow_group_out_bits {
3783 u8 status[0x8]; 3783 u8 status[0x8];
3784 u8 reserved_0[0x18]; 3784 u8 reserved_at_8[0x18];
3785 3785
3786 u8 syndrome[0x20]; 3786 u8 syndrome[0x20];
3787 3787
3788 u8 reserved_1[0xa0]; 3788 u8 reserved_at_40[0xa0];
3789 3789
3790 u8 start_flow_index[0x20]; 3790 u8 start_flow_index[0x20];
3791 3791
3792 u8 reserved_2[0x20]; 3792 u8 reserved_at_100[0x20];
3793 3793
3794 u8 end_flow_index[0x20]; 3794 u8 end_flow_index[0x20];
3795 3795
3796 u8 reserved_3[0xa0]; 3796 u8 reserved_at_140[0xa0];
3797 3797
3798 u8 reserved_4[0x18]; 3798 u8 reserved_at_1e0[0x18];
3799 u8 match_criteria_enable[0x8]; 3799 u8 match_criteria_enable[0x8];
3800 3800
3801 struct mlx5_ifc_fte_match_param_bits match_criteria; 3801 struct mlx5_ifc_fte_match_param_bits match_criteria;
3802 3802
3803 u8 reserved_5[0xe00]; 3803 u8 reserved_at_1200[0xe00];
3804}; 3804};
3805 3805
3806struct mlx5_ifc_query_flow_group_in_bits { 3806struct mlx5_ifc_query_flow_group_in_bits {
3807 u8 opcode[0x10]; 3807 u8 opcode[0x10];
3808 u8 reserved_0[0x10]; 3808 u8 reserved_at_10[0x10];
3809 3809
3810 u8 reserved_1[0x10]; 3810 u8 reserved_at_20[0x10];
3811 u8 op_mod[0x10]; 3811 u8 op_mod[0x10];
3812 3812
3813 u8 reserved_2[0x40]; 3813 u8 reserved_at_40[0x40];
3814 3814
3815 u8 table_type[0x8]; 3815 u8 table_type[0x8];
3816 u8 reserved_3[0x18]; 3816 u8 reserved_at_88[0x18];
3817 3817
3818 u8 reserved_4[0x8]; 3818 u8 reserved_at_a0[0x8];
3819 u8 table_id[0x18]; 3819 u8 table_id[0x18];
3820 3820
3821 u8 group_id[0x20]; 3821 u8 group_id[0x20];
3822 3822
3823 u8 reserved_5[0x120]; 3823 u8 reserved_at_e0[0x120];
3824}; 3824};
3825 3825
3826struct mlx5_ifc_query_esw_vport_context_out_bits { 3826struct mlx5_ifc_query_esw_vport_context_out_bits {
3827 u8 status[0x8]; 3827 u8 status[0x8];
3828 u8 reserved_0[0x18]; 3828 u8 reserved_at_8[0x18];
3829 3829
3830 u8 syndrome[0x20]; 3830 u8 syndrome[0x20];
3831 3831
3832 u8 reserved_1[0x40]; 3832 u8 reserved_at_40[0x40];
3833 3833
3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context; 3834 struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
3835}; 3835};
3836 3836
3837struct mlx5_ifc_query_esw_vport_context_in_bits { 3837struct mlx5_ifc_query_esw_vport_context_in_bits {
3838 u8 opcode[0x10]; 3838 u8 opcode[0x10];
3839 u8 reserved_0[0x10]; 3839 u8 reserved_at_10[0x10];
3840 3840
3841 u8 reserved_1[0x10]; 3841 u8 reserved_at_20[0x10];
3842 u8 op_mod[0x10]; 3842 u8 op_mod[0x10];
3843 3843
3844 u8 other_vport[0x1]; 3844 u8 other_vport[0x1];
3845 u8 reserved_2[0xf]; 3845 u8 reserved_at_41[0xf];
3846 u8 vport_number[0x10]; 3846 u8 vport_number[0x10];
3847 3847
3848 u8 reserved_3[0x20]; 3848 u8 reserved_at_60[0x20];
3849}; 3849};
3850 3850
3851struct mlx5_ifc_modify_esw_vport_context_out_bits { 3851struct mlx5_ifc_modify_esw_vport_context_out_bits {
3852 u8 status[0x8]; 3852 u8 status[0x8];
3853 u8 reserved_0[0x18]; 3853 u8 reserved_at_8[0x18];
3854 3854
3855 u8 syndrome[0x20]; 3855 u8 syndrome[0x20];
3856 3856
3857 u8 reserved_1[0x40]; 3857 u8 reserved_at_40[0x40];
3858}; 3858};
3859 3859
3860struct mlx5_ifc_esw_vport_context_fields_select_bits { 3860struct mlx5_ifc_esw_vport_context_fields_select_bits {
3861 u8 reserved[0x1c]; 3861 u8 reserved_at_0[0x1c];
3862 u8 vport_cvlan_insert[0x1]; 3862 u8 vport_cvlan_insert[0x1];
3863 u8 vport_svlan_insert[0x1]; 3863 u8 vport_svlan_insert[0x1];
3864 u8 vport_cvlan_strip[0x1]; 3864 u8 vport_cvlan_strip[0x1];
@@ -3867,13 +3867,13 @@ struct mlx5_ifc_esw_vport_context_fields_select_bits {
3867 3867
3868struct mlx5_ifc_modify_esw_vport_context_in_bits { 3868struct mlx5_ifc_modify_esw_vport_context_in_bits {
3869 u8 opcode[0x10]; 3869 u8 opcode[0x10];
3870 u8 reserved_0[0x10]; 3870 u8 reserved_at_10[0x10];
3871 3871
3872 u8 reserved_1[0x10]; 3872 u8 reserved_at_20[0x10];
3873 u8 op_mod[0x10]; 3873 u8 op_mod[0x10];
3874 3874
3875 u8 other_vport[0x1]; 3875 u8 other_vport[0x1];
3876 u8 reserved_2[0xf]; 3876 u8 reserved_at_41[0xf];
3877 u8 vport_number[0x10]; 3877 u8 vport_number[0x10];
3878 3878
3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select; 3879 struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
@@ -3883,124 +3883,124 @@ struct mlx5_ifc_modify_esw_vport_context_in_bits {
3883 3883
3884struct mlx5_ifc_query_eq_out_bits { 3884struct mlx5_ifc_query_eq_out_bits {
3885 u8 status[0x8]; 3885 u8 status[0x8];
3886 u8 reserved_0[0x18]; 3886 u8 reserved_at_8[0x18];
3887 3887
3888 u8 syndrome[0x20]; 3888 u8 syndrome[0x20];
3889 3889
3890 u8 reserved_1[0x40]; 3890 u8 reserved_at_40[0x40];
3891 3891
3892 struct mlx5_ifc_eqc_bits eq_context_entry; 3892 struct mlx5_ifc_eqc_bits eq_context_entry;
3893 3893
3894 u8 reserved_2[0x40]; 3894 u8 reserved_at_280[0x40];
3895 3895
3896 u8 event_bitmask[0x40]; 3896 u8 event_bitmask[0x40];
3897 3897
3898 u8 reserved_3[0x580]; 3898 u8 reserved_at_300[0x580];
3899 3899
3900 u8 pas[0][0x40]; 3900 u8 pas[0][0x40];
3901}; 3901};
3902 3902
3903struct mlx5_ifc_query_eq_in_bits { 3903struct mlx5_ifc_query_eq_in_bits {
3904 u8 opcode[0x10]; 3904 u8 opcode[0x10];
3905 u8 reserved_0[0x10]; 3905 u8 reserved_at_10[0x10];
3906 3906
3907 u8 reserved_1[0x10]; 3907 u8 reserved_at_20[0x10];
3908 u8 op_mod[0x10]; 3908 u8 op_mod[0x10];
3909 3909
3910 u8 reserved_2[0x18]; 3910 u8 reserved_at_40[0x18];
3911 u8 eq_number[0x8]; 3911 u8 eq_number[0x8];
3912 3912
3913 u8 reserved_3[0x20]; 3913 u8 reserved_at_60[0x20];
3914}; 3914};
3915 3915
3916struct mlx5_ifc_query_dct_out_bits { 3916struct mlx5_ifc_query_dct_out_bits {
3917 u8 status[0x8]; 3917 u8 status[0x8];
3918 u8 reserved_0[0x18]; 3918 u8 reserved_at_8[0x18];
3919 3919
3920 u8 syndrome[0x20]; 3920 u8 syndrome[0x20];
3921 3921
3922 u8 reserved_1[0x40]; 3922 u8 reserved_at_40[0x40];
3923 3923
3924 struct mlx5_ifc_dctc_bits dct_context_entry; 3924 struct mlx5_ifc_dctc_bits dct_context_entry;
3925 3925
3926 u8 reserved_2[0x180]; 3926 u8 reserved_at_280[0x180];
3927}; 3927};
3928 3928
3929struct mlx5_ifc_query_dct_in_bits { 3929struct mlx5_ifc_query_dct_in_bits {
3930 u8 opcode[0x10]; 3930 u8 opcode[0x10];
3931 u8 reserved_0[0x10]; 3931 u8 reserved_at_10[0x10];
3932 3932
3933 u8 reserved_1[0x10]; 3933 u8 reserved_at_20[0x10];
3934 u8 op_mod[0x10]; 3934 u8 op_mod[0x10];
3935 3935
3936 u8 reserved_2[0x8]; 3936 u8 reserved_at_40[0x8];
3937 u8 dctn[0x18]; 3937 u8 dctn[0x18];
3938 3938
3939 u8 reserved_3[0x20]; 3939 u8 reserved_at_60[0x20];
3940}; 3940};
3941 3941
3942struct mlx5_ifc_query_cq_out_bits { 3942struct mlx5_ifc_query_cq_out_bits {
3943 u8 status[0x8]; 3943 u8 status[0x8];
3944 u8 reserved_0[0x18]; 3944 u8 reserved_at_8[0x18];
3945 3945
3946 u8 syndrome[0x20]; 3946 u8 syndrome[0x20];
3947 3947
3948 u8 reserved_1[0x40]; 3948 u8 reserved_at_40[0x40];
3949 3949
3950 struct mlx5_ifc_cqc_bits cq_context; 3950 struct mlx5_ifc_cqc_bits cq_context;
3951 3951
3952 u8 reserved_2[0x600]; 3952 u8 reserved_at_280[0x600];
3953 3953
3954 u8 pas[0][0x40]; 3954 u8 pas[0][0x40];
3955}; 3955};
3956 3956
3957struct mlx5_ifc_query_cq_in_bits { 3957struct mlx5_ifc_query_cq_in_bits {
3958 u8 opcode[0x10]; 3958 u8 opcode[0x10];
3959 u8 reserved_0[0x10]; 3959 u8 reserved_at_10[0x10];
3960 3960
3961 u8 reserved_1[0x10]; 3961 u8 reserved_at_20[0x10];
3962 u8 op_mod[0x10]; 3962 u8 op_mod[0x10];
3963 3963
3964 u8 reserved_2[0x8]; 3964 u8 reserved_at_40[0x8];
3965 u8 cqn[0x18]; 3965 u8 cqn[0x18];
3966 3966
3967 u8 reserved_3[0x20]; 3967 u8 reserved_at_60[0x20];
3968}; 3968};
3969 3969
3970struct mlx5_ifc_query_cong_status_out_bits { 3970struct mlx5_ifc_query_cong_status_out_bits {
3971 u8 status[0x8]; 3971 u8 status[0x8];
3972 u8 reserved_0[0x18]; 3972 u8 reserved_at_8[0x18];
3973 3973
3974 u8 syndrome[0x20]; 3974 u8 syndrome[0x20];
3975 3975
3976 u8 reserved_1[0x20]; 3976 u8 reserved_at_40[0x20];
3977 3977
3978 u8 enable[0x1]; 3978 u8 enable[0x1];
3979 u8 tag_enable[0x1]; 3979 u8 tag_enable[0x1];
3980 u8 reserved_2[0x1e]; 3980 u8 reserved_at_62[0x1e];
3981}; 3981};
3982 3982
3983struct mlx5_ifc_query_cong_status_in_bits { 3983struct mlx5_ifc_query_cong_status_in_bits {
3984 u8 opcode[0x10]; 3984 u8 opcode[0x10];
3985 u8 reserved_0[0x10]; 3985 u8 reserved_at_10[0x10];
3986 3986
3987 u8 reserved_1[0x10]; 3987 u8 reserved_at_20[0x10];
3988 u8 op_mod[0x10]; 3988 u8 op_mod[0x10];
3989 3989
3990 u8 reserved_2[0x18]; 3990 u8 reserved_at_40[0x18];
3991 u8 priority[0x4]; 3991 u8 priority[0x4];
3992 u8 cong_protocol[0x4]; 3992 u8 cong_protocol[0x4];
3993 3993
3994 u8 reserved_3[0x20]; 3994 u8 reserved_at_60[0x20];
3995}; 3995};
3996 3996
3997struct mlx5_ifc_query_cong_statistics_out_bits { 3997struct mlx5_ifc_query_cong_statistics_out_bits {
3998 u8 status[0x8]; 3998 u8 status[0x8];
3999 u8 reserved_0[0x18]; 3999 u8 reserved_at_8[0x18];
4000 4000
4001 u8 syndrome[0x20]; 4001 u8 syndrome[0x20];
4002 4002
4003 u8 reserved_1[0x40]; 4003 u8 reserved_at_40[0x40];
4004 4004
4005 u8 cur_flows[0x20]; 4005 u8 cur_flows[0x20];
4006 4006
@@ -4014,7 +4014,7 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4014 4014
4015 u8 cnp_handled_low[0x20]; 4015 u8 cnp_handled_low[0x20];
4016 4016
4017 u8 reserved_2[0x100]; 4017 u8 reserved_at_140[0x100];
4018 4018
4019 u8 time_stamp_high[0x20]; 4019 u8 time_stamp_high[0x20];
4020 4020
@@ -4030,453 +4030,453 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
4030 4030
4031 u8 cnps_sent_low[0x20]; 4031 u8 cnps_sent_low[0x20];
4032 4032
4033 u8 reserved_3[0x560]; 4033 u8 reserved_at_320[0x560];
4034}; 4034};
4035 4035
4036struct mlx5_ifc_query_cong_statistics_in_bits { 4036struct mlx5_ifc_query_cong_statistics_in_bits {
4037 u8 opcode[0x10]; 4037 u8 opcode[0x10];
4038 u8 reserved_0[0x10]; 4038 u8 reserved_at_10[0x10];
4039 4039
4040 u8 reserved_1[0x10]; 4040 u8 reserved_at_20[0x10];
4041 u8 op_mod[0x10]; 4041 u8 op_mod[0x10];
4042 4042
4043 u8 clear[0x1]; 4043 u8 clear[0x1];
4044 u8 reserved_2[0x1f]; 4044 u8 reserved_at_41[0x1f];
4045 4045
4046 u8 reserved_3[0x20]; 4046 u8 reserved_at_60[0x20];
4047}; 4047};
4048 4048
4049struct mlx5_ifc_query_cong_params_out_bits { 4049struct mlx5_ifc_query_cong_params_out_bits {
4050 u8 status[0x8]; 4050 u8 status[0x8];
4051 u8 reserved_0[0x18]; 4051 u8 reserved_at_8[0x18];
4052 4052
4053 u8 syndrome[0x20]; 4053 u8 syndrome[0x20];
4054 4054
4055 u8 reserved_1[0x40]; 4055 u8 reserved_at_40[0x40];
4056 4056
4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4057 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4058}; 4058};
4059 4059
4060struct mlx5_ifc_query_cong_params_in_bits { 4060struct mlx5_ifc_query_cong_params_in_bits {
4061 u8 opcode[0x10]; 4061 u8 opcode[0x10];
4062 u8 reserved_0[0x10]; 4062 u8 reserved_at_10[0x10];
4063 4063
4064 u8 reserved_1[0x10]; 4064 u8 reserved_at_20[0x10];
4065 u8 op_mod[0x10]; 4065 u8 op_mod[0x10];
4066 4066
4067 u8 reserved_2[0x1c]; 4067 u8 reserved_at_40[0x1c];
4068 u8 cong_protocol[0x4]; 4068 u8 cong_protocol[0x4];
4069 4069
4070 u8 reserved_3[0x20]; 4070 u8 reserved_at_60[0x20];
4071}; 4071};
4072 4072
4073struct mlx5_ifc_query_adapter_out_bits { 4073struct mlx5_ifc_query_adapter_out_bits {
4074 u8 status[0x8]; 4074 u8 status[0x8];
4075 u8 reserved_0[0x18]; 4075 u8 reserved_at_8[0x18];
4076 4076
4077 u8 syndrome[0x20]; 4077 u8 syndrome[0x20];
4078 4078
4079 u8 reserved_1[0x40]; 4079 u8 reserved_at_40[0x40];
4080 4080
4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; 4081 struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
4082}; 4082};
4083 4083
4084struct mlx5_ifc_query_adapter_in_bits { 4084struct mlx5_ifc_query_adapter_in_bits {
4085 u8 opcode[0x10]; 4085 u8 opcode[0x10];
4086 u8 reserved_0[0x10]; 4086 u8 reserved_at_10[0x10];
4087 4087
4088 u8 reserved_1[0x10]; 4088 u8 reserved_at_20[0x10];
4089 u8 op_mod[0x10]; 4089 u8 op_mod[0x10];
4090 4090
4091 u8 reserved_2[0x40]; 4091 u8 reserved_at_40[0x40];
4092}; 4092};
4093 4093
4094struct mlx5_ifc_qp_2rst_out_bits { 4094struct mlx5_ifc_qp_2rst_out_bits {
4095 u8 status[0x8]; 4095 u8 status[0x8];
4096 u8 reserved_0[0x18]; 4096 u8 reserved_at_8[0x18];
4097 4097
4098 u8 syndrome[0x20]; 4098 u8 syndrome[0x20];
4099 4099
4100 u8 reserved_1[0x40]; 4100 u8 reserved_at_40[0x40];
4101}; 4101};
4102 4102
4103struct mlx5_ifc_qp_2rst_in_bits { 4103struct mlx5_ifc_qp_2rst_in_bits {
4104 u8 opcode[0x10]; 4104 u8 opcode[0x10];
4105 u8 reserved_0[0x10]; 4105 u8 reserved_at_10[0x10];
4106 4106
4107 u8 reserved_1[0x10]; 4107 u8 reserved_at_20[0x10];
4108 u8 op_mod[0x10]; 4108 u8 op_mod[0x10];
4109 4109
4110 u8 reserved_2[0x8]; 4110 u8 reserved_at_40[0x8];
4111 u8 qpn[0x18]; 4111 u8 qpn[0x18];
4112 4112
4113 u8 reserved_3[0x20]; 4113 u8 reserved_at_60[0x20];
4114}; 4114};
4115 4115
4116struct mlx5_ifc_qp_2err_out_bits { 4116struct mlx5_ifc_qp_2err_out_bits {
4117 u8 status[0x8]; 4117 u8 status[0x8];
4118 u8 reserved_0[0x18]; 4118 u8 reserved_at_8[0x18];
4119 4119
4120 u8 syndrome[0x20]; 4120 u8 syndrome[0x20];
4121 4121
4122 u8 reserved_1[0x40]; 4122 u8 reserved_at_40[0x40];
4123}; 4123};
4124 4124
4125struct mlx5_ifc_qp_2err_in_bits { 4125struct mlx5_ifc_qp_2err_in_bits {
4126 u8 opcode[0x10]; 4126 u8 opcode[0x10];
4127 u8 reserved_0[0x10]; 4127 u8 reserved_at_10[0x10];
4128 4128
4129 u8 reserved_1[0x10]; 4129 u8 reserved_at_20[0x10];
4130 u8 op_mod[0x10]; 4130 u8 op_mod[0x10];
4131 4131
4132 u8 reserved_2[0x8]; 4132 u8 reserved_at_40[0x8];
4133 u8 qpn[0x18]; 4133 u8 qpn[0x18];
4134 4134
4135 u8 reserved_3[0x20]; 4135 u8 reserved_at_60[0x20];
4136}; 4136};
4137 4137
4138struct mlx5_ifc_page_fault_resume_out_bits { 4138struct mlx5_ifc_page_fault_resume_out_bits {
4139 u8 status[0x8]; 4139 u8 status[0x8];
4140 u8 reserved_0[0x18]; 4140 u8 reserved_at_8[0x18];
4141 4141
4142 u8 syndrome[0x20]; 4142 u8 syndrome[0x20];
4143 4143
4144 u8 reserved_1[0x40]; 4144 u8 reserved_at_40[0x40];
4145}; 4145};
4146 4146
4147struct mlx5_ifc_page_fault_resume_in_bits { 4147struct mlx5_ifc_page_fault_resume_in_bits {
4148 u8 opcode[0x10]; 4148 u8 opcode[0x10];
4149 u8 reserved_0[0x10]; 4149 u8 reserved_at_10[0x10];
4150 4150
4151 u8 reserved_1[0x10]; 4151 u8 reserved_at_20[0x10];
4152 u8 op_mod[0x10]; 4152 u8 op_mod[0x10];
4153 4153
4154 u8 error[0x1]; 4154 u8 error[0x1];
4155 u8 reserved_2[0x4]; 4155 u8 reserved_at_41[0x4];
4156 u8 rdma[0x1]; 4156 u8 rdma[0x1];
4157 u8 read_write[0x1]; 4157 u8 read_write[0x1];
4158 u8 req_res[0x1]; 4158 u8 req_res[0x1];
4159 u8 qpn[0x18]; 4159 u8 qpn[0x18];
4160 4160
4161 u8 reserved_3[0x20]; 4161 u8 reserved_at_60[0x20];
4162}; 4162};
4163 4163
4164struct mlx5_ifc_nop_out_bits { 4164struct mlx5_ifc_nop_out_bits {
4165 u8 status[0x8]; 4165 u8 status[0x8];
4166 u8 reserved_0[0x18]; 4166 u8 reserved_at_8[0x18];
4167 4167
4168 u8 syndrome[0x20]; 4168 u8 syndrome[0x20];
4169 4169
4170 u8 reserved_1[0x40]; 4170 u8 reserved_at_40[0x40];
4171}; 4171};
4172 4172
4173struct mlx5_ifc_nop_in_bits { 4173struct mlx5_ifc_nop_in_bits {
4174 u8 opcode[0x10]; 4174 u8 opcode[0x10];
4175 u8 reserved_0[0x10]; 4175 u8 reserved_at_10[0x10];
4176 4176
4177 u8 reserved_1[0x10]; 4177 u8 reserved_at_20[0x10];
4178 u8 op_mod[0x10]; 4178 u8 op_mod[0x10];
4179 4179
4180 u8 reserved_2[0x40]; 4180 u8 reserved_at_40[0x40];
4181}; 4181};
4182 4182
4183struct mlx5_ifc_modify_vport_state_out_bits { 4183struct mlx5_ifc_modify_vport_state_out_bits {
4184 u8 status[0x8]; 4184 u8 status[0x8];
4185 u8 reserved_0[0x18]; 4185 u8 reserved_at_8[0x18];
4186 4186
4187 u8 syndrome[0x20]; 4187 u8 syndrome[0x20];
4188 4188
4189 u8 reserved_1[0x40]; 4189 u8 reserved_at_40[0x40];
4190}; 4190};
4191 4191
4192struct mlx5_ifc_modify_vport_state_in_bits { 4192struct mlx5_ifc_modify_vport_state_in_bits {
4193 u8 opcode[0x10]; 4193 u8 opcode[0x10];
4194 u8 reserved_0[0x10]; 4194 u8 reserved_at_10[0x10];
4195 4195
4196 u8 reserved_1[0x10]; 4196 u8 reserved_at_20[0x10];
4197 u8 op_mod[0x10]; 4197 u8 op_mod[0x10];
4198 4198
4199 u8 other_vport[0x1]; 4199 u8 other_vport[0x1];
4200 u8 reserved_2[0xf]; 4200 u8 reserved_at_41[0xf];
4201 u8 vport_number[0x10]; 4201 u8 vport_number[0x10];
4202 4202
4203 u8 reserved_3[0x18]; 4203 u8 reserved_at_60[0x18];
4204 u8 admin_state[0x4]; 4204 u8 admin_state[0x4];
4205 u8 reserved_4[0x4]; 4205 u8 reserved_at_7c[0x4];
4206}; 4206};
4207 4207
4208struct mlx5_ifc_modify_tis_out_bits { 4208struct mlx5_ifc_modify_tis_out_bits {
4209 u8 status[0x8]; 4209 u8 status[0x8];
4210 u8 reserved_0[0x18]; 4210 u8 reserved_at_8[0x18];
4211 4211
4212 u8 syndrome[0x20]; 4212 u8 syndrome[0x20];
4213 4213
4214 u8 reserved_1[0x40]; 4214 u8 reserved_at_40[0x40];
4215}; 4215};
4216 4216
4217struct mlx5_ifc_modify_tis_bitmask_bits { 4217struct mlx5_ifc_modify_tis_bitmask_bits {
4218 u8 reserved_0[0x20]; 4218 u8 reserved_at_0[0x20];
4219 4219
4220 u8 reserved_1[0x1f]; 4220 u8 reserved_at_20[0x1f];
4221 u8 prio[0x1]; 4221 u8 prio[0x1];
4222}; 4222};
4223 4223
4224struct mlx5_ifc_modify_tis_in_bits { 4224struct mlx5_ifc_modify_tis_in_bits {
4225 u8 opcode[0x10]; 4225 u8 opcode[0x10];
4226 u8 reserved_0[0x10]; 4226 u8 reserved_at_10[0x10];
4227 4227
4228 u8 reserved_1[0x10]; 4228 u8 reserved_at_20[0x10];
4229 u8 op_mod[0x10]; 4229 u8 op_mod[0x10];
4230 4230
4231 u8 reserved_2[0x8]; 4231 u8 reserved_at_40[0x8];
4232 u8 tisn[0x18]; 4232 u8 tisn[0x18];
4233 4233
4234 u8 reserved_3[0x20]; 4234 u8 reserved_at_60[0x20];
4235 4235
4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask; 4236 struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
4237 4237
4238 u8 reserved_4[0x40]; 4238 u8 reserved_at_c0[0x40];
4239 4239
4240 struct mlx5_ifc_tisc_bits ctx; 4240 struct mlx5_ifc_tisc_bits ctx;
4241}; 4241};
4242 4242
4243struct mlx5_ifc_modify_tir_bitmask_bits { 4243struct mlx5_ifc_modify_tir_bitmask_bits {
4244 u8 reserved_0[0x20]; 4244 u8 reserved_at_0[0x20];
4245 4245
4246 u8 reserved_1[0x1b]; 4246 u8 reserved_at_20[0x1b];
4247 u8 self_lb_en[0x1]; 4247 u8 self_lb_en[0x1];
4248 u8 reserved_2[0x3]; 4248 u8 reserved_at_3c[0x3];
4249 u8 lro[0x1]; 4249 u8 lro[0x1];
4250}; 4250};
4251 4251
4252struct mlx5_ifc_modify_tir_out_bits { 4252struct mlx5_ifc_modify_tir_out_bits {
4253 u8 status[0x8]; 4253 u8 status[0x8];
4254 u8 reserved_0[0x18]; 4254 u8 reserved_at_8[0x18];
4255 4255
4256 u8 syndrome[0x20]; 4256 u8 syndrome[0x20];
4257 4257
4258 u8 reserved_1[0x40]; 4258 u8 reserved_at_40[0x40];
4259}; 4259};
4260 4260
4261struct mlx5_ifc_modify_tir_in_bits { 4261struct mlx5_ifc_modify_tir_in_bits {
4262 u8 opcode[0x10]; 4262 u8 opcode[0x10];
4263 u8 reserved_0[0x10]; 4263 u8 reserved_at_10[0x10];
4264 4264
4265 u8 reserved_1[0x10]; 4265 u8 reserved_at_20[0x10];
4266 u8 op_mod[0x10]; 4266 u8 op_mod[0x10];
4267 4267
4268 u8 reserved_2[0x8]; 4268 u8 reserved_at_40[0x8];
4269 u8 tirn[0x18]; 4269 u8 tirn[0x18];
4270 4270
4271 u8 reserved_3[0x20]; 4271 u8 reserved_at_60[0x20];
4272 4272
4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask; 4273 struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
4274 4274
4275 u8 reserved_4[0x40]; 4275 u8 reserved_at_c0[0x40];
4276 4276
4277 struct mlx5_ifc_tirc_bits ctx; 4277 struct mlx5_ifc_tirc_bits ctx;
4278}; 4278};
4279 4279
4280struct mlx5_ifc_modify_sq_out_bits { 4280struct mlx5_ifc_modify_sq_out_bits {
4281 u8 status[0x8]; 4281 u8 status[0x8];
4282 u8 reserved_0[0x18]; 4282 u8 reserved_at_8[0x18];
4283 4283
4284 u8 syndrome[0x20]; 4284 u8 syndrome[0x20];
4285 4285
4286 u8 reserved_1[0x40]; 4286 u8 reserved_at_40[0x40];
4287}; 4287};
4288 4288
4289struct mlx5_ifc_modify_sq_in_bits { 4289struct mlx5_ifc_modify_sq_in_bits {
4290 u8 opcode[0x10]; 4290 u8 opcode[0x10];
4291 u8 reserved_0[0x10]; 4291 u8 reserved_at_10[0x10];
4292 4292
4293 u8 reserved_1[0x10]; 4293 u8 reserved_at_20[0x10];
4294 u8 op_mod[0x10]; 4294 u8 op_mod[0x10];
4295 4295
4296 u8 sq_state[0x4]; 4296 u8 sq_state[0x4];
4297 u8 reserved_2[0x4]; 4297 u8 reserved_at_44[0x4];
4298 u8 sqn[0x18]; 4298 u8 sqn[0x18];
4299 4299
4300 u8 reserved_3[0x20]; 4300 u8 reserved_at_60[0x20];
4301 4301
4302 u8 modify_bitmask[0x40]; 4302 u8 modify_bitmask[0x40];
4303 4303
4304 u8 reserved_4[0x40]; 4304 u8 reserved_at_c0[0x40];
4305 4305
4306 struct mlx5_ifc_sqc_bits ctx; 4306 struct mlx5_ifc_sqc_bits ctx;
4307}; 4307};
4308 4308
4309struct mlx5_ifc_modify_rqt_out_bits { 4309struct mlx5_ifc_modify_rqt_out_bits {
4310 u8 status[0x8]; 4310 u8 status[0x8];
4311 u8 reserved_0[0x18]; 4311 u8 reserved_at_8[0x18];
4312 4312
4313 u8 syndrome[0x20]; 4313 u8 syndrome[0x20];
4314 4314
4315 u8 reserved_1[0x40]; 4315 u8 reserved_at_40[0x40];
4316}; 4316};
4317 4317
4318struct mlx5_ifc_rqt_bitmask_bits { 4318struct mlx5_ifc_rqt_bitmask_bits {
4319 u8 reserved[0x20]; 4319 u8 reserved_at_0[0x20];
4320 4320
4321 u8 reserved1[0x1f]; 4321 u8 reserved_at_20[0x1f];
4322 u8 rqn_list[0x1]; 4322 u8 rqn_list[0x1];
4323}; 4323};
4324 4324
4325struct mlx5_ifc_modify_rqt_in_bits { 4325struct mlx5_ifc_modify_rqt_in_bits {
4326 u8 opcode[0x10]; 4326 u8 opcode[0x10];
4327 u8 reserved_0[0x10]; 4327 u8 reserved_at_10[0x10];
4328 4328
4329 u8 reserved_1[0x10]; 4329 u8 reserved_at_20[0x10];
4330 u8 op_mod[0x10]; 4330 u8 op_mod[0x10];
4331 4331
4332 u8 reserved_2[0x8]; 4332 u8 reserved_at_40[0x8];
4333 u8 rqtn[0x18]; 4333 u8 rqtn[0x18];
4334 4334
4335 u8 reserved_3[0x20]; 4335 u8 reserved_at_60[0x20];
4336 4336
4337 struct mlx5_ifc_rqt_bitmask_bits bitmask; 4337 struct mlx5_ifc_rqt_bitmask_bits bitmask;
4338 4338
4339 u8 reserved_4[0x40]; 4339 u8 reserved_at_c0[0x40];
4340 4340
4341 struct mlx5_ifc_rqtc_bits ctx; 4341 struct mlx5_ifc_rqtc_bits ctx;
4342}; 4342};
4343 4343
4344struct mlx5_ifc_modify_rq_out_bits { 4344struct mlx5_ifc_modify_rq_out_bits {
4345 u8 status[0x8]; 4345 u8 status[0x8];
4346 u8 reserved_0[0x18]; 4346 u8 reserved_at_8[0x18];
4347 4347
4348 u8 syndrome[0x20]; 4348 u8 syndrome[0x20];
4349 4349
4350 u8 reserved_1[0x40]; 4350 u8 reserved_at_40[0x40];
4351}; 4351};
4352 4352
4353struct mlx5_ifc_modify_rq_in_bits { 4353struct mlx5_ifc_modify_rq_in_bits {
4354 u8 opcode[0x10]; 4354 u8 opcode[0x10];
4355 u8 reserved_0[0x10]; 4355 u8 reserved_at_10[0x10];
4356 4356
4357 u8 reserved_1[0x10]; 4357 u8 reserved_at_20[0x10];
4358 u8 op_mod[0x10]; 4358 u8 op_mod[0x10];
4359 4359
4360 u8 rq_state[0x4]; 4360 u8 rq_state[0x4];
4361 u8 reserved_2[0x4]; 4361 u8 reserved_at_44[0x4];
4362 u8 rqn[0x18]; 4362 u8 rqn[0x18];
4363 4363
4364 u8 reserved_3[0x20]; 4364 u8 reserved_at_60[0x20];
4365 4365
4366 u8 modify_bitmask[0x40]; 4366 u8 modify_bitmask[0x40];
4367 4367
4368 u8 reserved_4[0x40]; 4368 u8 reserved_at_c0[0x40];
4369 4369
4370 struct mlx5_ifc_rqc_bits ctx; 4370 struct mlx5_ifc_rqc_bits ctx;
4371}; 4371};
4372 4372
4373struct mlx5_ifc_modify_rmp_out_bits { 4373struct mlx5_ifc_modify_rmp_out_bits {
4374 u8 status[0x8]; 4374 u8 status[0x8];
4375 u8 reserved_0[0x18]; 4375 u8 reserved_at_8[0x18];
4376 4376
4377 u8 syndrome[0x20]; 4377 u8 syndrome[0x20];
4378 4378
4379 u8 reserved_1[0x40]; 4379 u8 reserved_at_40[0x40];
4380}; 4380};
4381 4381
4382struct mlx5_ifc_rmp_bitmask_bits { 4382struct mlx5_ifc_rmp_bitmask_bits {
4383 u8 reserved[0x20]; 4383 u8 reserved_at_0[0x20];
4384 4384
4385 u8 reserved1[0x1f]; 4385 u8 reserved_at_20[0x1f];
4386 u8 lwm[0x1]; 4386 u8 lwm[0x1];
4387}; 4387};
4388 4388
4389struct mlx5_ifc_modify_rmp_in_bits { 4389struct mlx5_ifc_modify_rmp_in_bits {
4390 u8 opcode[0x10]; 4390 u8 opcode[0x10];
4391 u8 reserved_0[0x10]; 4391 u8 reserved_at_10[0x10];
4392 4392
4393 u8 reserved_1[0x10]; 4393 u8 reserved_at_20[0x10];
4394 u8 op_mod[0x10]; 4394 u8 op_mod[0x10];
4395 4395
4396 u8 rmp_state[0x4]; 4396 u8 rmp_state[0x4];
4397 u8 reserved_2[0x4]; 4397 u8 reserved_at_44[0x4];
4398 u8 rmpn[0x18]; 4398 u8 rmpn[0x18];
4399 4399
4400 u8 reserved_3[0x20]; 4400 u8 reserved_at_60[0x20];
4401 4401
4402 struct mlx5_ifc_rmp_bitmask_bits bitmask; 4402 struct mlx5_ifc_rmp_bitmask_bits bitmask;
4403 4403
4404 u8 reserved_4[0x40]; 4404 u8 reserved_at_c0[0x40];
4405 4405
4406 struct mlx5_ifc_rmpc_bits ctx; 4406 struct mlx5_ifc_rmpc_bits ctx;
4407}; 4407};
4408 4408
4409struct mlx5_ifc_modify_nic_vport_context_out_bits { 4409struct mlx5_ifc_modify_nic_vport_context_out_bits {
4410 u8 status[0x8]; 4410 u8 status[0x8];
4411 u8 reserved_0[0x18]; 4411 u8 reserved_at_8[0x18];
4412 4412
4413 u8 syndrome[0x20]; 4413 u8 syndrome[0x20];
4414 4414
4415 u8 reserved_1[0x40]; 4415 u8 reserved_at_40[0x40];
4416}; 4416};
4417 4417
4418struct mlx5_ifc_modify_nic_vport_field_select_bits { 4418struct mlx5_ifc_modify_nic_vport_field_select_bits {
4419 u8 reserved_0[0x19]; 4419 u8 reserved_at_0[0x19];
4420 u8 mtu[0x1]; 4420 u8 mtu[0x1];
4421 u8 change_event[0x1]; 4421 u8 change_event[0x1];
4422 u8 promisc[0x1]; 4422 u8 promisc[0x1];
4423 u8 permanent_address[0x1]; 4423 u8 permanent_address[0x1];
4424 u8 addresses_list[0x1]; 4424 u8 addresses_list[0x1];
4425 u8 roce_en[0x1]; 4425 u8 roce_en[0x1];
4426 u8 reserved_1[0x1]; 4426 u8 reserved_at_1f[0x1];
4427}; 4427};
4428 4428
4429struct mlx5_ifc_modify_nic_vport_context_in_bits { 4429struct mlx5_ifc_modify_nic_vport_context_in_bits {
4430 u8 opcode[0x10]; 4430 u8 opcode[0x10];
4431 u8 reserved_0[0x10]; 4431 u8 reserved_at_10[0x10];
4432 4432
4433 u8 reserved_1[0x10]; 4433 u8 reserved_at_20[0x10];
4434 u8 op_mod[0x10]; 4434 u8 op_mod[0x10];
4435 4435
4436 u8 other_vport[0x1]; 4436 u8 other_vport[0x1];
4437 u8 reserved_2[0xf]; 4437 u8 reserved_at_41[0xf];
4438 u8 vport_number[0x10]; 4438 u8 vport_number[0x10];
4439 4439
4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; 4440 struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
4441 4441
4442 u8 reserved_3[0x780]; 4442 u8 reserved_at_80[0x780];
4443 4443
4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context; 4444 struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
4445}; 4445};
4446 4446
4447struct mlx5_ifc_modify_hca_vport_context_out_bits { 4447struct mlx5_ifc_modify_hca_vport_context_out_bits {
4448 u8 status[0x8]; 4448 u8 status[0x8];
4449 u8 reserved_0[0x18]; 4449 u8 reserved_at_8[0x18];
4450 4450
4451 u8 syndrome[0x20]; 4451 u8 syndrome[0x20];
4452 4452
4453 u8 reserved_1[0x40]; 4453 u8 reserved_at_40[0x40];
4454}; 4454};
4455 4455
4456struct mlx5_ifc_modify_hca_vport_context_in_bits { 4456struct mlx5_ifc_modify_hca_vport_context_in_bits {
4457 u8 opcode[0x10]; 4457 u8 opcode[0x10];
4458 u8 reserved_0[0x10]; 4458 u8 reserved_at_10[0x10];
4459 4459
4460 u8 reserved_1[0x10]; 4460 u8 reserved_at_20[0x10];
4461 u8 op_mod[0x10]; 4461 u8 op_mod[0x10];
4462 4462
4463 u8 other_vport[0x1]; 4463 u8 other_vport[0x1];
4464 u8 reserved_2[0xb]; 4464 u8 reserved_at_41[0xb];
4465 u8 port_num[0x4]; 4465 u8 port_num[0x4];
4466 u8 vport_number[0x10]; 4466 u8 vport_number[0x10];
4467 4467
4468 u8 reserved_3[0x20]; 4468 u8 reserved_at_60[0x20];
4469 4469
4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context; 4470 struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
4471}; 4471};
4472 4472
4473struct mlx5_ifc_modify_cq_out_bits { 4473struct mlx5_ifc_modify_cq_out_bits {
4474 u8 status[0x8]; 4474 u8 status[0x8];
4475 u8 reserved_0[0x18]; 4475 u8 reserved_at_8[0x18];
4476 4476
4477 u8 syndrome[0x20]; 4477 u8 syndrome[0x20];
4478 4478
4479 u8 reserved_1[0x40]; 4479 u8 reserved_at_40[0x40];
4480}; 4480};
4481 4481
4482enum { 4482enum {
@@ -4486,83 +4486,83 @@ enum {
4486 4486
4487struct mlx5_ifc_modify_cq_in_bits { 4487struct mlx5_ifc_modify_cq_in_bits {
4488 u8 opcode[0x10]; 4488 u8 opcode[0x10];
4489 u8 reserved_0[0x10]; 4489 u8 reserved_at_10[0x10];
4490 4490
4491 u8 reserved_1[0x10]; 4491 u8 reserved_at_20[0x10];
4492 u8 op_mod[0x10]; 4492 u8 op_mod[0x10];
4493 4493
4494 u8 reserved_2[0x8]; 4494 u8 reserved_at_40[0x8];
4495 u8 cqn[0x18]; 4495 u8 cqn[0x18];
4496 4496
4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; 4497 union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
4498 4498
4499 struct mlx5_ifc_cqc_bits cq_context; 4499 struct mlx5_ifc_cqc_bits cq_context;
4500 4500
4501 u8 reserved_3[0x600]; 4501 u8 reserved_at_280[0x600];
4502 4502
4503 u8 pas[0][0x40]; 4503 u8 pas[0][0x40];
4504}; 4504};
4505 4505
4506struct mlx5_ifc_modify_cong_status_out_bits { 4506struct mlx5_ifc_modify_cong_status_out_bits {
4507 u8 status[0x8]; 4507 u8 status[0x8];
4508 u8 reserved_0[0x18]; 4508 u8 reserved_at_8[0x18];
4509 4509
4510 u8 syndrome[0x20]; 4510 u8 syndrome[0x20];
4511 4511
4512 u8 reserved_1[0x40]; 4512 u8 reserved_at_40[0x40];
4513}; 4513};
4514 4514
4515struct mlx5_ifc_modify_cong_status_in_bits { 4515struct mlx5_ifc_modify_cong_status_in_bits {
4516 u8 opcode[0x10]; 4516 u8 opcode[0x10];
4517 u8 reserved_0[0x10]; 4517 u8 reserved_at_10[0x10];
4518 4518
4519 u8 reserved_1[0x10]; 4519 u8 reserved_at_20[0x10];
4520 u8 op_mod[0x10]; 4520 u8 op_mod[0x10];
4521 4521
4522 u8 reserved_2[0x18]; 4522 u8 reserved_at_40[0x18];
4523 u8 priority[0x4]; 4523 u8 priority[0x4];
4524 u8 cong_protocol[0x4]; 4524 u8 cong_protocol[0x4];
4525 4525
4526 u8 enable[0x1]; 4526 u8 enable[0x1];
4527 u8 tag_enable[0x1]; 4527 u8 tag_enable[0x1];
4528 u8 reserved_3[0x1e]; 4528 u8 reserved_at_62[0x1e];
4529}; 4529};
4530 4530
4531struct mlx5_ifc_modify_cong_params_out_bits { 4531struct mlx5_ifc_modify_cong_params_out_bits {
4532 u8 status[0x8]; 4532 u8 status[0x8];
4533 u8 reserved_0[0x18]; 4533 u8 reserved_at_8[0x18];
4534 4534
4535 u8 syndrome[0x20]; 4535 u8 syndrome[0x20];
4536 4536
4537 u8 reserved_1[0x40]; 4537 u8 reserved_at_40[0x40];
4538}; 4538};
4539 4539
4540struct mlx5_ifc_modify_cong_params_in_bits { 4540struct mlx5_ifc_modify_cong_params_in_bits {
4541 u8 opcode[0x10]; 4541 u8 opcode[0x10];
4542 u8 reserved_0[0x10]; 4542 u8 reserved_at_10[0x10];
4543 4543
4544 u8 reserved_1[0x10]; 4544 u8 reserved_at_20[0x10];
4545 u8 op_mod[0x10]; 4545 u8 op_mod[0x10];
4546 4546
4547 u8 reserved_2[0x1c]; 4547 u8 reserved_at_40[0x1c];
4548 u8 cong_protocol[0x4]; 4548 u8 cong_protocol[0x4];
4549 4549
4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; 4550 union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
4551 4551
4552 u8 reserved_3[0x80]; 4552 u8 reserved_at_80[0x80];
4553 4553
4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; 4554 union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
4555}; 4555};
4556 4556
4557struct mlx5_ifc_manage_pages_out_bits { 4557struct mlx5_ifc_manage_pages_out_bits {
4558 u8 status[0x8]; 4558 u8 status[0x8];
4559 u8 reserved_0[0x18]; 4559 u8 reserved_at_8[0x18];
4560 4560
4561 u8 syndrome[0x20]; 4561 u8 syndrome[0x20];
4562 4562
4563 u8 output_num_entries[0x20]; 4563 u8 output_num_entries[0x20];
4564 4564
4565 u8 reserved_1[0x20]; 4565 u8 reserved_at_60[0x20];
4566 4566
4567 u8 pas[0][0x40]; 4567 u8 pas[0][0x40];
4568}; 4568};
@@ -4575,12 +4575,12 @@ enum {
4575 4575
4576struct mlx5_ifc_manage_pages_in_bits { 4576struct mlx5_ifc_manage_pages_in_bits {
4577 u8 opcode[0x10]; 4577 u8 opcode[0x10];
4578 u8 reserved_0[0x10]; 4578 u8 reserved_at_10[0x10];
4579 4579
4580 u8 reserved_1[0x10]; 4580 u8 reserved_at_20[0x10];
4581 u8 op_mod[0x10]; 4581 u8 op_mod[0x10];
4582 4582
4583 u8 reserved_2[0x10]; 4583 u8 reserved_at_40[0x10];
4584 u8 function_id[0x10]; 4584 u8 function_id[0x10];
4585 4585
4586 u8 input_num_entries[0x20]; 4586 u8 input_num_entries[0x20];
@@ -4590,117 +4590,117 @@ struct mlx5_ifc_manage_pages_in_bits {
4590 4590
4591struct mlx5_ifc_mad_ifc_out_bits { 4591struct mlx5_ifc_mad_ifc_out_bits {
4592 u8 status[0x8]; 4592 u8 status[0x8];
4593 u8 reserved_0[0x18]; 4593 u8 reserved_at_8[0x18];
4594 4594
4595 u8 syndrome[0x20]; 4595 u8 syndrome[0x20];
4596 4596
4597 u8 reserved_1[0x40]; 4597 u8 reserved_at_40[0x40];
4598 4598
4599 u8 response_mad_packet[256][0x8]; 4599 u8 response_mad_packet[256][0x8];
4600}; 4600};
4601 4601
4602struct mlx5_ifc_mad_ifc_in_bits { 4602struct mlx5_ifc_mad_ifc_in_bits {
4603 u8 opcode[0x10]; 4603 u8 opcode[0x10];
4604 u8 reserved_0[0x10]; 4604 u8 reserved_at_10[0x10];
4605 4605
4606 u8 reserved_1[0x10]; 4606 u8 reserved_at_20[0x10];
4607 u8 op_mod[0x10]; 4607 u8 op_mod[0x10];
4608 4608
4609 u8 remote_lid[0x10]; 4609 u8 remote_lid[0x10];
4610 u8 reserved_2[0x8]; 4610 u8 reserved_at_50[0x8];
4611 u8 port[0x8]; 4611 u8 port[0x8];
4612 4612
4613 u8 reserved_3[0x20]; 4613 u8 reserved_at_60[0x20];
4614 4614
4615 u8 mad[256][0x8]; 4615 u8 mad[256][0x8];
4616}; 4616};
4617 4617
4618struct mlx5_ifc_init_hca_out_bits { 4618struct mlx5_ifc_init_hca_out_bits {
4619 u8 status[0x8]; 4619 u8 status[0x8];
4620 u8 reserved_0[0x18]; 4620 u8 reserved_at_8[0x18];
4621 4621
4622 u8 syndrome[0x20]; 4622 u8 syndrome[0x20];
4623 4623
4624 u8 reserved_1[0x40]; 4624 u8 reserved_at_40[0x40];
4625}; 4625};
4626 4626
4627struct mlx5_ifc_init_hca_in_bits { 4627struct mlx5_ifc_init_hca_in_bits {
4628 u8 opcode[0x10]; 4628 u8 opcode[0x10];
4629 u8 reserved_0[0x10]; 4629 u8 reserved_at_10[0x10];
4630 4630
4631 u8 reserved_1[0x10]; 4631 u8 reserved_at_20[0x10];
4632 u8 op_mod[0x10]; 4632 u8 op_mod[0x10];
4633 4633
4634 u8 reserved_2[0x40]; 4634 u8 reserved_at_40[0x40];
4635}; 4635};
4636 4636
4637struct mlx5_ifc_init2rtr_qp_out_bits { 4637struct mlx5_ifc_init2rtr_qp_out_bits {
4638 u8 status[0x8]; 4638 u8 status[0x8];
4639 u8 reserved_0[0x18]; 4639 u8 reserved_at_8[0x18];
4640 4640
4641 u8 syndrome[0x20]; 4641 u8 syndrome[0x20];
4642 4642
4643 u8 reserved_1[0x40]; 4643 u8 reserved_at_40[0x40];
4644}; 4644};
4645 4645
4646struct mlx5_ifc_init2rtr_qp_in_bits { 4646struct mlx5_ifc_init2rtr_qp_in_bits {
4647 u8 opcode[0x10]; 4647 u8 opcode[0x10];
4648 u8 reserved_0[0x10]; 4648 u8 reserved_at_10[0x10];
4649 4649
4650 u8 reserved_1[0x10]; 4650 u8 reserved_at_20[0x10];
4651 u8 op_mod[0x10]; 4651 u8 op_mod[0x10];
4652 4652
4653 u8 reserved_2[0x8]; 4653 u8 reserved_at_40[0x8];
4654 u8 qpn[0x18]; 4654 u8 qpn[0x18];
4655 4655
4656 u8 reserved_3[0x20]; 4656 u8 reserved_at_60[0x20];
4657 4657
4658 u8 opt_param_mask[0x20]; 4658 u8 opt_param_mask[0x20];
4659 4659
4660 u8 reserved_4[0x20]; 4660 u8 reserved_at_a0[0x20];
4661 4661
4662 struct mlx5_ifc_qpc_bits qpc; 4662 struct mlx5_ifc_qpc_bits qpc;
4663 4663
4664 u8 reserved_5[0x80]; 4664 u8 reserved_at_800[0x80];
4665}; 4665};
4666 4666
4667struct mlx5_ifc_init2init_qp_out_bits { 4667struct mlx5_ifc_init2init_qp_out_bits {
4668 u8 status[0x8]; 4668 u8 status[0x8];
4669 u8 reserved_0[0x18]; 4669 u8 reserved_at_8[0x18];
4670 4670
4671 u8 syndrome[0x20]; 4671 u8 syndrome[0x20];
4672 4672
4673 u8 reserved_1[0x40]; 4673 u8 reserved_at_40[0x40];
4674}; 4674};
4675 4675
4676struct mlx5_ifc_init2init_qp_in_bits { 4676struct mlx5_ifc_init2init_qp_in_bits {
4677 u8 opcode[0x10]; 4677 u8 opcode[0x10];
4678 u8 reserved_0[0x10]; 4678 u8 reserved_at_10[0x10];
4679 4679
4680 u8 reserved_1[0x10]; 4680 u8 reserved_at_20[0x10];
4681 u8 op_mod[0x10]; 4681 u8 op_mod[0x10];
4682 4682
4683 u8 reserved_2[0x8]; 4683 u8 reserved_at_40[0x8];
4684 u8 qpn[0x18]; 4684 u8 qpn[0x18];
4685 4685
4686 u8 reserved_3[0x20]; 4686 u8 reserved_at_60[0x20];
4687 4687
4688 u8 opt_param_mask[0x20]; 4688 u8 opt_param_mask[0x20];
4689 4689
4690 u8 reserved_4[0x20]; 4690 u8 reserved_at_a0[0x20];
4691 4691
4692 struct mlx5_ifc_qpc_bits qpc; 4692 struct mlx5_ifc_qpc_bits qpc;
4693 4693
4694 u8 reserved_5[0x80]; 4694 u8 reserved_at_800[0x80];
4695}; 4695};
4696 4696
4697struct mlx5_ifc_get_dropped_packet_log_out_bits { 4697struct mlx5_ifc_get_dropped_packet_log_out_bits {
4698 u8 status[0x8]; 4698 u8 status[0x8];
4699 u8 reserved_0[0x18]; 4699 u8 reserved_at_8[0x18];
4700 4700
4701 u8 syndrome[0x20]; 4701 u8 syndrome[0x20];
4702 4702
4703 u8 reserved_1[0x40]; 4703 u8 reserved_at_40[0x40];
4704 4704
4705 u8 packet_headers_log[128][0x8]; 4705 u8 packet_headers_log[128][0x8];
4706 4706
@@ -4709,1029 +4709,1029 @@ struct mlx5_ifc_get_dropped_packet_log_out_bits {
4709 4709
4710struct mlx5_ifc_get_dropped_packet_log_in_bits { 4710struct mlx5_ifc_get_dropped_packet_log_in_bits {
4711 u8 opcode[0x10]; 4711 u8 opcode[0x10];
4712 u8 reserved_0[0x10]; 4712 u8 reserved_at_10[0x10];
4713 4713
4714 u8 reserved_1[0x10]; 4714 u8 reserved_at_20[0x10];
4715 u8 op_mod[0x10]; 4715 u8 op_mod[0x10];
4716 4716
4717 u8 reserved_2[0x40]; 4717 u8 reserved_at_40[0x40];
4718}; 4718};
4719 4719
4720struct mlx5_ifc_gen_eqe_in_bits { 4720struct mlx5_ifc_gen_eqe_in_bits {
4721 u8 opcode[0x10]; 4721 u8 opcode[0x10];
4722 u8 reserved_0[0x10]; 4722 u8 reserved_at_10[0x10];
4723 4723
4724 u8 reserved_1[0x10]; 4724 u8 reserved_at_20[0x10];
4725 u8 op_mod[0x10]; 4725 u8 op_mod[0x10];
4726 4726
4727 u8 reserved_2[0x18]; 4727 u8 reserved_at_40[0x18];
4728 u8 eq_number[0x8]; 4728 u8 eq_number[0x8];
4729 4729
4730 u8 reserved_3[0x20]; 4730 u8 reserved_at_60[0x20];
4731 4731
4732 u8 eqe[64][0x8]; 4732 u8 eqe[64][0x8];
4733}; 4733};
4734 4734
4735struct mlx5_ifc_gen_eq_out_bits { 4735struct mlx5_ifc_gen_eq_out_bits {
4736 u8 status[0x8]; 4736 u8 status[0x8];
4737 u8 reserved_0[0x18]; 4737 u8 reserved_at_8[0x18];
4738 4738
4739 u8 syndrome[0x20]; 4739 u8 syndrome[0x20];
4740 4740
4741 u8 reserved_1[0x40]; 4741 u8 reserved_at_40[0x40];
4742}; 4742};
4743 4743
4744struct mlx5_ifc_enable_hca_out_bits { 4744struct mlx5_ifc_enable_hca_out_bits {
4745 u8 status[0x8]; 4745 u8 status[0x8];
4746 u8 reserved_0[0x18]; 4746 u8 reserved_at_8[0x18];
4747 4747
4748 u8 syndrome[0x20]; 4748 u8 syndrome[0x20];
4749 4749
4750 u8 reserved_1[0x20]; 4750 u8 reserved_at_40[0x20];
4751}; 4751};
4752 4752
4753struct mlx5_ifc_enable_hca_in_bits { 4753struct mlx5_ifc_enable_hca_in_bits {
4754 u8 opcode[0x10]; 4754 u8 opcode[0x10];
4755 u8 reserved_0[0x10]; 4755 u8 reserved_at_10[0x10];
4756 4756
4757 u8 reserved_1[0x10]; 4757 u8 reserved_at_20[0x10];
4758 u8 op_mod[0x10]; 4758 u8 op_mod[0x10];
4759 4759
4760 u8 reserved_2[0x10]; 4760 u8 reserved_at_40[0x10];
4761 u8 function_id[0x10]; 4761 u8 function_id[0x10];
4762 4762
4763 u8 reserved_3[0x20]; 4763 u8 reserved_at_60[0x20];
4764}; 4764};
4765 4765
4766struct mlx5_ifc_drain_dct_out_bits { 4766struct mlx5_ifc_drain_dct_out_bits {
4767 u8 status[0x8]; 4767 u8 status[0x8];
4768 u8 reserved_0[0x18]; 4768 u8 reserved_at_8[0x18];
4769 4769
4770 u8 syndrome[0x20]; 4770 u8 syndrome[0x20];
4771 4771
4772 u8 reserved_1[0x40]; 4772 u8 reserved_at_40[0x40];
4773}; 4773};
4774 4774
4775struct mlx5_ifc_drain_dct_in_bits { 4775struct mlx5_ifc_drain_dct_in_bits {
4776 u8 opcode[0x10]; 4776 u8 opcode[0x10];
4777 u8 reserved_0[0x10]; 4777 u8 reserved_at_10[0x10];
4778 4778
4779 u8 reserved_1[0x10]; 4779 u8 reserved_at_20[0x10];
4780 u8 op_mod[0x10]; 4780 u8 op_mod[0x10];
4781 4781
4782 u8 reserved_2[0x8]; 4782 u8 reserved_at_40[0x8];
4783 u8 dctn[0x18]; 4783 u8 dctn[0x18];
4784 4784
4785 u8 reserved_3[0x20]; 4785 u8 reserved_at_60[0x20];
4786}; 4786};
4787 4787
4788struct mlx5_ifc_disable_hca_out_bits { 4788struct mlx5_ifc_disable_hca_out_bits {
4789 u8 status[0x8]; 4789 u8 status[0x8];
4790 u8 reserved_0[0x18]; 4790 u8 reserved_at_8[0x18];
4791 4791
4792 u8 syndrome[0x20]; 4792 u8 syndrome[0x20];
4793 4793
4794 u8 reserved_1[0x20]; 4794 u8 reserved_at_40[0x20];
4795}; 4795};
4796 4796
4797struct mlx5_ifc_disable_hca_in_bits { 4797struct mlx5_ifc_disable_hca_in_bits {
4798 u8 opcode[0x10]; 4798 u8 opcode[0x10];
4799 u8 reserved_0[0x10]; 4799 u8 reserved_at_10[0x10];
4800 4800
4801 u8 reserved_1[0x10]; 4801 u8 reserved_at_20[0x10];
4802 u8 op_mod[0x10]; 4802 u8 op_mod[0x10];
4803 4803
4804 u8 reserved_2[0x10]; 4804 u8 reserved_at_40[0x10];
4805 u8 function_id[0x10]; 4805 u8 function_id[0x10];
4806 4806
4807 u8 reserved_3[0x20]; 4807 u8 reserved_at_60[0x20];
4808}; 4808};
4809 4809
4810struct mlx5_ifc_detach_from_mcg_out_bits { 4810struct mlx5_ifc_detach_from_mcg_out_bits {
4811 u8 status[0x8]; 4811 u8 status[0x8];
4812 u8 reserved_0[0x18]; 4812 u8 reserved_at_8[0x18];
4813 4813
4814 u8 syndrome[0x20]; 4814 u8 syndrome[0x20];
4815 4815
4816 u8 reserved_1[0x40]; 4816 u8 reserved_at_40[0x40];
4817}; 4817};
4818 4818
4819struct mlx5_ifc_detach_from_mcg_in_bits { 4819struct mlx5_ifc_detach_from_mcg_in_bits {
4820 u8 opcode[0x10]; 4820 u8 opcode[0x10];
4821 u8 reserved_0[0x10]; 4821 u8 reserved_at_10[0x10];
4822 4822
4823 u8 reserved_1[0x10]; 4823 u8 reserved_at_20[0x10];
4824 u8 op_mod[0x10]; 4824 u8 op_mod[0x10];
4825 4825
4826 u8 reserved_2[0x8]; 4826 u8 reserved_at_40[0x8];
4827 u8 qpn[0x18]; 4827 u8 qpn[0x18];
4828 4828
4829 u8 reserved_3[0x20]; 4829 u8 reserved_at_60[0x20];
4830 4830
4831 u8 multicast_gid[16][0x8]; 4831 u8 multicast_gid[16][0x8];
4832}; 4832};
4833 4833
4834struct mlx5_ifc_destroy_xrc_srq_out_bits { 4834struct mlx5_ifc_destroy_xrc_srq_out_bits {
4835 u8 status[0x8]; 4835 u8 status[0x8];
4836 u8 reserved_0[0x18]; 4836 u8 reserved_at_8[0x18];
4837 4837
4838 u8 syndrome[0x20]; 4838 u8 syndrome[0x20];
4839 4839
4840 u8 reserved_1[0x40]; 4840 u8 reserved_at_40[0x40];
4841}; 4841};
4842 4842
4843struct mlx5_ifc_destroy_xrc_srq_in_bits { 4843struct mlx5_ifc_destroy_xrc_srq_in_bits {
4844 u8 opcode[0x10]; 4844 u8 opcode[0x10];
4845 u8 reserved_0[0x10]; 4845 u8 reserved_at_10[0x10];
4846 4846
4847 u8 reserved_1[0x10]; 4847 u8 reserved_at_20[0x10];
4848 u8 op_mod[0x10]; 4848 u8 op_mod[0x10];
4849 4849
4850 u8 reserved_2[0x8]; 4850 u8 reserved_at_40[0x8];
4851 u8 xrc_srqn[0x18]; 4851 u8 xrc_srqn[0x18];
4852 4852
4853 u8 reserved_3[0x20]; 4853 u8 reserved_at_60[0x20];
4854}; 4854};
4855 4855
4856struct mlx5_ifc_destroy_tis_out_bits { 4856struct mlx5_ifc_destroy_tis_out_bits {
4857 u8 status[0x8]; 4857 u8 status[0x8];
4858 u8 reserved_0[0x18]; 4858 u8 reserved_at_8[0x18];
4859 4859
4860 u8 syndrome[0x20]; 4860 u8 syndrome[0x20];
4861 4861
4862 u8 reserved_1[0x40]; 4862 u8 reserved_at_40[0x40];
4863}; 4863};
4864 4864
4865struct mlx5_ifc_destroy_tis_in_bits { 4865struct mlx5_ifc_destroy_tis_in_bits {
4866 u8 opcode[0x10]; 4866 u8 opcode[0x10];
4867 u8 reserved_0[0x10]; 4867 u8 reserved_at_10[0x10];
4868 4868
4869 u8 reserved_1[0x10]; 4869 u8 reserved_at_20[0x10];
4870 u8 op_mod[0x10]; 4870 u8 op_mod[0x10];
4871 4871
4872 u8 reserved_2[0x8]; 4872 u8 reserved_at_40[0x8];
4873 u8 tisn[0x18]; 4873 u8 tisn[0x18];
4874 4874
4875 u8 reserved_3[0x20]; 4875 u8 reserved_at_60[0x20];
4876}; 4876};
4877 4877
4878struct mlx5_ifc_destroy_tir_out_bits { 4878struct mlx5_ifc_destroy_tir_out_bits {
4879 u8 status[0x8]; 4879 u8 status[0x8];
4880 u8 reserved_0[0x18]; 4880 u8 reserved_at_8[0x18];
4881 4881
4882 u8 syndrome[0x20]; 4882 u8 syndrome[0x20];
4883 4883
4884 u8 reserved_1[0x40]; 4884 u8 reserved_at_40[0x40];
4885}; 4885};
4886 4886
4887struct mlx5_ifc_destroy_tir_in_bits { 4887struct mlx5_ifc_destroy_tir_in_bits {
4888 u8 opcode[0x10]; 4888 u8 opcode[0x10];
4889 u8 reserved_0[0x10]; 4889 u8 reserved_at_10[0x10];
4890 4890
4891 u8 reserved_1[0x10]; 4891 u8 reserved_at_20[0x10];
4892 u8 op_mod[0x10]; 4892 u8 op_mod[0x10];
4893 4893
4894 u8 reserved_2[0x8]; 4894 u8 reserved_at_40[0x8];
4895 u8 tirn[0x18]; 4895 u8 tirn[0x18];
4896 4896
4897 u8 reserved_3[0x20]; 4897 u8 reserved_at_60[0x20];
4898}; 4898};
4899 4899
4900struct mlx5_ifc_destroy_srq_out_bits { 4900struct mlx5_ifc_destroy_srq_out_bits {
4901 u8 status[0x8]; 4901 u8 status[0x8];
4902 u8 reserved_0[0x18]; 4902 u8 reserved_at_8[0x18];
4903 4903
4904 u8 syndrome[0x20]; 4904 u8 syndrome[0x20];
4905 4905
4906 u8 reserved_1[0x40]; 4906 u8 reserved_at_40[0x40];
4907}; 4907};
4908 4908
4909struct mlx5_ifc_destroy_srq_in_bits { 4909struct mlx5_ifc_destroy_srq_in_bits {
4910 u8 opcode[0x10]; 4910 u8 opcode[0x10];
4911 u8 reserved_0[0x10]; 4911 u8 reserved_at_10[0x10];
4912 4912
4913 u8 reserved_1[0x10]; 4913 u8 reserved_at_20[0x10];
4914 u8 op_mod[0x10]; 4914 u8 op_mod[0x10];
4915 4915
4916 u8 reserved_2[0x8]; 4916 u8 reserved_at_40[0x8];
4917 u8 srqn[0x18]; 4917 u8 srqn[0x18];
4918 4918
4919 u8 reserved_3[0x20]; 4919 u8 reserved_at_60[0x20];
4920}; 4920};
4921 4921
4922struct mlx5_ifc_destroy_sq_out_bits { 4922struct mlx5_ifc_destroy_sq_out_bits {
4923 u8 status[0x8]; 4923 u8 status[0x8];
4924 u8 reserved_0[0x18]; 4924 u8 reserved_at_8[0x18];
4925 4925
4926 u8 syndrome[0x20]; 4926 u8 syndrome[0x20];
4927 4927
4928 u8 reserved_1[0x40]; 4928 u8 reserved_at_40[0x40];
4929}; 4929};
4930 4930
4931struct mlx5_ifc_destroy_sq_in_bits { 4931struct mlx5_ifc_destroy_sq_in_bits {
4932 u8 opcode[0x10]; 4932 u8 opcode[0x10];
4933 u8 reserved_0[0x10]; 4933 u8 reserved_at_10[0x10];
4934 4934
4935 u8 reserved_1[0x10]; 4935 u8 reserved_at_20[0x10];
4936 u8 op_mod[0x10]; 4936 u8 op_mod[0x10];
4937 4937
4938 u8 reserved_2[0x8]; 4938 u8 reserved_at_40[0x8];
4939 u8 sqn[0x18]; 4939 u8 sqn[0x18];
4940 4940
4941 u8 reserved_3[0x20]; 4941 u8 reserved_at_60[0x20];
4942}; 4942};
4943 4943
4944struct mlx5_ifc_destroy_rqt_out_bits { 4944struct mlx5_ifc_destroy_rqt_out_bits {
4945 u8 status[0x8]; 4945 u8 status[0x8];
4946 u8 reserved_0[0x18]; 4946 u8 reserved_at_8[0x18];
4947 4947
4948 u8 syndrome[0x20]; 4948 u8 syndrome[0x20];
4949 4949
4950 u8 reserved_1[0x40]; 4950 u8 reserved_at_40[0x40];
4951}; 4951};
4952 4952
4953struct mlx5_ifc_destroy_rqt_in_bits { 4953struct mlx5_ifc_destroy_rqt_in_bits {
4954 u8 opcode[0x10]; 4954 u8 opcode[0x10];
4955 u8 reserved_0[0x10]; 4955 u8 reserved_at_10[0x10];
4956 4956
4957 u8 reserved_1[0x10]; 4957 u8 reserved_at_20[0x10];
4958 u8 op_mod[0x10]; 4958 u8 op_mod[0x10];
4959 4959
4960 u8 reserved_2[0x8]; 4960 u8 reserved_at_40[0x8];
4961 u8 rqtn[0x18]; 4961 u8 rqtn[0x18];
4962 4962
4963 u8 reserved_3[0x20]; 4963 u8 reserved_at_60[0x20];
4964}; 4964};
4965 4965
4966struct mlx5_ifc_destroy_rq_out_bits { 4966struct mlx5_ifc_destroy_rq_out_bits {
4967 u8 status[0x8]; 4967 u8 status[0x8];
4968 u8 reserved_0[0x18]; 4968 u8 reserved_at_8[0x18];
4969 4969
4970 u8 syndrome[0x20]; 4970 u8 syndrome[0x20];
4971 4971
4972 u8 reserved_1[0x40]; 4972 u8 reserved_at_40[0x40];
4973}; 4973};
4974 4974
4975struct mlx5_ifc_destroy_rq_in_bits { 4975struct mlx5_ifc_destroy_rq_in_bits {
4976 u8 opcode[0x10]; 4976 u8 opcode[0x10];
4977 u8 reserved_0[0x10]; 4977 u8 reserved_at_10[0x10];
4978 4978
4979 u8 reserved_1[0x10]; 4979 u8 reserved_at_20[0x10];
4980 u8 op_mod[0x10]; 4980 u8 op_mod[0x10];
4981 4981
4982 u8 reserved_2[0x8]; 4982 u8 reserved_at_40[0x8];
4983 u8 rqn[0x18]; 4983 u8 rqn[0x18];
4984 4984
4985 u8 reserved_3[0x20]; 4985 u8 reserved_at_60[0x20];
4986}; 4986};
4987 4987
4988struct mlx5_ifc_destroy_rmp_out_bits { 4988struct mlx5_ifc_destroy_rmp_out_bits {
4989 u8 status[0x8]; 4989 u8 status[0x8];
4990 u8 reserved_0[0x18]; 4990 u8 reserved_at_8[0x18];
4991 4991
4992 u8 syndrome[0x20]; 4992 u8 syndrome[0x20];
4993 4993
4994 u8 reserved_1[0x40]; 4994 u8 reserved_at_40[0x40];
4995}; 4995};
4996 4996
4997struct mlx5_ifc_destroy_rmp_in_bits { 4997struct mlx5_ifc_destroy_rmp_in_bits {
4998 u8 opcode[0x10]; 4998 u8 opcode[0x10];
4999 u8 reserved_0[0x10]; 4999 u8 reserved_at_10[0x10];
5000 5000
5001 u8 reserved_1[0x10]; 5001 u8 reserved_at_20[0x10];
5002 u8 op_mod[0x10]; 5002 u8 op_mod[0x10];
5003 5003
5004 u8 reserved_2[0x8]; 5004 u8 reserved_at_40[0x8];
5005 u8 rmpn[0x18]; 5005 u8 rmpn[0x18];
5006 5006
5007 u8 reserved_3[0x20]; 5007 u8 reserved_at_60[0x20];
5008}; 5008};
5009 5009
5010struct mlx5_ifc_destroy_qp_out_bits { 5010struct mlx5_ifc_destroy_qp_out_bits {
5011 u8 status[0x8]; 5011 u8 status[0x8];
5012 u8 reserved_0[0x18]; 5012 u8 reserved_at_8[0x18];
5013 5013
5014 u8 syndrome[0x20]; 5014 u8 syndrome[0x20];
5015 5015
5016 u8 reserved_1[0x40]; 5016 u8 reserved_at_40[0x40];
5017}; 5017};
5018 5018
5019struct mlx5_ifc_destroy_qp_in_bits { 5019struct mlx5_ifc_destroy_qp_in_bits {
5020 u8 opcode[0x10]; 5020 u8 opcode[0x10];
5021 u8 reserved_0[0x10]; 5021 u8 reserved_at_10[0x10];
5022 5022
5023 u8 reserved_1[0x10]; 5023 u8 reserved_at_20[0x10];
5024 u8 op_mod[0x10]; 5024 u8 op_mod[0x10];
5025 5025
5026 u8 reserved_2[0x8]; 5026 u8 reserved_at_40[0x8];
5027 u8 qpn[0x18]; 5027 u8 qpn[0x18];
5028 5028
5029 u8 reserved_3[0x20]; 5029 u8 reserved_at_60[0x20];
5030}; 5030};
5031 5031
5032struct mlx5_ifc_destroy_psv_out_bits { 5032struct mlx5_ifc_destroy_psv_out_bits {
5033 u8 status[0x8]; 5033 u8 status[0x8];
5034 u8 reserved_0[0x18]; 5034 u8 reserved_at_8[0x18];
5035 5035
5036 u8 syndrome[0x20]; 5036 u8 syndrome[0x20];
5037 5037
5038 u8 reserved_1[0x40]; 5038 u8 reserved_at_40[0x40];
5039}; 5039};
5040 5040
5041struct mlx5_ifc_destroy_psv_in_bits { 5041struct mlx5_ifc_destroy_psv_in_bits {
5042 u8 opcode[0x10]; 5042 u8 opcode[0x10];
5043 u8 reserved_0[0x10]; 5043 u8 reserved_at_10[0x10];
5044 5044
5045 u8 reserved_1[0x10]; 5045 u8 reserved_at_20[0x10];
5046 u8 op_mod[0x10]; 5046 u8 op_mod[0x10];
5047 5047
5048 u8 reserved_2[0x8]; 5048 u8 reserved_at_40[0x8];
5049 u8 psvn[0x18]; 5049 u8 psvn[0x18];
5050 5050
5051 u8 reserved_3[0x20]; 5051 u8 reserved_at_60[0x20];
5052}; 5052};
5053 5053
5054struct mlx5_ifc_destroy_mkey_out_bits { 5054struct mlx5_ifc_destroy_mkey_out_bits {
5055 u8 status[0x8]; 5055 u8 status[0x8];
5056 u8 reserved_0[0x18]; 5056 u8 reserved_at_8[0x18];
5057 5057
5058 u8 syndrome[0x20]; 5058 u8 syndrome[0x20];
5059 5059
5060 u8 reserved_1[0x40]; 5060 u8 reserved_at_40[0x40];
5061}; 5061};
5062 5062
5063struct mlx5_ifc_destroy_mkey_in_bits { 5063struct mlx5_ifc_destroy_mkey_in_bits {
5064 u8 opcode[0x10]; 5064 u8 opcode[0x10];
5065 u8 reserved_0[0x10]; 5065 u8 reserved_at_10[0x10];
5066 5066
5067 u8 reserved_1[0x10]; 5067 u8 reserved_at_20[0x10];
5068 u8 op_mod[0x10]; 5068 u8 op_mod[0x10];
5069 5069
5070 u8 reserved_2[0x8]; 5070 u8 reserved_at_40[0x8];
5071 u8 mkey_index[0x18]; 5071 u8 mkey_index[0x18];
5072 5072
5073 u8 reserved_3[0x20]; 5073 u8 reserved_at_60[0x20];
5074}; 5074};
5075 5075
5076struct mlx5_ifc_destroy_flow_table_out_bits { 5076struct mlx5_ifc_destroy_flow_table_out_bits {
5077 u8 status[0x8]; 5077 u8 status[0x8];
5078 u8 reserved_0[0x18]; 5078 u8 reserved_at_8[0x18];
5079 5079
5080 u8 syndrome[0x20]; 5080 u8 syndrome[0x20];
5081 5081
5082 u8 reserved_1[0x40]; 5082 u8 reserved_at_40[0x40];
5083}; 5083};
5084 5084
5085struct mlx5_ifc_destroy_flow_table_in_bits { 5085struct mlx5_ifc_destroy_flow_table_in_bits {
5086 u8 opcode[0x10]; 5086 u8 opcode[0x10];
5087 u8 reserved_0[0x10]; 5087 u8 reserved_at_10[0x10];
5088 5088
5089 u8 reserved_1[0x10]; 5089 u8 reserved_at_20[0x10];
5090 u8 op_mod[0x10]; 5090 u8 op_mod[0x10];
5091 5091
5092 u8 reserved_2[0x40]; 5092 u8 reserved_at_40[0x40];
5093 5093
5094 u8 table_type[0x8]; 5094 u8 table_type[0x8];
5095 u8 reserved_3[0x18]; 5095 u8 reserved_at_88[0x18];
5096 5096
5097 u8 reserved_4[0x8]; 5097 u8 reserved_at_a0[0x8];
5098 u8 table_id[0x18]; 5098 u8 table_id[0x18];
5099 5099
5100 u8 reserved_5[0x140]; 5100 u8 reserved_at_c0[0x140];
5101}; 5101};
5102 5102
5103struct mlx5_ifc_destroy_flow_group_out_bits { 5103struct mlx5_ifc_destroy_flow_group_out_bits {
5104 u8 status[0x8]; 5104 u8 status[0x8];
5105 u8 reserved_0[0x18]; 5105 u8 reserved_at_8[0x18];
5106 5106
5107 u8 syndrome[0x20]; 5107 u8 syndrome[0x20];
5108 5108
5109 u8 reserved_1[0x40]; 5109 u8 reserved_at_40[0x40];
5110}; 5110};
5111 5111
5112struct mlx5_ifc_destroy_flow_group_in_bits { 5112struct mlx5_ifc_destroy_flow_group_in_bits {
5113 u8 opcode[0x10]; 5113 u8 opcode[0x10];
5114 u8 reserved_0[0x10]; 5114 u8 reserved_at_10[0x10];
5115 5115
5116 u8 reserved_1[0x10]; 5116 u8 reserved_at_20[0x10];
5117 u8 op_mod[0x10]; 5117 u8 op_mod[0x10];
5118 5118
5119 u8 reserved_2[0x40]; 5119 u8 reserved_at_40[0x40];
5120 5120
5121 u8 table_type[0x8]; 5121 u8 table_type[0x8];
5122 u8 reserved_3[0x18]; 5122 u8 reserved_at_88[0x18];
5123 5123
5124 u8 reserved_4[0x8]; 5124 u8 reserved_at_a0[0x8];
5125 u8 table_id[0x18]; 5125 u8 table_id[0x18];
5126 5126
5127 u8 group_id[0x20]; 5127 u8 group_id[0x20];
5128 5128
5129 u8 reserved_5[0x120]; 5129 u8 reserved_at_e0[0x120];
5130}; 5130};
5131 5131
5132struct mlx5_ifc_destroy_eq_out_bits { 5132struct mlx5_ifc_destroy_eq_out_bits {
5133 u8 status[0x8]; 5133 u8 status[0x8];
5134 u8 reserved_0[0x18]; 5134 u8 reserved_at_8[0x18];
5135 5135
5136 u8 syndrome[0x20]; 5136 u8 syndrome[0x20];
5137 5137
5138 u8 reserved_1[0x40]; 5138 u8 reserved_at_40[0x40];
5139}; 5139};
5140 5140
5141struct mlx5_ifc_destroy_eq_in_bits { 5141struct mlx5_ifc_destroy_eq_in_bits {
5142 u8 opcode[0x10]; 5142 u8 opcode[0x10];
5143 u8 reserved_0[0x10]; 5143 u8 reserved_at_10[0x10];
5144 5144
5145 u8 reserved_1[0x10]; 5145 u8 reserved_at_20[0x10];
5146 u8 op_mod[0x10]; 5146 u8 op_mod[0x10];
5147 5147
5148 u8 reserved_2[0x18]; 5148 u8 reserved_at_40[0x18];
5149 u8 eq_number[0x8]; 5149 u8 eq_number[0x8];
5150 5150
5151 u8 reserved_3[0x20]; 5151 u8 reserved_at_60[0x20];
5152}; 5152};
5153 5153
5154struct mlx5_ifc_destroy_dct_out_bits { 5154struct mlx5_ifc_destroy_dct_out_bits {
5155 u8 status[0x8]; 5155 u8 status[0x8];
5156 u8 reserved_0[0x18]; 5156 u8 reserved_at_8[0x18];
5157 5157
5158 u8 syndrome[0x20]; 5158 u8 syndrome[0x20];
5159 5159
5160 u8 reserved_1[0x40]; 5160 u8 reserved_at_40[0x40];
5161}; 5161};
5162 5162
5163struct mlx5_ifc_destroy_dct_in_bits { 5163struct mlx5_ifc_destroy_dct_in_bits {
5164 u8 opcode[0x10]; 5164 u8 opcode[0x10];
5165 u8 reserved_0[0x10]; 5165 u8 reserved_at_10[0x10];
5166 5166
5167 u8 reserved_1[0x10]; 5167 u8 reserved_at_20[0x10];
5168 u8 op_mod[0x10]; 5168 u8 op_mod[0x10];
5169 5169
5170 u8 reserved_2[0x8]; 5170 u8 reserved_at_40[0x8];
5171 u8 dctn[0x18]; 5171 u8 dctn[0x18];
5172 5172
5173 u8 reserved_3[0x20]; 5173 u8 reserved_at_60[0x20];
5174}; 5174};
5175 5175
5176struct mlx5_ifc_destroy_cq_out_bits { 5176struct mlx5_ifc_destroy_cq_out_bits {
5177 u8 status[0x8]; 5177 u8 status[0x8];
5178 u8 reserved_0[0x18]; 5178 u8 reserved_at_8[0x18];
5179 5179
5180 u8 syndrome[0x20]; 5180 u8 syndrome[0x20];
5181 5181
5182 u8 reserved_1[0x40]; 5182 u8 reserved_at_40[0x40];
5183}; 5183};
5184 5184
5185struct mlx5_ifc_destroy_cq_in_bits { 5185struct mlx5_ifc_destroy_cq_in_bits {
5186 u8 opcode[0x10]; 5186 u8 opcode[0x10];
5187 u8 reserved_0[0x10]; 5187 u8 reserved_at_10[0x10];
5188 5188
5189 u8 reserved_1[0x10]; 5189 u8 reserved_at_20[0x10];
5190 u8 op_mod[0x10]; 5190 u8 op_mod[0x10];
5191 5191
5192 u8 reserved_2[0x8]; 5192 u8 reserved_at_40[0x8];
5193 u8 cqn[0x18]; 5193 u8 cqn[0x18];
5194 5194
5195 u8 reserved_3[0x20]; 5195 u8 reserved_at_60[0x20];
5196}; 5196};
5197 5197
5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { 5198struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
5199 u8 status[0x8]; 5199 u8 status[0x8];
5200 u8 reserved_0[0x18]; 5200 u8 reserved_at_8[0x18];
5201 5201
5202 u8 syndrome[0x20]; 5202 u8 syndrome[0x20];
5203 5203
5204 u8 reserved_1[0x40]; 5204 u8 reserved_at_40[0x40];
5205}; 5205};
5206 5206
5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { 5207struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
5208 u8 opcode[0x10]; 5208 u8 opcode[0x10];
5209 u8 reserved_0[0x10]; 5209 u8 reserved_at_10[0x10];
5210 5210
5211 u8 reserved_1[0x10]; 5211 u8 reserved_at_20[0x10];
5212 u8 op_mod[0x10]; 5212 u8 op_mod[0x10];
5213 5213
5214 u8 reserved_2[0x20]; 5214 u8 reserved_at_40[0x20];
5215 5215
5216 u8 reserved_3[0x10]; 5216 u8 reserved_at_60[0x10];
5217 u8 vxlan_udp_port[0x10]; 5217 u8 vxlan_udp_port[0x10];
5218}; 5218};
5219 5219
5220struct mlx5_ifc_delete_l2_table_entry_out_bits { 5220struct mlx5_ifc_delete_l2_table_entry_out_bits {
5221 u8 status[0x8]; 5221 u8 status[0x8];
5222 u8 reserved_0[0x18]; 5222 u8 reserved_at_8[0x18];
5223 5223
5224 u8 syndrome[0x20]; 5224 u8 syndrome[0x20];
5225 5225
5226 u8 reserved_1[0x40]; 5226 u8 reserved_at_40[0x40];
5227}; 5227};
5228 5228
5229struct mlx5_ifc_delete_l2_table_entry_in_bits { 5229struct mlx5_ifc_delete_l2_table_entry_in_bits {
5230 u8 opcode[0x10]; 5230 u8 opcode[0x10];
5231 u8 reserved_0[0x10]; 5231 u8 reserved_at_10[0x10];
5232 5232
5233 u8 reserved_1[0x10]; 5233 u8 reserved_at_20[0x10];
5234 u8 op_mod[0x10]; 5234 u8 op_mod[0x10];
5235 5235
5236 u8 reserved_2[0x60]; 5236 u8 reserved_at_40[0x60];
5237 5237
5238 u8 reserved_3[0x8]; 5238 u8 reserved_at_a0[0x8];
5239 u8 table_index[0x18]; 5239 u8 table_index[0x18];
5240 5240
5241 u8 reserved_4[0x140]; 5241 u8 reserved_at_c0[0x140];
5242}; 5242};
5243 5243
5244struct mlx5_ifc_delete_fte_out_bits { 5244struct mlx5_ifc_delete_fte_out_bits {
5245 u8 status[0x8]; 5245 u8 status[0x8];
5246 u8 reserved_0[0x18]; 5246 u8 reserved_at_8[0x18];
5247 5247
5248 u8 syndrome[0x20]; 5248 u8 syndrome[0x20];
5249 5249
5250 u8 reserved_1[0x40]; 5250 u8 reserved_at_40[0x40];
5251}; 5251};
5252 5252
5253struct mlx5_ifc_delete_fte_in_bits { 5253struct mlx5_ifc_delete_fte_in_bits {
5254 u8 opcode[0x10]; 5254 u8 opcode[0x10];
5255 u8 reserved_0[0x10]; 5255 u8 reserved_at_10[0x10];
5256 5256
5257 u8 reserved_1[0x10]; 5257 u8 reserved_at_20[0x10];
5258 u8 op_mod[0x10]; 5258 u8 op_mod[0x10];
5259 5259
5260 u8 reserved_2[0x40]; 5260 u8 reserved_at_40[0x40];
5261 5261
5262 u8 table_type[0x8]; 5262 u8 table_type[0x8];
5263 u8 reserved_3[0x18]; 5263 u8 reserved_at_88[0x18];
5264 5264
5265 u8 reserved_4[0x8]; 5265 u8 reserved_at_a0[0x8];
5266 u8 table_id[0x18]; 5266 u8 table_id[0x18];
5267 5267
5268 u8 reserved_5[0x40]; 5268 u8 reserved_at_c0[0x40];
5269 5269
5270 u8 flow_index[0x20]; 5270 u8 flow_index[0x20];
5271 5271
5272 u8 reserved_6[0xe0]; 5272 u8 reserved_at_120[0xe0];
5273}; 5273};
5274 5274
5275struct mlx5_ifc_dealloc_xrcd_out_bits { 5275struct mlx5_ifc_dealloc_xrcd_out_bits {
5276 u8 status[0x8]; 5276 u8 status[0x8];
5277 u8 reserved_0[0x18]; 5277 u8 reserved_at_8[0x18];
5278 5278
5279 u8 syndrome[0x20]; 5279 u8 syndrome[0x20];
5280 5280
5281 u8 reserved_1[0x40]; 5281 u8 reserved_at_40[0x40];
5282}; 5282};
5283 5283
5284struct mlx5_ifc_dealloc_xrcd_in_bits { 5284struct mlx5_ifc_dealloc_xrcd_in_bits {
5285 u8 opcode[0x10]; 5285 u8 opcode[0x10];
5286 u8 reserved_0[0x10]; 5286 u8 reserved_at_10[0x10];
5287 5287
5288 u8 reserved_1[0x10]; 5288 u8 reserved_at_20[0x10];
5289 u8 op_mod[0x10]; 5289 u8 op_mod[0x10];
5290 5290
5291 u8 reserved_2[0x8]; 5291 u8 reserved_at_40[0x8];
5292 u8 xrcd[0x18]; 5292 u8 xrcd[0x18];
5293 5293
5294 u8 reserved_3[0x20]; 5294 u8 reserved_at_60[0x20];
5295}; 5295};
5296 5296
5297struct mlx5_ifc_dealloc_uar_out_bits { 5297struct mlx5_ifc_dealloc_uar_out_bits {
5298 u8 status[0x8]; 5298 u8 status[0x8];
5299 u8 reserved_0[0x18]; 5299 u8 reserved_at_8[0x18];
5300 5300
5301 u8 syndrome[0x20]; 5301 u8 syndrome[0x20];
5302 5302
5303 u8 reserved_1[0x40]; 5303 u8 reserved_at_40[0x40];
5304}; 5304};
5305 5305
5306struct mlx5_ifc_dealloc_uar_in_bits { 5306struct mlx5_ifc_dealloc_uar_in_bits {
5307 u8 opcode[0x10]; 5307 u8 opcode[0x10];
5308 u8 reserved_0[0x10]; 5308 u8 reserved_at_10[0x10];
5309 5309
5310 u8 reserved_1[0x10]; 5310 u8 reserved_at_20[0x10];
5311 u8 op_mod[0x10]; 5311 u8 op_mod[0x10];
5312 5312
5313 u8 reserved_2[0x8]; 5313 u8 reserved_at_40[0x8];
5314 u8 uar[0x18]; 5314 u8 uar[0x18];
5315 5315
5316 u8 reserved_3[0x20]; 5316 u8 reserved_at_60[0x20];
5317}; 5317};
5318 5318
5319struct mlx5_ifc_dealloc_transport_domain_out_bits { 5319struct mlx5_ifc_dealloc_transport_domain_out_bits {
5320 u8 status[0x8]; 5320 u8 status[0x8];
5321 u8 reserved_0[0x18]; 5321 u8 reserved_at_8[0x18];
5322 5322
5323 u8 syndrome[0x20]; 5323 u8 syndrome[0x20];
5324 5324
5325 u8 reserved_1[0x40]; 5325 u8 reserved_at_40[0x40];
5326}; 5326};
5327 5327
5328struct mlx5_ifc_dealloc_transport_domain_in_bits { 5328struct mlx5_ifc_dealloc_transport_domain_in_bits {
5329 u8 opcode[0x10]; 5329 u8 opcode[0x10];
5330 u8 reserved_0[0x10]; 5330 u8 reserved_at_10[0x10];
5331 5331
5332 u8 reserved_1[0x10]; 5332 u8 reserved_at_20[0x10];
5333 u8 op_mod[0x10]; 5333 u8 op_mod[0x10];
5334 5334
5335 u8 reserved_2[0x8]; 5335 u8 reserved_at_40[0x8];
5336 u8 transport_domain[0x18]; 5336 u8 transport_domain[0x18];
5337 5337
5338 u8 reserved_3[0x20]; 5338 u8 reserved_at_60[0x20];
5339}; 5339};
5340 5340
5341struct mlx5_ifc_dealloc_q_counter_out_bits { 5341struct mlx5_ifc_dealloc_q_counter_out_bits {
5342 u8 status[0x8]; 5342 u8 status[0x8];
5343 u8 reserved_0[0x18]; 5343 u8 reserved_at_8[0x18];
5344 5344
5345 u8 syndrome[0x20]; 5345 u8 syndrome[0x20];
5346 5346
5347 u8 reserved_1[0x40]; 5347 u8 reserved_at_40[0x40];
5348}; 5348};
5349 5349
5350struct mlx5_ifc_dealloc_q_counter_in_bits { 5350struct mlx5_ifc_dealloc_q_counter_in_bits {
5351 u8 opcode[0x10]; 5351 u8 opcode[0x10];
5352 u8 reserved_0[0x10]; 5352 u8 reserved_at_10[0x10];
5353 5353
5354 u8 reserved_1[0x10]; 5354 u8 reserved_at_20[0x10];
5355 u8 op_mod[0x10]; 5355 u8 op_mod[0x10];
5356 5356
5357 u8 reserved_2[0x18]; 5357 u8 reserved_at_40[0x18];
5358 u8 counter_set_id[0x8]; 5358 u8 counter_set_id[0x8];
5359 5359
5360 u8 reserved_3[0x20]; 5360 u8 reserved_at_60[0x20];
5361}; 5361};
5362 5362
5363struct mlx5_ifc_dealloc_pd_out_bits { 5363struct mlx5_ifc_dealloc_pd_out_bits {
5364 u8 status[0x8]; 5364 u8 status[0x8];
5365 u8 reserved_0[0x18]; 5365 u8 reserved_at_8[0x18];
5366 5366
5367 u8 syndrome[0x20]; 5367 u8 syndrome[0x20];
5368 5368
5369 u8 reserved_1[0x40]; 5369 u8 reserved_at_40[0x40];
5370}; 5370};
5371 5371
5372struct mlx5_ifc_dealloc_pd_in_bits { 5372struct mlx5_ifc_dealloc_pd_in_bits {
5373 u8 opcode[0x10]; 5373 u8 opcode[0x10];
5374 u8 reserved_0[0x10]; 5374 u8 reserved_at_10[0x10];
5375 5375
5376 u8 reserved_1[0x10]; 5376 u8 reserved_at_20[0x10];
5377 u8 op_mod[0x10]; 5377 u8 op_mod[0x10];
5378 5378
5379 u8 reserved_2[0x8]; 5379 u8 reserved_at_40[0x8];
5380 u8 pd[0x18]; 5380 u8 pd[0x18];
5381 5381
5382 u8 reserved_3[0x20]; 5382 u8 reserved_at_60[0x20];
5383}; 5383};
5384 5384
5385struct mlx5_ifc_create_xrc_srq_out_bits { 5385struct mlx5_ifc_create_xrc_srq_out_bits {
5386 u8 status[0x8]; 5386 u8 status[0x8];
5387 u8 reserved_0[0x18]; 5387 u8 reserved_at_8[0x18];
5388 5388
5389 u8 syndrome[0x20]; 5389 u8 syndrome[0x20];
5390 5390
5391 u8 reserved_1[0x8]; 5391 u8 reserved_at_40[0x8];
5392 u8 xrc_srqn[0x18]; 5392 u8 xrc_srqn[0x18];
5393 5393
5394 u8 reserved_2[0x20]; 5394 u8 reserved_at_60[0x20];
5395}; 5395};
5396 5396
5397struct mlx5_ifc_create_xrc_srq_in_bits { 5397struct mlx5_ifc_create_xrc_srq_in_bits {
5398 u8 opcode[0x10]; 5398 u8 opcode[0x10];
5399 u8 reserved_0[0x10]; 5399 u8 reserved_at_10[0x10];
5400 5400
5401 u8 reserved_1[0x10]; 5401 u8 reserved_at_20[0x10];
5402 u8 op_mod[0x10]; 5402 u8 op_mod[0x10];
5403 5403
5404 u8 reserved_2[0x40]; 5404 u8 reserved_at_40[0x40];
5405 5405
5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; 5406 struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
5407 5407
5408 u8 reserved_3[0x600]; 5408 u8 reserved_at_280[0x600];
5409 5409
5410 u8 pas[0][0x40]; 5410 u8 pas[0][0x40];
5411}; 5411};
5412 5412
5413struct mlx5_ifc_create_tis_out_bits { 5413struct mlx5_ifc_create_tis_out_bits {
5414 u8 status[0x8]; 5414 u8 status[0x8];
5415 u8 reserved_0[0x18]; 5415 u8 reserved_at_8[0x18];
5416 5416
5417 u8 syndrome[0x20]; 5417 u8 syndrome[0x20];
5418 5418
5419 u8 reserved_1[0x8]; 5419 u8 reserved_at_40[0x8];
5420 u8 tisn[0x18]; 5420 u8 tisn[0x18];
5421 5421
5422 u8 reserved_2[0x20]; 5422 u8 reserved_at_60[0x20];
5423}; 5423};
5424 5424
5425struct mlx5_ifc_create_tis_in_bits { 5425struct mlx5_ifc_create_tis_in_bits {
5426 u8 opcode[0x10]; 5426 u8 opcode[0x10];
5427 u8 reserved_0[0x10]; 5427 u8 reserved_at_10[0x10];
5428 5428
5429 u8 reserved_1[0x10]; 5429 u8 reserved_at_20[0x10];
5430 u8 op_mod[0x10]; 5430 u8 op_mod[0x10];
5431 5431
5432 u8 reserved_2[0xc0]; 5432 u8 reserved_at_40[0xc0];
5433 5433
5434 struct mlx5_ifc_tisc_bits ctx; 5434 struct mlx5_ifc_tisc_bits ctx;
5435}; 5435};
5436 5436
5437struct mlx5_ifc_create_tir_out_bits { 5437struct mlx5_ifc_create_tir_out_bits {
5438 u8 status[0x8]; 5438 u8 status[0x8];
5439 u8 reserved_0[0x18]; 5439 u8 reserved_at_8[0x18];
5440 5440
5441 u8 syndrome[0x20]; 5441 u8 syndrome[0x20];
5442 5442
5443 u8 reserved_1[0x8]; 5443 u8 reserved_at_40[0x8];
5444 u8 tirn[0x18]; 5444 u8 tirn[0x18];
5445 5445
5446 u8 reserved_2[0x20]; 5446 u8 reserved_at_60[0x20];
5447}; 5447};
5448 5448
5449struct mlx5_ifc_create_tir_in_bits { 5449struct mlx5_ifc_create_tir_in_bits {
5450 u8 opcode[0x10]; 5450 u8 opcode[0x10];
5451 u8 reserved_0[0x10]; 5451 u8 reserved_at_10[0x10];
5452 5452
5453 u8 reserved_1[0x10]; 5453 u8 reserved_at_20[0x10];
5454 u8 op_mod[0x10]; 5454 u8 op_mod[0x10];
5455 5455
5456 u8 reserved_2[0xc0]; 5456 u8 reserved_at_40[0xc0];
5457 5457
5458 struct mlx5_ifc_tirc_bits ctx; 5458 struct mlx5_ifc_tirc_bits ctx;
5459}; 5459};
5460 5460
5461struct mlx5_ifc_create_srq_out_bits { 5461struct mlx5_ifc_create_srq_out_bits {
5462 u8 status[0x8]; 5462 u8 status[0x8];
5463 u8 reserved_0[0x18]; 5463 u8 reserved_at_8[0x18];
5464 5464
5465 u8 syndrome[0x20]; 5465 u8 syndrome[0x20];
5466 5466
5467 u8 reserved_1[0x8]; 5467 u8 reserved_at_40[0x8];
5468 u8 srqn[0x18]; 5468 u8 srqn[0x18];
5469 5469
5470 u8 reserved_2[0x20]; 5470 u8 reserved_at_60[0x20];
5471}; 5471};
5472 5472
5473struct mlx5_ifc_create_srq_in_bits { 5473struct mlx5_ifc_create_srq_in_bits {
5474 u8 opcode[0x10]; 5474 u8 opcode[0x10];
5475 u8 reserved_0[0x10]; 5475 u8 reserved_at_10[0x10];
5476 5476
5477 u8 reserved_1[0x10]; 5477 u8 reserved_at_20[0x10];
5478 u8 op_mod[0x10]; 5478 u8 op_mod[0x10];
5479 5479
5480 u8 reserved_2[0x40]; 5480 u8 reserved_at_40[0x40];
5481 5481
5482 struct mlx5_ifc_srqc_bits srq_context_entry; 5482 struct mlx5_ifc_srqc_bits srq_context_entry;
5483 5483
5484 u8 reserved_3[0x600]; 5484 u8 reserved_at_280[0x600];
5485 5485
5486 u8 pas[0][0x40]; 5486 u8 pas[0][0x40];
5487}; 5487};
5488 5488
5489struct mlx5_ifc_create_sq_out_bits { 5489struct mlx5_ifc_create_sq_out_bits {
5490 u8 status[0x8]; 5490 u8 status[0x8];
5491 u8 reserved_0[0x18]; 5491 u8 reserved_at_8[0x18];
5492 5492
5493 u8 syndrome[0x20]; 5493 u8 syndrome[0x20];
5494 5494
5495 u8 reserved_1[0x8]; 5495 u8 reserved_at_40[0x8];
5496 u8 sqn[0x18]; 5496 u8 sqn[0x18];
5497 5497
5498 u8 reserved_2[0x20]; 5498 u8 reserved_at_60[0x20];
5499}; 5499};
5500 5500
5501struct mlx5_ifc_create_sq_in_bits { 5501struct mlx5_ifc_create_sq_in_bits {
5502 u8 opcode[0x10]; 5502 u8 opcode[0x10];
5503 u8 reserved_0[0x10]; 5503 u8 reserved_at_10[0x10];
5504 5504
5505 u8 reserved_1[0x10]; 5505 u8 reserved_at_20[0x10];
5506 u8 op_mod[0x10]; 5506 u8 op_mod[0x10];
5507 5507
5508 u8 reserved_2[0xc0]; 5508 u8 reserved_at_40[0xc0];
5509 5509
5510 struct mlx5_ifc_sqc_bits ctx; 5510 struct mlx5_ifc_sqc_bits ctx;
5511}; 5511};
5512 5512
5513struct mlx5_ifc_create_rqt_out_bits { 5513struct mlx5_ifc_create_rqt_out_bits {
5514 u8 status[0x8]; 5514 u8 status[0x8];
5515 u8 reserved_0[0x18]; 5515 u8 reserved_at_8[0x18];
5516 5516
5517 u8 syndrome[0x20]; 5517 u8 syndrome[0x20];
5518 5518
5519 u8 reserved_1[0x8]; 5519 u8 reserved_at_40[0x8];
5520 u8 rqtn[0x18]; 5520 u8 rqtn[0x18];
5521 5521
5522 u8 reserved_2[0x20]; 5522 u8 reserved_at_60[0x20];
5523}; 5523};
5524 5524
5525struct mlx5_ifc_create_rqt_in_bits { 5525struct mlx5_ifc_create_rqt_in_bits {
5526 u8 opcode[0x10]; 5526 u8 opcode[0x10];
5527 u8 reserved_0[0x10]; 5527 u8 reserved_at_10[0x10];
5528 5528
5529 u8 reserved_1[0x10]; 5529 u8 reserved_at_20[0x10];
5530 u8 op_mod[0x10]; 5530 u8 op_mod[0x10];
5531 5531
5532 u8 reserved_2[0xc0]; 5532 u8 reserved_at_40[0xc0];
5533 5533
5534 struct mlx5_ifc_rqtc_bits rqt_context; 5534 struct mlx5_ifc_rqtc_bits rqt_context;
5535}; 5535};
5536 5536
5537struct mlx5_ifc_create_rq_out_bits { 5537struct mlx5_ifc_create_rq_out_bits {
5538 u8 status[0x8]; 5538 u8 status[0x8];
5539 u8 reserved_0[0x18]; 5539 u8 reserved_at_8[0x18];
5540 5540
5541 u8 syndrome[0x20]; 5541 u8 syndrome[0x20];
5542 5542
5543 u8 reserved_1[0x8]; 5543 u8 reserved_at_40[0x8];
5544 u8 rqn[0x18]; 5544 u8 rqn[0x18];
5545 5545
5546 u8 reserved_2[0x20]; 5546 u8 reserved_at_60[0x20];
5547}; 5547};
5548 5548
5549struct mlx5_ifc_create_rq_in_bits { 5549struct mlx5_ifc_create_rq_in_bits {
5550 u8 opcode[0x10]; 5550 u8 opcode[0x10];
5551 u8 reserved_0[0x10]; 5551 u8 reserved_at_10[0x10];
5552 5552
5553 u8 reserved_1[0x10]; 5553 u8 reserved_at_20[0x10];
5554 u8 op_mod[0x10]; 5554 u8 op_mod[0x10];
5555 5555
5556 u8 reserved_2[0xc0]; 5556 u8 reserved_at_40[0xc0];
5557 5557
5558 struct mlx5_ifc_rqc_bits ctx; 5558 struct mlx5_ifc_rqc_bits ctx;
5559}; 5559};
5560 5560
5561struct mlx5_ifc_create_rmp_out_bits { 5561struct mlx5_ifc_create_rmp_out_bits {
5562 u8 status[0x8]; 5562 u8 status[0x8];
5563 u8 reserved_0[0x18]; 5563 u8 reserved_at_8[0x18];
5564 5564
5565 u8 syndrome[0x20]; 5565 u8 syndrome[0x20];
5566 5566
5567 u8 reserved_1[0x8]; 5567 u8 reserved_at_40[0x8];
5568 u8 rmpn[0x18]; 5568 u8 rmpn[0x18];
5569 5569
5570 u8 reserved_2[0x20]; 5570 u8 reserved_at_60[0x20];
5571}; 5571};
5572 5572
5573struct mlx5_ifc_create_rmp_in_bits { 5573struct mlx5_ifc_create_rmp_in_bits {
5574 u8 opcode[0x10]; 5574 u8 opcode[0x10];
5575 u8 reserved_0[0x10]; 5575 u8 reserved_at_10[0x10];
5576 5576
5577 u8 reserved_1[0x10]; 5577 u8 reserved_at_20[0x10];
5578 u8 op_mod[0x10]; 5578 u8 op_mod[0x10];
5579 5579
5580 u8 reserved_2[0xc0]; 5580 u8 reserved_at_40[0xc0];
5581 5581
5582 struct mlx5_ifc_rmpc_bits ctx; 5582 struct mlx5_ifc_rmpc_bits ctx;
5583}; 5583};
5584 5584
5585struct mlx5_ifc_create_qp_out_bits { 5585struct mlx5_ifc_create_qp_out_bits {
5586 u8 status[0x8]; 5586 u8 status[0x8];
5587 u8 reserved_0[0x18]; 5587 u8 reserved_at_8[0x18];
5588 5588
5589 u8 syndrome[0x20]; 5589 u8 syndrome[0x20];
5590 5590
5591 u8 reserved_1[0x8]; 5591 u8 reserved_at_40[0x8];
5592 u8 qpn[0x18]; 5592 u8 qpn[0x18];
5593 5593
5594 u8 reserved_2[0x20]; 5594 u8 reserved_at_60[0x20];
5595}; 5595};
5596 5596
5597struct mlx5_ifc_create_qp_in_bits { 5597struct mlx5_ifc_create_qp_in_bits {
5598 u8 opcode[0x10]; 5598 u8 opcode[0x10];
5599 u8 reserved_0[0x10]; 5599 u8 reserved_at_10[0x10];
5600 5600
5601 u8 reserved_1[0x10]; 5601 u8 reserved_at_20[0x10];
5602 u8 op_mod[0x10]; 5602 u8 op_mod[0x10];
5603 5603
5604 u8 reserved_2[0x40]; 5604 u8 reserved_at_40[0x40];
5605 5605
5606 u8 opt_param_mask[0x20]; 5606 u8 opt_param_mask[0x20];
5607 5607
5608 u8 reserved_3[0x20]; 5608 u8 reserved_at_a0[0x20];
5609 5609
5610 struct mlx5_ifc_qpc_bits qpc; 5610 struct mlx5_ifc_qpc_bits qpc;
5611 5611
5612 u8 reserved_4[0x80]; 5612 u8 reserved_at_800[0x80];
5613 5613
5614 u8 pas[0][0x40]; 5614 u8 pas[0][0x40];
5615}; 5615};
5616 5616
5617struct mlx5_ifc_create_psv_out_bits { 5617struct mlx5_ifc_create_psv_out_bits {
5618 u8 status[0x8]; 5618 u8 status[0x8];
5619 u8 reserved_0[0x18]; 5619 u8 reserved_at_8[0x18];
5620 5620
5621 u8 syndrome[0x20]; 5621 u8 syndrome[0x20];
5622 5622
5623 u8 reserved_1[0x40]; 5623 u8 reserved_at_40[0x40];
5624 5624
5625 u8 reserved_2[0x8]; 5625 u8 reserved_at_80[0x8];
5626 u8 psv0_index[0x18]; 5626 u8 psv0_index[0x18];
5627 5627
5628 u8 reserved_3[0x8]; 5628 u8 reserved_at_a0[0x8];
5629 u8 psv1_index[0x18]; 5629 u8 psv1_index[0x18];
5630 5630
5631 u8 reserved_4[0x8]; 5631 u8 reserved_at_c0[0x8];
5632 u8 psv2_index[0x18]; 5632 u8 psv2_index[0x18];
5633 5633
5634 u8 reserved_5[0x8]; 5634 u8 reserved_at_e0[0x8];
5635 u8 psv3_index[0x18]; 5635 u8 psv3_index[0x18];
5636}; 5636};
5637 5637
5638struct mlx5_ifc_create_psv_in_bits { 5638struct mlx5_ifc_create_psv_in_bits {
5639 u8 opcode[0x10]; 5639 u8 opcode[0x10];
5640 u8 reserved_0[0x10]; 5640 u8 reserved_at_10[0x10];
5641 5641
5642 u8 reserved_1[0x10]; 5642 u8 reserved_at_20[0x10];
5643 u8 op_mod[0x10]; 5643 u8 op_mod[0x10];
5644 5644
5645 u8 num_psv[0x4]; 5645 u8 num_psv[0x4];
5646 u8 reserved_2[0x4]; 5646 u8 reserved_at_44[0x4];
5647 u8 pd[0x18]; 5647 u8 pd[0x18];
5648 5648
5649 u8 reserved_3[0x20]; 5649 u8 reserved_at_60[0x20];
5650}; 5650};
5651 5651
5652struct mlx5_ifc_create_mkey_out_bits { 5652struct mlx5_ifc_create_mkey_out_bits {
5653 u8 status[0x8]; 5653 u8 status[0x8];
5654 u8 reserved_0[0x18]; 5654 u8 reserved_at_8[0x18];
5655 5655
5656 u8 syndrome[0x20]; 5656 u8 syndrome[0x20];
5657 5657
5658 u8 reserved_1[0x8]; 5658 u8 reserved_at_40[0x8];
5659 u8 mkey_index[0x18]; 5659 u8 mkey_index[0x18];
5660 5660
5661 u8 reserved_2[0x20]; 5661 u8 reserved_at_60[0x20];
5662}; 5662};
5663 5663
5664struct mlx5_ifc_create_mkey_in_bits { 5664struct mlx5_ifc_create_mkey_in_bits {
5665 u8 opcode[0x10]; 5665 u8 opcode[0x10];
5666 u8 reserved_0[0x10]; 5666 u8 reserved_at_10[0x10];
5667 5667
5668 u8 reserved_1[0x10]; 5668 u8 reserved_at_20[0x10];
5669 u8 op_mod[0x10]; 5669 u8 op_mod[0x10];
5670 5670
5671 u8 reserved_2[0x20]; 5671 u8 reserved_at_40[0x20];
5672 5672
5673 u8 pg_access[0x1]; 5673 u8 pg_access[0x1];
5674 u8 reserved_3[0x1f]; 5674 u8 reserved_at_61[0x1f];
5675 5675
5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry; 5676 struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
5677 5677
5678 u8 reserved_4[0x80]; 5678 u8 reserved_at_280[0x80];
5679 5679
5680 u8 translations_octword_actual_size[0x20]; 5680 u8 translations_octword_actual_size[0x20];
5681 5681
5682 u8 reserved_5[0x560]; 5682 u8 reserved_at_320[0x560];
5683 5683
5684 u8 klm_pas_mtt[0][0x20]; 5684 u8 klm_pas_mtt[0][0x20];
5685}; 5685};
5686 5686
5687struct mlx5_ifc_create_flow_table_out_bits { 5687struct mlx5_ifc_create_flow_table_out_bits {
5688 u8 status[0x8]; 5688 u8 status[0x8];
5689 u8 reserved_0[0x18]; 5689 u8 reserved_at_8[0x18];
5690 5690
5691 u8 syndrome[0x20]; 5691 u8 syndrome[0x20];
5692 5692
5693 u8 reserved_1[0x8]; 5693 u8 reserved_at_40[0x8];
5694 u8 table_id[0x18]; 5694 u8 table_id[0x18];
5695 5695
5696 u8 reserved_2[0x20]; 5696 u8 reserved_at_60[0x20];
5697}; 5697};
5698 5698
5699struct mlx5_ifc_create_flow_table_in_bits { 5699struct mlx5_ifc_create_flow_table_in_bits {
5700 u8 opcode[0x10]; 5700 u8 opcode[0x10];
5701 u8 reserved_0[0x10]; 5701 u8 reserved_at_10[0x10];
5702 5702
5703 u8 reserved_1[0x10]; 5703 u8 reserved_at_20[0x10];
5704 u8 op_mod[0x10]; 5704 u8 op_mod[0x10];
5705 5705
5706 u8 reserved_2[0x40]; 5706 u8 reserved_at_40[0x40];
5707 5707
5708 u8 table_type[0x8]; 5708 u8 table_type[0x8];
5709 u8 reserved_3[0x18]; 5709 u8 reserved_at_88[0x18];
5710 5710
5711 u8 reserved_4[0x20]; 5711 u8 reserved_at_a0[0x20];
5712 5712
5713 u8 reserved_5[0x4]; 5713 u8 reserved_at_c0[0x4];
5714 u8 table_miss_mode[0x4]; 5714 u8 table_miss_mode[0x4];
5715 u8 level[0x8]; 5715 u8 level[0x8];
5716 u8 reserved_6[0x8]; 5716 u8 reserved_at_d0[0x8];
5717 u8 log_size[0x8]; 5717 u8 log_size[0x8];
5718 5718
5719 u8 reserved_7[0x8]; 5719 u8 reserved_at_e0[0x8];
5720 u8 table_miss_id[0x18]; 5720 u8 table_miss_id[0x18];
5721 5721
5722 u8 reserved_8[0x100]; 5722 u8 reserved_at_100[0x100];
5723}; 5723};
5724 5724
5725struct mlx5_ifc_create_flow_group_out_bits { 5725struct mlx5_ifc_create_flow_group_out_bits {
5726 u8 status[0x8]; 5726 u8 status[0x8];
5727 u8 reserved_0[0x18]; 5727 u8 reserved_at_8[0x18];
5728 5728
5729 u8 syndrome[0x20]; 5729 u8 syndrome[0x20];
5730 5730
5731 u8 reserved_1[0x8]; 5731 u8 reserved_at_40[0x8];
5732 u8 group_id[0x18]; 5732 u8 group_id[0x18];
5733 5733
5734 u8 reserved_2[0x20]; 5734 u8 reserved_at_60[0x20];
5735}; 5735};
5736 5736
5737enum { 5737enum {
@@ -5742,134 +5742,134 @@ enum {
5742 5742
5743struct mlx5_ifc_create_flow_group_in_bits { 5743struct mlx5_ifc_create_flow_group_in_bits {
5744 u8 opcode[0x10]; 5744 u8 opcode[0x10];
5745 u8 reserved_0[0x10]; 5745 u8 reserved_at_10[0x10];
5746 5746
5747 u8 reserved_1[0x10]; 5747 u8 reserved_at_20[0x10];
5748 u8 op_mod[0x10]; 5748 u8 op_mod[0x10];
5749 5749
5750 u8 reserved_2[0x40]; 5750 u8 reserved_at_40[0x40];
5751 5751
5752 u8 table_type[0x8]; 5752 u8 table_type[0x8];
5753 u8 reserved_3[0x18]; 5753 u8 reserved_at_88[0x18];
5754 5754
5755 u8 reserved_4[0x8]; 5755 u8 reserved_at_a0[0x8];
5756 u8 table_id[0x18]; 5756 u8 table_id[0x18];
5757 5757
5758 u8 reserved_5[0x20]; 5758 u8 reserved_at_c0[0x20];
5759 5759
5760 u8 start_flow_index[0x20]; 5760 u8 start_flow_index[0x20];
5761 5761
5762 u8 reserved_6[0x20]; 5762 u8 reserved_at_100[0x20];
5763 5763
5764 u8 end_flow_index[0x20]; 5764 u8 end_flow_index[0x20];
5765 5765
5766 u8 reserved_7[0xa0]; 5766 u8 reserved_at_140[0xa0];
5767 5767
5768 u8 reserved_8[0x18]; 5768 u8 reserved_at_1e0[0x18];
5769 u8 match_criteria_enable[0x8]; 5769 u8 match_criteria_enable[0x8];
5770 5770
5771 struct mlx5_ifc_fte_match_param_bits match_criteria; 5771 struct mlx5_ifc_fte_match_param_bits match_criteria;
5772 5772
5773 u8 reserved_9[0xe00]; 5773 u8 reserved_at_1200[0xe00];
5774}; 5774};
5775 5775
5776struct mlx5_ifc_create_eq_out_bits { 5776struct mlx5_ifc_create_eq_out_bits {
5777 u8 status[0x8]; 5777 u8 status[0x8];
5778 u8 reserved_0[0x18]; 5778 u8 reserved_at_8[0x18];
5779 5779
5780 u8 syndrome[0x20]; 5780 u8 syndrome[0x20];
5781 5781
5782 u8 reserved_1[0x18]; 5782 u8 reserved_at_40[0x18];
5783 u8 eq_number[0x8]; 5783 u8 eq_number[0x8];
5784 5784
5785 u8 reserved_2[0x20]; 5785 u8 reserved_at_60[0x20];
5786}; 5786};
5787 5787
5788struct mlx5_ifc_create_eq_in_bits { 5788struct mlx5_ifc_create_eq_in_bits {
5789 u8 opcode[0x10]; 5789 u8 opcode[0x10];
5790 u8 reserved_0[0x10]; 5790 u8 reserved_at_10[0x10];
5791 5791
5792 u8 reserved_1[0x10]; 5792 u8 reserved_at_20[0x10];
5793 u8 op_mod[0x10]; 5793 u8 op_mod[0x10];
5794 5794
5795 u8 reserved_2[0x40]; 5795 u8 reserved_at_40[0x40];
5796 5796
5797 struct mlx5_ifc_eqc_bits eq_context_entry; 5797 struct mlx5_ifc_eqc_bits eq_context_entry;
5798 5798
5799 u8 reserved_3[0x40]; 5799 u8 reserved_at_280[0x40];
5800 5800
5801 u8 event_bitmask[0x40]; 5801 u8 event_bitmask[0x40];
5802 5802
5803 u8 reserved_4[0x580]; 5803 u8 reserved_at_300[0x580];
5804 5804
5805 u8 pas[0][0x40]; 5805 u8 pas[0][0x40];
5806}; 5806};
5807 5807
5808struct mlx5_ifc_create_dct_out_bits { 5808struct mlx5_ifc_create_dct_out_bits {
5809 u8 status[0x8]; 5809 u8 status[0x8];
5810 u8 reserved_0[0x18]; 5810 u8 reserved_at_8[0x18];
5811 5811
5812 u8 syndrome[0x20]; 5812 u8 syndrome[0x20];
5813 5813
5814 u8 reserved_1[0x8]; 5814 u8 reserved_at_40[0x8];
5815 u8 dctn[0x18]; 5815 u8 dctn[0x18];
5816 5816
5817 u8 reserved_2[0x20]; 5817 u8 reserved_at_60[0x20];
5818}; 5818};
5819 5819
5820struct mlx5_ifc_create_dct_in_bits { 5820struct mlx5_ifc_create_dct_in_bits {
5821 u8 opcode[0x10]; 5821 u8 opcode[0x10];
5822 u8 reserved_0[0x10]; 5822 u8 reserved_at_10[0x10];
5823 5823
5824 u8 reserved_1[0x10]; 5824 u8 reserved_at_20[0x10];
5825 u8 op_mod[0x10]; 5825 u8 op_mod[0x10];
5826 5826
5827 u8 reserved_2[0x40]; 5827 u8 reserved_at_40[0x40];
5828 5828
5829 struct mlx5_ifc_dctc_bits dct_context_entry; 5829 struct mlx5_ifc_dctc_bits dct_context_entry;
5830 5830
5831 u8 reserved_3[0x180]; 5831 u8 reserved_at_280[0x180];
5832}; 5832};
5833 5833
5834struct mlx5_ifc_create_cq_out_bits { 5834struct mlx5_ifc_create_cq_out_bits {
5835 u8 status[0x8]; 5835 u8 status[0x8];
5836 u8 reserved_0[0x18]; 5836 u8 reserved_at_8[0x18];
5837 5837
5838 u8 syndrome[0x20]; 5838 u8 syndrome[0x20];
5839 5839
5840 u8 reserved_1[0x8]; 5840 u8 reserved_at_40[0x8];
5841 u8 cqn[0x18]; 5841 u8 cqn[0x18];
5842 5842
5843 u8 reserved_2[0x20]; 5843 u8 reserved_at_60[0x20];
5844}; 5844};
5845 5845
5846struct mlx5_ifc_create_cq_in_bits { 5846struct mlx5_ifc_create_cq_in_bits {
5847 u8 opcode[0x10]; 5847 u8 opcode[0x10];
5848 u8 reserved_0[0x10]; 5848 u8 reserved_at_10[0x10];
5849 5849
5850 u8 reserved_1[0x10]; 5850 u8 reserved_at_20[0x10];
5851 u8 op_mod[0x10]; 5851 u8 op_mod[0x10];
5852 5852
5853 u8 reserved_2[0x40]; 5853 u8 reserved_at_40[0x40];
5854 5854
5855 struct mlx5_ifc_cqc_bits cq_context; 5855 struct mlx5_ifc_cqc_bits cq_context;
5856 5856
5857 u8 reserved_3[0x600]; 5857 u8 reserved_at_280[0x600];
5858 5858
5859 u8 pas[0][0x40]; 5859 u8 pas[0][0x40];
5860}; 5860};
5861 5861
5862struct mlx5_ifc_config_int_moderation_out_bits { 5862struct mlx5_ifc_config_int_moderation_out_bits {
5863 u8 status[0x8]; 5863 u8 status[0x8];
5864 u8 reserved_0[0x18]; 5864 u8 reserved_at_8[0x18];
5865 5865
5866 u8 syndrome[0x20]; 5866 u8 syndrome[0x20];
5867 5867
5868 u8 reserved_1[0x4]; 5868 u8 reserved_at_40[0x4];
5869 u8 min_delay[0xc]; 5869 u8 min_delay[0xc];
5870 u8 int_vector[0x10]; 5870 u8 int_vector[0x10];
5871 5871
5872 u8 reserved_2[0x20]; 5872 u8 reserved_at_60[0x20];
5873}; 5873};
5874 5874
5875enum { 5875enum {
@@ -5879,49 +5879,49 @@ enum {
5879 5879
5880struct mlx5_ifc_config_int_moderation_in_bits { 5880struct mlx5_ifc_config_int_moderation_in_bits {
5881 u8 opcode[0x10]; 5881 u8 opcode[0x10];
5882 u8 reserved_0[0x10]; 5882 u8 reserved_at_10[0x10];
5883 5883
5884 u8 reserved_1[0x10]; 5884 u8 reserved_at_20[0x10];
5885 u8 op_mod[0x10]; 5885 u8 op_mod[0x10];
5886 5886
5887 u8 reserved_2[0x4]; 5887 u8 reserved_at_40[0x4];
5888 u8 min_delay[0xc]; 5888 u8 min_delay[0xc];
5889 u8 int_vector[0x10]; 5889 u8 int_vector[0x10];
5890 5890
5891 u8 reserved_3[0x20]; 5891 u8 reserved_at_60[0x20];
5892}; 5892};
5893 5893
5894struct mlx5_ifc_attach_to_mcg_out_bits { 5894struct mlx5_ifc_attach_to_mcg_out_bits {
5895 u8 status[0x8]; 5895 u8 status[0x8];
5896 u8 reserved_0[0x18]; 5896 u8 reserved_at_8[0x18];
5897 5897
5898 u8 syndrome[0x20]; 5898 u8 syndrome[0x20];
5899 5899
5900 u8 reserved_1[0x40]; 5900 u8 reserved_at_40[0x40];
5901}; 5901};
5902 5902
5903struct mlx5_ifc_attach_to_mcg_in_bits { 5903struct mlx5_ifc_attach_to_mcg_in_bits {
5904 u8 opcode[0x10]; 5904 u8 opcode[0x10];
5905 u8 reserved_0[0x10]; 5905 u8 reserved_at_10[0x10];
5906 5906
5907 u8 reserved_1[0x10]; 5907 u8 reserved_at_20[0x10];
5908 u8 op_mod[0x10]; 5908 u8 op_mod[0x10];
5909 5909
5910 u8 reserved_2[0x8]; 5910 u8 reserved_at_40[0x8];
5911 u8 qpn[0x18]; 5911 u8 qpn[0x18];
5912 5912
5913 u8 reserved_3[0x20]; 5913 u8 reserved_at_60[0x20];
5914 5914
5915 u8 multicast_gid[16][0x8]; 5915 u8 multicast_gid[16][0x8];
5916}; 5916};
5917 5917
5918struct mlx5_ifc_arm_xrc_srq_out_bits { 5918struct mlx5_ifc_arm_xrc_srq_out_bits {
5919 u8 status[0x8]; 5919 u8 status[0x8];
5920 u8 reserved_0[0x18]; 5920 u8 reserved_at_8[0x18];
5921 5921
5922 u8 syndrome[0x20]; 5922 u8 syndrome[0x20];
5923 5923
5924 u8 reserved_1[0x40]; 5924 u8 reserved_at_40[0x40];
5925}; 5925};
5926 5926
5927enum { 5927enum {
@@ -5930,25 +5930,25 @@ enum {
5930 5930
5931struct mlx5_ifc_arm_xrc_srq_in_bits { 5931struct mlx5_ifc_arm_xrc_srq_in_bits {
5932 u8 opcode[0x10]; 5932 u8 opcode[0x10];
5933 u8 reserved_0[0x10]; 5933 u8 reserved_at_10[0x10];
5934 5934
5935 u8 reserved_1[0x10]; 5935 u8 reserved_at_20[0x10];
5936 u8 op_mod[0x10]; 5936 u8 op_mod[0x10];
5937 5937
5938 u8 reserved_2[0x8]; 5938 u8 reserved_at_40[0x8];
5939 u8 xrc_srqn[0x18]; 5939 u8 xrc_srqn[0x18];
5940 5940
5941 u8 reserved_3[0x10]; 5941 u8 reserved_at_60[0x10];
5942 u8 lwm[0x10]; 5942 u8 lwm[0x10];
5943}; 5943};
5944 5944
5945struct mlx5_ifc_arm_rq_out_bits { 5945struct mlx5_ifc_arm_rq_out_bits {
5946 u8 status[0x8]; 5946 u8 status[0x8];
5947 u8 reserved_0[0x18]; 5947 u8 reserved_at_8[0x18];
5948 5948
5949 u8 syndrome[0x20]; 5949 u8 syndrome[0x20];
5950 5950
5951 u8 reserved_1[0x40]; 5951 u8 reserved_at_40[0x40];
5952}; 5952};
5953 5953
5954enum { 5954enum {
@@ -5957,179 +5957,179 @@ enum {
5957 5957
5958struct mlx5_ifc_arm_rq_in_bits { 5958struct mlx5_ifc_arm_rq_in_bits {
5959 u8 opcode[0x10]; 5959 u8 opcode[0x10];
5960 u8 reserved_0[0x10]; 5960 u8 reserved_at_10[0x10];
5961 5961
5962 u8 reserved_1[0x10]; 5962 u8 reserved_at_20[0x10];
5963 u8 op_mod[0x10]; 5963 u8 op_mod[0x10];
5964 5964
5965 u8 reserved_2[0x8]; 5965 u8 reserved_at_40[0x8];
5966 u8 srq_number[0x18]; 5966 u8 srq_number[0x18];
5967 5967
5968 u8 reserved_3[0x10]; 5968 u8 reserved_at_60[0x10];
5969 u8 lwm[0x10]; 5969 u8 lwm[0x10];
5970}; 5970};
5971 5971
5972struct mlx5_ifc_arm_dct_out_bits { 5972struct mlx5_ifc_arm_dct_out_bits {
5973 u8 status[0x8]; 5973 u8 status[0x8];
5974 u8 reserved_0[0x18]; 5974 u8 reserved_at_8[0x18];
5975 5975
5976 u8 syndrome[0x20]; 5976 u8 syndrome[0x20];
5977 5977
5978 u8 reserved_1[0x40]; 5978 u8 reserved_at_40[0x40];
5979}; 5979};
5980 5980
5981struct mlx5_ifc_arm_dct_in_bits { 5981struct mlx5_ifc_arm_dct_in_bits {
5982 u8 opcode[0x10]; 5982 u8 opcode[0x10];
5983 u8 reserved_0[0x10]; 5983 u8 reserved_at_10[0x10];
5984 5984
5985 u8 reserved_1[0x10]; 5985 u8 reserved_at_20[0x10];
5986 u8 op_mod[0x10]; 5986 u8 op_mod[0x10];
5987 5987
5988 u8 reserved_2[0x8]; 5988 u8 reserved_at_40[0x8];
5989 u8 dct_number[0x18]; 5989 u8 dct_number[0x18];
5990 5990
5991 u8 reserved_3[0x20]; 5991 u8 reserved_at_60[0x20];
5992}; 5992};
5993 5993
5994struct mlx5_ifc_alloc_xrcd_out_bits { 5994struct mlx5_ifc_alloc_xrcd_out_bits {
5995 u8 status[0x8]; 5995 u8 status[0x8];
5996 u8 reserved_0[0x18]; 5996 u8 reserved_at_8[0x18];
5997 5997
5998 u8 syndrome[0x20]; 5998 u8 syndrome[0x20];
5999 5999
6000 u8 reserved_1[0x8]; 6000 u8 reserved_at_40[0x8];
6001 u8 xrcd[0x18]; 6001 u8 xrcd[0x18];
6002 6002
6003 u8 reserved_2[0x20]; 6003 u8 reserved_at_60[0x20];
6004}; 6004};
6005 6005
6006struct mlx5_ifc_alloc_xrcd_in_bits { 6006struct mlx5_ifc_alloc_xrcd_in_bits {
6007 u8 opcode[0x10]; 6007 u8 opcode[0x10];
6008 u8 reserved_0[0x10]; 6008 u8 reserved_at_10[0x10];
6009 6009
6010 u8 reserved_1[0x10]; 6010 u8 reserved_at_20[0x10];
6011 u8 op_mod[0x10]; 6011 u8 op_mod[0x10];
6012 6012
6013 u8 reserved_2[0x40]; 6013 u8 reserved_at_40[0x40];
6014}; 6014};
6015 6015
6016struct mlx5_ifc_alloc_uar_out_bits { 6016struct mlx5_ifc_alloc_uar_out_bits {
6017 u8 status[0x8]; 6017 u8 status[0x8];
6018 u8 reserved_0[0x18]; 6018 u8 reserved_at_8[0x18];
6019 6019
6020 u8 syndrome[0x20]; 6020 u8 syndrome[0x20];
6021 6021
6022 u8 reserved_1[0x8]; 6022 u8 reserved_at_40[0x8];
6023 u8 uar[0x18]; 6023 u8 uar[0x18];
6024 6024
6025 u8 reserved_2[0x20]; 6025 u8 reserved_at_60[0x20];
6026}; 6026};
6027 6027
6028struct mlx5_ifc_alloc_uar_in_bits { 6028struct mlx5_ifc_alloc_uar_in_bits {
6029 u8 opcode[0x10]; 6029 u8 opcode[0x10];
6030 u8 reserved_0[0x10]; 6030 u8 reserved_at_10[0x10];
6031 6031
6032 u8 reserved_1[0x10]; 6032 u8 reserved_at_20[0x10];
6033 u8 op_mod[0x10]; 6033 u8 op_mod[0x10];
6034 6034
6035 u8 reserved_2[0x40]; 6035 u8 reserved_at_40[0x40];
6036}; 6036};
6037 6037
6038struct mlx5_ifc_alloc_transport_domain_out_bits { 6038struct mlx5_ifc_alloc_transport_domain_out_bits {
6039 u8 status[0x8]; 6039 u8 status[0x8];
6040 u8 reserved_0[0x18]; 6040 u8 reserved_at_8[0x18];
6041 6041
6042 u8 syndrome[0x20]; 6042 u8 syndrome[0x20];
6043 6043
6044 u8 reserved_1[0x8]; 6044 u8 reserved_at_40[0x8];
6045 u8 transport_domain[0x18]; 6045 u8 transport_domain[0x18];
6046 6046
6047 u8 reserved_2[0x20]; 6047 u8 reserved_at_60[0x20];
6048}; 6048};
6049 6049
6050struct mlx5_ifc_alloc_transport_domain_in_bits { 6050struct mlx5_ifc_alloc_transport_domain_in_bits {
6051 u8 opcode[0x10]; 6051 u8 opcode[0x10];
6052 u8 reserved_0[0x10]; 6052 u8 reserved_at_10[0x10];
6053 6053
6054 u8 reserved_1[0x10]; 6054 u8 reserved_at_20[0x10];
6055 u8 op_mod[0x10]; 6055 u8 op_mod[0x10];
6056 6056
6057 u8 reserved_2[0x40]; 6057 u8 reserved_at_40[0x40];
6058}; 6058};
6059 6059
6060struct mlx5_ifc_alloc_q_counter_out_bits { 6060struct mlx5_ifc_alloc_q_counter_out_bits {
6061 u8 status[0x8]; 6061 u8 status[0x8];
6062 u8 reserved_0[0x18]; 6062 u8 reserved_at_8[0x18];
6063 6063
6064 u8 syndrome[0x20]; 6064 u8 syndrome[0x20];
6065 6065
6066 u8 reserved_1[0x18]; 6066 u8 reserved_at_40[0x18];
6067 u8 counter_set_id[0x8]; 6067 u8 counter_set_id[0x8];
6068 6068
6069 u8 reserved_2[0x20]; 6069 u8 reserved_at_60[0x20];
6070}; 6070};
6071 6071
6072struct mlx5_ifc_alloc_q_counter_in_bits { 6072struct mlx5_ifc_alloc_q_counter_in_bits {
6073 u8 opcode[0x10]; 6073 u8 opcode[0x10];
6074 u8 reserved_0[0x10]; 6074 u8 reserved_at_10[0x10];
6075 6075
6076 u8 reserved_1[0x10]; 6076 u8 reserved_at_20[0x10];
6077 u8 op_mod[0x10]; 6077 u8 op_mod[0x10];
6078 6078
6079 u8 reserved_2[0x40]; 6079 u8 reserved_at_40[0x40];
6080}; 6080};
6081 6081
6082struct mlx5_ifc_alloc_pd_out_bits { 6082struct mlx5_ifc_alloc_pd_out_bits {
6083 u8 status[0x8]; 6083 u8 status[0x8];
6084 u8 reserved_0[0x18]; 6084 u8 reserved_at_8[0x18];
6085 6085
6086 u8 syndrome[0x20]; 6086 u8 syndrome[0x20];
6087 6087
6088 u8 reserved_1[0x8]; 6088 u8 reserved_at_40[0x8];
6089 u8 pd[0x18]; 6089 u8 pd[0x18];
6090 6090
6091 u8 reserved_2[0x20]; 6091 u8 reserved_at_60[0x20];
6092}; 6092};
6093 6093
6094struct mlx5_ifc_alloc_pd_in_bits { 6094struct mlx5_ifc_alloc_pd_in_bits {
6095 u8 opcode[0x10]; 6095 u8 opcode[0x10];
6096 u8 reserved_0[0x10]; 6096 u8 reserved_at_10[0x10];
6097 6097
6098 u8 reserved_1[0x10]; 6098 u8 reserved_at_20[0x10];
6099 u8 op_mod[0x10]; 6099 u8 op_mod[0x10];
6100 6100
6101 u8 reserved_2[0x40]; 6101 u8 reserved_at_40[0x40];
6102}; 6102};
6103 6103
6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits { 6104struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
6105 u8 status[0x8]; 6105 u8 status[0x8];
6106 u8 reserved_0[0x18]; 6106 u8 reserved_at_8[0x18];
6107 6107
6108 u8 syndrome[0x20]; 6108 u8 syndrome[0x20];
6109 6109
6110 u8 reserved_1[0x40]; 6110 u8 reserved_at_40[0x40];
6111}; 6111};
6112 6112
6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits { 6113struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
6114 u8 opcode[0x10]; 6114 u8 opcode[0x10];
6115 u8 reserved_0[0x10]; 6115 u8 reserved_at_10[0x10];
6116 6116
6117 u8 reserved_1[0x10]; 6117 u8 reserved_at_20[0x10];
6118 u8 op_mod[0x10]; 6118 u8 op_mod[0x10];
6119 6119
6120 u8 reserved_2[0x20]; 6120 u8 reserved_at_40[0x20];
6121 6121
6122 u8 reserved_3[0x10]; 6122 u8 reserved_at_60[0x10];
6123 u8 vxlan_udp_port[0x10]; 6123 u8 vxlan_udp_port[0x10];
6124}; 6124};
6125 6125
6126struct mlx5_ifc_access_register_out_bits { 6126struct mlx5_ifc_access_register_out_bits {
6127 u8 status[0x8]; 6127 u8 status[0x8];
6128 u8 reserved_0[0x18]; 6128 u8 reserved_at_8[0x18];
6129 6129
6130 u8 syndrome[0x20]; 6130 u8 syndrome[0x20];
6131 6131
6132 u8 reserved_1[0x40]; 6132 u8 reserved_at_40[0x40];
6133 6133
6134 u8 register_data[0][0x20]; 6134 u8 register_data[0][0x20];
6135}; 6135};
@@ -6141,12 +6141,12 @@ enum {
6141 6141
6142struct mlx5_ifc_access_register_in_bits { 6142struct mlx5_ifc_access_register_in_bits {
6143 u8 opcode[0x10]; 6143 u8 opcode[0x10];
6144 u8 reserved_0[0x10]; 6144 u8 reserved_at_10[0x10];
6145 6145
6146 u8 reserved_1[0x10]; 6146 u8 reserved_at_20[0x10];
6147 u8 op_mod[0x10]; 6147 u8 op_mod[0x10];
6148 6148
6149 u8 reserved_2[0x10]; 6149 u8 reserved_at_40[0x10];
6150 u8 register_id[0x10]; 6150 u8 register_id[0x10];
6151 6151
6152 u8 argument[0x20]; 6152 u8 argument[0x20];
@@ -6159,24 +6159,24 @@ struct mlx5_ifc_sltp_reg_bits {
6159 u8 version[0x4]; 6159 u8 version[0x4];
6160 u8 local_port[0x8]; 6160 u8 local_port[0x8];
6161 u8 pnat[0x2]; 6161 u8 pnat[0x2];
6162 u8 reserved_0[0x2]; 6162 u8 reserved_at_12[0x2];
6163 u8 lane[0x4]; 6163 u8 lane[0x4];
6164 u8 reserved_1[0x8]; 6164 u8 reserved_at_18[0x8];
6165 6165
6166 u8 reserved_2[0x20]; 6166 u8 reserved_at_20[0x20];
6167 6167
6168 u8 reserved_3[0x7]; 6168 u8 reserved_at_40[0x7];
6169 u8 polarity[0x1]; 6169 u8 polarity[0x1];
6170 u8 ob_tap0[0x8]; 6170 u8 ob_tap0[0x8];
6171 u8 ob_tap1[0x8]; 6171 u8 ob_tap1[0x8];
6172 u8 ob_tap2[0x8]; 6172 u8 ob_tap2[0x8];
6173 6173
6174 u8 reserved_4[0xc]; 6174 u8 reserved_at_60[0xc];
6175 u8 ob_preemp_mode[0x4]; 6175 u8 ob_preemp_mode[0x4];
6176 u8 ob_reg[0x8]; 6176 u8 ob_reg[0x8];
6177 u8 ob_bias[0x8]; 6177 u8 ob_bias[0x8];
6178 6178
6179 u8 reserved_5[0x20]; 6179 u8 reserved_at_80[0x20];
6180}; 6180};
6181 6181
6182struct mlx5_ifc_slrg_reg_bits { 6182struct mlx5_ifc_slrg_reg_bits {
@@ -6184,36 +6184,36 @@ struct mlx5_ifc_slrg_reg_bits {
6184 u8 version[0x4]; 6184 u8 version[0x4];
6185 u8 local_port[0x8]; 6185 u8 local_port[0x8];
6186 u8 pnat[0x2]; 6186 u8 pnat[0x2];
6187 u8 reserved_0[0x2]; 6187 u8 reserved_at_12[0x2];
6188 u8 lane[0x4]; 6188 u8 lane[0x4];
6189 u8 reserved_1[0x8]; 6189 u8 reserved_at_18[0x8];
6190 6190
6191 u8 time_to_link_up[0x10]; 6191 u8 time_to_link_up[0x10];
6192 u8 reserved_2[0xc]; 6192 u8 reserved_at_30[0xc];
6193 u8 grade_lane_speed[0x4]; 6193 u8 grade_lane_speed[0x4];
6194 6194
6195 u8 grade_version[0x8]; 6195 u8 grade_version[0x8];
6196 u8 grade[0x18]; 6196 u8 grade[0x18];
6197 6197
6198 u8 reserved_3[0x4]; 6198 u8 reserved_at_60[0x4];
6199 u8 height_grade_type[0x4]; 6199 u8 height_grade_type[0x4];
6200 u8 height_grade[0x18]; 6200 u8 height_grade[0x18];
6201 6201
6202 u8 height_dz[0x10]; 6202 u8 height_dz[0x10];
6203 u8 height_dv[0x10]; 6203 u8 height_dv[0x10];
6204 6204
6205 u8 reserved_4[0x10]; 6205 u8 reserved_at_a0[0x10];
6206 u8 height_sigma[0x10]; 6206 u8 height_sigma[0x10];
6207 6207
6208 u8 reserved_5[0x20]; 6208 u8 reserved_at_c0[0x20];
6209 6209
6210 u8 reserved_6[0x4]; 6210 u8 reserved_at_e0[0x4];
6211 u8 phase_grade_type[0x4]; 6211 u8 phase_grade_type[0x4];
6212 u8 phase_grade[0x18]; 6212 u8 phase_grade[0x18];
6213 6213
6214 u8 reserved_7[0x8]; 6214 u8 reserved_at_100[0x8];
6215 u8 phase_eo_pos[0x8]; 6215 u8 phase_eo_pos[0x8];
6216 u8 reserved_8[0x8]; 6216 u8 reserved_at_110[0x8];
6217 u8 phase_eo_neg[0x8]; 6217 u8 phase_eo_neg[0x8];
6218 6218
6219 u8 ffe_set_tested[0x10]; 6219 u8 ffe_set_tested[0x10];
@@ -6221,70 +6221,70 @@ struct mlx5_ifc_slrg_reg_bits {
6221}; 6221};
6222 6222
6223struct mlx5_ifc_pvlc_reg_bits { 6223struct mlx5_ifc_pvlc_reg_bits {
6224 u8 reserved_0[0x8]; 6224 u8 reserved_at_0[0x8];
6225 u8 local_port[0x8]; 6225 u8 local_port[0x8];
6226 u8 reserved_1[0x10]; 6226 u8 reserved_at_10[0x10];
6227 6227
6228 u8 reserved_2[0x1c]; 6228 u8 reserved_at_20[0x1c];
6229 u8 vl_hw_cap[0x4]; 6229 u8 vl_hw_cap[0x4];
6230 6230
6231 u8 reserved_3[0x1c]; 6231 u8 reserved_at_40[0x1c];
6232 u8 vl_admin[0x4]; 6232 u8 vl_admin[0x4];
6233 6233
6234 u8 reserved_4[0x1c]; 6234 u8 reserved_at_60[0x1c];
6235 u8 vl_operational[0x4]; 6235 u8 vl_operational[0x4];
6236}; 6236};
6237 6237
6238struct mlx5_ifc_pude_reg_bits { 6238struct mlx5_ifc_pude_reg_bits {
6239 u8 swid[0x8]; 6239 u8 swid[0x8];
6240 u8 local_port[0x8]; 6240 u8 local_port[0x8];
6241 u8 reserved_0[0x4]; 6241 u8 reserved_at_10[0x4];
6242 u8 admin_status[0x4]; 6242 u8 admin_status[0x4];
6243 u8 reserved_1[0x4]; 6243 u8 reserved_at_18[0x4];
6244 u8 oper_status[0x4]; 6244 u8 oper_status[0x4];
6245 6245
6246 u8 reserved_2[0x60]; 6246 u8 reserved_at_20[0x60];
6247}; 6247};
6248 6248
6249struct mlx5_ifc_ptys_reg_bits { 6249struct mlx5_ifc_ptys_reg_bits {
6250 u8 reserved_0[0x8]; 6250 u8 reserved_at_0[0x8];
6251 u8 local_port[0x8]; 6251 u8 local_port[0x8];
6252 u8 reserved_1[0xd]; 6252 u8 reserved_at_10[0xd];
6253 u8 proto_mask[0x3]; 6253 u8 proto_mask[0x3];
6254 6254
6255 u8 reserved_2[0x40]; 6255 u8 reserved_at_20[0x40];
6256 6256
6257 u8 eth_proto_capability[0x20]; 6257 u8 eth_proto_capability[0x20];
6258 6258
6259 u8 ib_link_width_capability[0x10]; 6259 u8 ib_link_width_capability[0x10];
6260 u8 ib_proto_capability[0x10]; 6260 u8 ib_proto_capability[0x10];
6261 6261
6262 u8 reserved_3[0x20]; 6262 u8 reserved_at_a0[0x20];
6263 6263
6264 u8 eth_proto_admin[0x20]; 6264 u8 eth_proto_admin[0x20];
6265 6265
6266 u8 ib_link_width_admin[0x10]; 6266 u8 ib_link_width_admin[0x10];
6267 u8 ib_proto_admin[0x10]; 6267 u8 ib_proto_admin[0x10];
6268 6268
6269 u8 reserved_4[0x20]; 6269 u8 reserved_at_100[0x20];
6270 6270
6271 u8 eth_proto_oper[0x20]; 6271 u8 eth_proto_oper[0x20];
6272 6272
6273 u8 ib_link_width_oper[0x10]; 6273 u8 ib_link_width_oper[0x10];
6274 u8 ib_proto_oper[0x10]; 6274 u8 ib_proto_oper[0x10];
6275 6275
6276 u8 reserved_5[0x20]; 6276 u8 reserved_at_160[0x20];
6277 6277
6278 u8 eth_proto_lp_advertise[0x20]; 6278 u8 eth_proto_lp_advertise[0x20];
6279 6279
6280 u8 reserved_6[0x60]; 6280 u8 reserved_at_1a0[0x60];
6281}; 6281};
6282 6282
6283struct mlx5_ifc_ptas_reg_bits { 6283struct mlx5_ifc_ptas_reg_bits {
6284 u8 reserved_0[0x20]; 6284 u8 reserved_at_0[0x20];
6285 6285
6286 u8 algorithm_options[0x10]; 6286 u8 algorithm_options[0x10];
6287 u8 reserved_1[0x4]; 6287 u8 reserved_at_30[0x4];
6288 u8 repetitions_mode[0x4]; 6288 u8 repetitions_mode[0x4];
6289 u8 num_of_repetitions[0x8]; 6289 u8 num_of_repetitions[0x8];
6290 6290
@@ -6310,13 +6310,13 @@ struct mlx5_ifc_ptas_reg_bits {
6310 u8 ndeo_error_threshold[0x10]; 6310 u8 ndeo_error_threshold[0x10];
6311 6311
6312 u8 mixer_offset_step_size[0x10]; 6312 u8 mixer_offset_step_size[0x10];
6313 u8 reserved_2[0x8]; 6313 u8 reserved_at_110[0x8];
6314 u8 mix90_phase_for_voltage_bath[0x8]; 6314 u8 mix90_phase_for_voltage_bath[0x8];
6315 6315
6316 u8 mixer_offset_start[0x10]; 6316 u8 mixer_offset_start[0x10];
6317 u8 mixer_offset_end[0x10]; 6317 u8 mixer_offset_end[0x10];
6318 6318
6319 u8 reserved_3[0x15]; 6319 u8 reserved_at_140[0x15];
6320 u8 ber_test_time[0xb]; 6320 u8 ber_test_time[0xb];
6321}; 6321};
6322 6322
@@ -6324,154 +6324,154 @@ struct mlx5_ifc_pspa_reg_bits {
6324 u8 swid[0x8]; 6324 u8 swid[0x8];
6325 u8 local_port[0x8]; 6325 u8 local_port[0x8];
6326 u8 sub_port[0x8]; 6326 u8 sub_port[0x8];
6327 u8 reserved_0[0x8]; 6327 u8 reserved_at_18[0x8];
6328 6328
6329 u8 reserved_1[0x20]; 6329 u8 reserved_at_20[0x20];
6330}; 6330};
6331 6331
6332struct mlx5_ifc_pqdr_reg_bits { 6332struct mlx5_ifc_pqdr_reg_bits {
6333 u8 reserved_0[0x8]; 6333 u8 reserved_at_0[0x8];
6334 u8 local_port[0x8]; 6334 u8 local_port[0x8];
6335 u8 reserved_1[0x5]; 6335 u8 reserved_at_10[0x5];
6336 u8 prio[0x3]; 6336 u8 prio[0x3];
6337 u8 reserved_2[0x6]; 6337 u8 reserved_at_18[0x6];
6338 u8 mode[0x2]; 6338 u8 mode[0x2];
6339 6339
6340 u8 reserved_3[0x20]; 6340 u8 reserved_at_20[0x20];
6341 6341
6342 u8 reserved_4[0x10]; 6342 u8 reserved_at_40[0x10];
6343 u8 min_threshold[0x10]; 6343 u8 min_threshold[0x10];
6344 6344
6345 u8 reserved_5[0x10]; 6345 u8 reserved_at_60[0x10];
6346 u8 max_threshold[0x10]; 6346 u8 max_threshold[0x10];
6347 6347
6348 u8 reserved_6[0x10]; 6348 u8 reserved_at_80[0x10];
6349 u8 mark_probability_denominator[0x10]; 6349 u8 mark_probability_denominator[0x10];
6350 6350
6351 u8 reserved_7[0x60]; 6351 u8 reserved_at_a0[0x60];
6352}; 6352};
6353 6353
6354struct mlx5_ifc_ppsc_reg_bits { 6354struct mlx5_ifc_ppsc_reg_bits {
6355 u8 reserved_0[0x8]; 6355 u8 reserved_at_0[0x8];
6356 u8 local_port[0x8]; 6356 u8 local_port[0x8];
6357 u8 reserved_1[0x10]; 6357 u8 reserved_at_10[0x10];
6358 6358
6359 u8 reserved_2[0x60]; 6359 u8 reserved_at_20[0x60];
6360 6360
6361 u8 reserved_3[0x1c]; 6361 u8 reserved_at_80[0x1c];
6362 u8 wrps_admin[0x4]; 6362 u8 wrps_admin[0x4];
6363 6363
6364 u8 reserved_4[0x1c]; 6364 u8 reserved_at_a0[0x1c];
6365 u8 wrps_status[0x4]; 6365 u8 wrps_status[0x4];
6366 6366
6367 u8 reserved_5[0x8]; 6367 u8 reserved_at_c0[0x8];
6368 u8 up_threshold[0x8]; 6368 u8 up_threshold[0x8];
6369 u8 reserved_6[0x8]; 6369 u8 reserved_at_d0[0x8];
6370 u8 down_threshold[0x8]; 6370 u8 down_threshold[0x8];
6371 6371
6372 u8 reserved_7[0x20]; 6372 u8 reserved_at_e0[0x20];
6373 6373
6374 u8 reserved_8[0x1c]; 6374 u8 reserved_at_100[0x1c];
6375 u8 srps_admin[0x4]; 6375 u8 srps_admin[0x4];
6376 6376
6377 u8 reserved_9[0x1c]; 6377 u8 reserved_at_120[0x1c];
6378 u8 srps_status[0x4]; 6378 u8 srps_status[0x4];
6379 6379
6380 u8 reserved_10[0x40]; 6380 u8 reserved_at_140[0x40];
6381}; 6381};
6382 6382
6383struct mlx5_ifc_pplr_reg_bits { 6383struct mlx5_ifc_pplr_reg_bits {
6384 u8 reserved_0[0x8]; 6384 u8 reserved_at_0[0x8];
6385 u8 local_port[0x8]; 6385 u8 local_port[0x8];
6386 u8 reserved_1[0x10]; 6386 u8 reserved_at_10[0x10];
6387 6387
6388 u8 reserved_2[0x8]; 6388 u8 reserved_at_20[0x8];
6389 u8 lb_cap[0x8]; 6389 u8 lb_cap[0x8];
6390 u8 reserved_3[0x8]; 6390 u8 reserved_at_30[0x8];
6391 u8 lb_en[0x8]; 6391 u8 lb_en[0x8];
6392}; 6392};
6393 6393
6394struct mlx5_ifc_pplm_reg_bits { 6394struct mlx5_ifc_pplm_reg_bits {
6395 u8 reserved_0[0x8]; 6395 u8 reserved_at_0[0x8];
6396 u8 local_port[0x8]; 6396 u8 local_port[0x8];
6397 u8 reserved_1[0x10]; 6397 u8 reserved_at_10[0x10];
6398 6398
6399 u8 reserved_2[0x20]; 6399 u8 reserved_at_20[0x20];
6400 6400
6401 u8 port_profile_mode[0x8]; 6401 u8 port_profile_mode[0x8];
6402 u8 static_port_profile[0x8]; 6402 u8 static_port_profile[0x8];
6403 u8 active_port_profile[0x8]; 6403 u8 active_port_profile[0x8];
6404 u8 reserved_3[0x8]; 6404 u8 reserved_at_58[0x8];
6405 6405
6406 u8 retransmission_active[0x8]; 6406 u8 retransmission_active[0x8];
6407 u8 fec_mode_active[0x18]; 6407 u8 fec_mode_active[0x18];
6408 6408
6409 u8 reserved_4[0x20]; 6409 u8 reserved_at_80[0x20];
6410}; 6410};
6411 6411
6412struct mlx5_ifc_ppcnt_reg_bits { 6412struct mlx5_ifc_ppcnt_reg_bits {
6413 u8 swid[0x8]; 6413 u8 swid[0x8];
6414 u8 local_port[0x8]; 6414 u8 local_port[0x8];
6415 u8 pnat[0x2]; 6415 u8 pnat[0x2];
6416 u8 reserved_0[0x8]; 6416 u8 reserved_at_12[0x8];
6417 u8 grp[0x6]; 6417 u8 grp[0x6];
6418 6418
6419 u8 clr[0x1]; 6419 u8 clr[0x1];
6420 u8 reserved_1[0x1c]; 6420 u8 reserved_at_21[0x1c];
6421 u8 prio_tc[0x3]; 6421 u8 prio_tc[0x3];
6422 6422
6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; 6423 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
6424}; 6424};
6425 6425
6426struct mlx5_ifc_ppad_reg_bits { 6426struct mlx5_ifc_ppad_reg_bits {
6427 u8 reserved_0[0x3]; 6427 u8 reserved_at_0[0x3];
6428 u8 single_mac[0x1]; 6428 u8 single_mac[0x1];
6429 u8 reserved_1[0x4]; 6429 u8 reserved_at_4[0x4];
6430 u8 local_port[0x8]; 6430 u8 local_port[0x8];
6431 u8 mac_47_32[0x10]; 6431 u8 mac_47_32[0x10];
6432 6432
6433 u8 mac_31_0[0x20]; 6433 u8 mac_31_0[0x20];
6434 6434
6435 u8 reserved_2[0x40]; 6435 u8 reserved_at_40[0x40];
6436}; 6436};
6437 6437
6438struct mlx5_ifc_pmtu_reg_bits { 6438struct mlx5_ifc_pmtu_reg_bits {
6439 u8 reserved_0[0x8]; 6439 u8 reserved_at_0[0x8];
6440 u8 local_port[0x8]; 6440 u8 local_port[0x8];
6441 u8 reserved_1[0x10]; 6441 u8 reserved_at_10[0x10];
6442 6442
6443 u8 max_mtu[0x10]; 6443 u8 max_mtu[0x10];
6444 u8 reserved_2[0x10]; 6444 u8 reserved_at_30[0x10];
6445 6445
6446 u8 admin_mtu[0x10]; 6446 u8 admin_mtu[0x10];
6447 u8 reserved_3[0x10]; 6447 u8 reserved_at_50[0x10];
6448 6448
6449 u8 oper_mtu[0x10]; 6449 u8 oper_mtu[0x10];
6450 u8 reserved_4[0x10]; 6450 u8 reserved_at_70[0x10];
6451}; 6451};
6452 6452
6453struct mlx5_ifc_pmpr_reg_bits { 6453struct mlx5_ifc_pmpr_reg_bits {
6454 u8 reserved_0[0x8]; 6454 u8 reserved_at_0[0x8];
6455 u8 module[0x8]; 6455 u8 module[0x8];
6456 u8 reserved_1[0x10]; 6456 u8 reserved_at_10[0x10];
6457 6457
6458 u8 reserved_2[0x18]; 6458 u8 reserved_at_20[0x18];
6459 u8 attenuation_5g[0x8]; 6459 u8 attenuation_5g[0x8];
6460 6460
6461 u8 reserved_3[0x18]; 6461 u8 reserved_at_40[0x18];
6462 u8 attenuation_7g[0x8]; 6462 u8 attenuation_7g[0x8];
6463 6463
6464 u8 reserved_4[0x18]; 6464 u8 reserved_at_60[0x18];
6465 u8 attenuation_12g[0x8]; 6465 u8 attenuation_12g[0x8];
6466}; 6466};
6467 6467
6468struct mlx5_ifc_pmpe_reg_bits { 6468struct mlx5_ifc_pmpe_reg_bits {
6469 u8 reserved_0[0x8]; 6469 u8 reserved_at_0[0x8];
6470 u8 module[0x8]; 6470 u8 module[0x8];
6471 u8 reserved_1[0xc]; 6471 u8 reserved_at_10[0xc];
6472 u8 module_status[0x4]; 6472 u8 module_status[0x4];
6473 6473
6474 u8 reserved_2[0x60]; 6474 u8 reserved_at_20[0x60];
6475}; 6475};
6476 6476
6477struct mlx5_ifc_pmpc_reg_bits { 6477struct mlx5_ifc_pmpc_reg_bits {
@@ -6479,20 +6479,20 @@ struct mlx5_ifc_pmpc_reg_bits {
6479}; 6479};
6480 6480
6481struct mlx5_ifc_pmlpn_reg_bits { 6481struct mlx5_ifc_pmlpn_reg_bits {
6482 u8 reserved_0[0x4]; 6482 u8 reserved_at_0[0x4];
6483 u8 mlpn_status[0x4]; 6483 u8 mlpn_status[0x4];
6484 u8 local_port[0x8]; 6484 u8 local_port[0x8];
6485 u8 reserved_1[0x10]; 6485 u8 reserved_at_10[0x10];
6486 6486
6487 u8 e[0x1]; 6487 u8 e[0x1];
6488 u8 reserved_2[0x1f]; 6488 u8 reserved_at_21[0x1f];
6489}; 6489};
6490 6490
6491struct mlx5_ifc_pmlp_reg_bits { 6491struct mlx5_ifc_pmlp_reg_bits {
6492 u8 rxtx[0x1]; 6492 u8 rxtx[0x1];
6493 u8 reserved_0[0x7]; 6493 u8 reserved_at_1[0x7];
6494 u8 local_port[0x8]; 6494 u8 local_port[0x8];
6495 u8 reserved_1[0x8]; 6495 u8 reserved_at_10[0x8];
6496 u8 width[0x8]; 6496 u8 width[0x8];
6497 6497
6498 u8 lane0_module_mapping[0x20]; 6498 u8 lane0_module_mapping[0x20];
@@ -6503,36 +6503,36 @@ struct mlx5_ifc_pmlp_reg_bits {
6503 6503
6504 u8 lane3_module_mapping[0x20]; 6504 u8 lane3_module_mapping[0x20];
6505 6505
6506 u8 reserved_2[0x160]; 6506 u8 reserved_at_a0[0x160];
6507}; 6507};
6508 6508
6509struct mlx5_ifc_pmaos_reg_bits { 6509struct mlx5_ifc_pmaos_reg_bits {
6510 u8 reserved_0[0x8]; 6510 u8 reserved_at_0[0x8];
6511 u8 module[0x8]; 6511 u8 module[0x8];
6512 u8 reserved_1[0x4]; 6512 u8 reserved_at_10[0x4];
6513 u8 admin_status[0x4]; 6513 u8 admin_status[0x4];
6514 u8 reserved_2[0x4]; 6514 u8 reserved_at_18[0x4];
6515 u8 oper_status[0x4]; 6515 u8 oper_status[0x4];
6516 6516
6517 u8 ase[0x1]; 6517 u8 ase[0x1];
6518 u8 ee[0x1]; 6518 u8 ee[0x1];
6519 u8 reserved_3[0x1c]; 6519 u8 reserved_at_22[0x1c];
6520 u8 e[0x2]; 6520 u8 e[0x2];
6521 6521
6522 u8 reserved_4[0x40]; 6522 u8 reserved_at_40[0x40];
6523}; 6523};
6524 6524
6525struct mlx5_ifc_plpc_reg_bits { 6525struct mlx5_ifc_plpc_reg_bits {
6526 u8 reserved_0[0x4]; 6526 u8 reserved_at_0[0x4];
6527 u8 profile_id[0xc]; 6527 u8 profile_id[0xc];
6528 u8 reserved_1[0x4]; 6528 u8 reserved_at_10[0x4];
6529 u8 proto_mask[0x4]; 6529 u8 proto_mask[0x4];
6530 u8 reserved_2[0x8]; 6530 u8 reserved_at_18[0x8];
6531 6531
6532 u8 reserved_3[0x10]; 6532 u8 reserved_at_20[0x10];
6533 u8 lane_speed[0x10]; 6533 u8 lane_speed[0x10];
6534 6534
6535 u8 reserved_4[0x17]; 6535 u8 reserved_at_40[0x17];
6536 u8 lpbf[0x1]; 6536 u8 lpbf[0x1];
6537 u8 fec_mode_policy[0x8]; 6537 u8 fec_mode_policy[0x8];
6538 6538
@@ -6545,44 +6545,44 @@ struct mlx5_ifc_plpc_reg_bits {
6545 u8 retransmission_request_admin[0x8]; 6545 u8 retransmission_request_admin[0x8];
6546 u8 fec_mode_request_admin[0x18]; 6546 u8 fec_mode_request_admin[0x18];
6547 6547
6548 u8 reserved_5[0x80]; 6548 u8 reserved_at_c0[0x80];
6549}; 6549};
6550 6550
6551struct mlx5_ifc_plib_reg_bits { 6551struct mlx5_ifc_plib_reg_bits {
6552 u8 reserved_0[0x8]; 6552 u8 reserved_at_0[0x8];
6553 u8 local_port[0x8]; 6553 u8 local_port[0x8];
6554 u8 reserved_1[0x8]; 6554 u8 reserved_at_10[0x8];
6555 u8 ib_port[0x8]; 6555 u8 ib_port[0x8];
6556 6556
6557 u8 reserved_2[0x60]; 6557 u8 reserved_at_20[0x60];
6558}; 6558};
6559 6559
6560struct mlx5_ifc_plbf_reg_bits { 6560struct mlx5_ifc_plbf_reg_bits {
6561 u8 reserved_0[0x8]; 6561 u8 reserved_at_0[0x8];
6562 u8 local_port[0x8]; 6562 u8 local_port[0x8];
6563 u8 reserved_1[0xd]; 6563 u8 reserved_at_10[0xd];
6564 u8 lbf_mode[0x3]; 6564 u8 lbf_mode[0x3];
6565 6565
6566 u8 reserved_2[0x20]; 6566 u8 reserved_at_20[0x20];
6567}; 6567};
6568 6568
6569struct mlx5_ifc_pipg_reg_bits { 6569struct mlx5_ifc_pipg_reg_bits {
6570 u8 reserved_0[0x8]; 6570 u8 reserved_at_0[0x8];
6571 u8 local_port[0x8]; 6571 u8 local_port[0x8];
6572 u8 reserved_1[0x10]; 6572 u8 reserved_at_10[0x10];
6573 6573
6574 u8 dic[0x1]; 6574 u8 dic[0x1];
6575 u8 reserved_2[0x19]; 6575 u8 reserved_at_21[0x19];
6576 u8 ipg[0x4]; 6576 u8 ipg[0x4];
6577 u8 reserved_3[0x2]; 6577 u8 reserved_at_3e[0x2];
6578}; 6578};
6579 6579
6580struct mlx5_ifc_pifr_reg_bits { 6580struct mlx5_ifc_pifr_reg_bits {
6581 u8 reserved_0[0x8]; 6581 u8 reserved_at_0[0x8];
6582 u8 local_port[0x8]; 6582 u8 local_port[0x8];
6583 u8 reserved_1[0x10]; 6583 u8 reserved_at_10[0x10];
6584 6584
6585 u8 reserved_2[0xe0]; 6585 u8 reserved_at_20[0xe0];
6586 6586
6587 u8 port_filter[8][0x20]; 6587 u8 port_filter[8][0x20];
6588 6588
@@ -6590,36 +6590,36 @@ struct mlx5_ifc_pifr_reg_bits {
6590}; 6590};
6591 6591
6592struct mlx5_ifc_pfcc_reg_bits { 6592struct mlx5_ifc_pfcc_reg_bits {
6593 u8 reserved_0[0x8]; 6593 u8 reserved_at_0[0x8];
6594 u8 local_port[0x8]; 6594 u8 local_port[0x8];
6595 u8 reserved_1[0x10]; 6595 u8 reserved_at_10[0x10];
6596 6596
6597 u8 ppan[0x4]; 6597 u8 ppan[0x4];
6598 u8 reserved_2[0x4]; 6598 u8 reserved_at_24[0x4];
6599 u8 prio_mask_tx[0x8]; 6599 u8 prio_mask_tx[0x8];
6600 u8 reserved_3[0x8]; 6600 u8 reserved_at_30[0x8];
6601 u8 prio_mask_rx[0x8]; 6601 u8 prio_mask_rx[0x8];
6602 6602
6603 u8 pptx[0x1]; 6603 u8 pptx[0x1];
6604 u8 aptx[0x1]; 6604 u8 aptx[0x1];
6605 u8 reserved_4[0x6]; 6605 u8 reserved_at_42[0x6];
6606 u8 pfctx[0x8]; 6606 u8 pfctx[0x8];
6607 u8 reserved_5[0x10]; 6607 u8 reserved_at_50[0x10];
6608 6608
6609 u8 pprx[0x1]; 6609 u8 pprx[0x1];
6610 u8 aprx[0x1]; 6610 u8 aprx[0x1];
6611 u8 reserved_6[0x6]; 6611 u8 reserved_at_62[0x6];
6612 u8 pfcrx[0x8]; 6612 u8 pfcrx[0x8];
6613 u8 reserved_7[0x10]; 6613 u8 reserved_at_70[0x10];
6614 6614
6615 u8 reserved_8[0x80]; 6615 u8 reserved_at_80[0x80];
6616}; 6616};
6617 6617
6618struct mlx5_ifc_pelc_reg_bits { 6618struct mlx5_ifc_pelc_reg_bits {
6619 u8 op[0x4]; 6619 u8 op[0x4];
6620 u8 reserved_0[0x4]; 6620 u8 reserved_at_4[0x4];
6621 u8 local_port[0x8]; 6621 u8 local_port[0x8];
6622 u8 reserved_1[0x10]; 6622 u8 reserved_at_10[0x10];
6623 6623
6624 u8 op_admin[0x8]; 6624 u8 op_admin[0x8];
6625 u8 op_capability[0x8]; 6625 u8 op_capability[0x8];
@@ -6634,28 +6634,28 @@ struct mlx5_ifc_pelc_reg_bits {
6634 6634
6635 u8 active[0x40]; 6635 u8 active[0x40];
6636 6636
6637 u8 reserved_2[0x80]; 6637 u8 reserved_at_140[0x80];
6638}; 6638};
6639 6639
6640struct mlx5_ifc_peir_reg_bits { 6640struct mlx5_ifc_peir_reg_bits {
6641 u8 reserved_0[0x8]; 6641 u8 reserved_at_0[0x8];
6642 u8 local_port[0x8]; 6642 u8 local_port[0x8];
6643 u8 reserved_1[0x10]; 6643 u8 reserved_at_10[0x10];
6644 6644
6645 u8 reserved_2[0xc]; 6645 u8 reserved_at_20[0xc];
6646 u8 error_count[0x4]; 6646 u8 error_count[0x4];
6647 u8 reserved_3[0x10]; 6647 u8 reserved_at_30[0x10];
6648 6648
6649 u8 reserved_4[0xc]; 6649 u8 reserved_at_40[0xc];
6650 u8 lane[0x4]; 6650 u8 lane[0x4];
6651 u8 reserved_5[0x8]; 6651 u8 reserved_at_50[0x8];
6652 u8 error_type[0x8]; 6652 u8 error_type[0x8];
6653}; 6653};
6654 6654
6655struct mlx5_ifc_pcap_reg_bits { 6655struct mlx5_ifc_pcap_reg_bits {
6656 u8 reserved_0[0x8]; 6656 u8 reserved_at_0[0x8];
6657 u8 local_port[0x8]; 6657 u8 local_port[0x8];
6658 u8 reserved_1[0x10]; 6658 u8 reserved_at_10[0x10];
6659 6659
6660 u8 port_capability_mask[4][0x20]; 6660 u8 port_capability_mask[4][0x20];
6661}; 6661};
@@ -6663,46 +6663,46 @@ struct mlx5_ifc_pcap_reg_bits {
6663struct mlx5_ifc_paos_reg_bits { 6663struct mlx5_ifc_paos_reg_bits {
6664 u8 swid[0x8]; 6664 u8 swid[0x8];
6665 u8 local_port[0x8]; 6665 u8 local_port[0x8];
6666 u8 reserved_0[0x4]; 6666 u8 reserved_at_10[0x4];
6667 u8 admin_status[0x4]; 6667 u8 admin_status[0x4];
6668 u8 reserved_1[0x4]; 6668 u8 reserved_at_18[0x4];
6669 u8 oper_status[0x4]; 6669 u8 oper_status[0x4];
6670 6670
6671 u8 ase[0x1]; 6671 u8 ase[0x1];
6672 u8 ee[0x1]; 6672 u8 ee[0x1];
6673 u8 reserved_2[0x1c]; 6673 u8 reserved_at_22[0x1c];
6674 u8 e[0x2]; 6674 u8 e[0x2];
6675 6675
6676 u8 reserved_3[0x40]; 6676 u8 reserved_at_40[0x40];
6677}; 6677};
6678 6678
6679struct mlx5_ifc_pamp_reg_bits { 6679struct mlx5_ifc_pamp_reg_bits {
6680 u8 reserved_0[0x8]; 6680 u8 reserved_at_0[0x8];
6681 u8 opamp_group[0x8]; 6681 u8 opamp_group[0x8];
6682 u8 reserved_1[0xc]; 6682 u8 reserved_at_10[0xc];
6683 u8 opamp_group_type[0x4]; 6683 u8 opamp_group_type[0x4];
6684 6684
6685 u8 start_index[0x10]; 6685 u8 start_index[0x10];
6686 u8 reserved_2[0x4]; 6686 u8 reserved_at_30[0x4];
6687 u8 num_of_indices[0xc]; 6687 u8 num_of_indices[0xc];
6688 6688
6689 u8 index_data[18][0x10]; 6689 u8 index_data[18][0x10];
6690}; 6690};
6691 6691
6692struct mlx5_ifc_lane_2_module_mapping_bits { 6692struct mlx5_ifc_lane_2_module_mapping_bits {
6693 u8 reserved_0[0x6]; 6693 u8 reserved_at_0[0x6];
6694 u8 rx_lane[0x2]; 6694 u8 rx_lane[0x2];
6695 u8 reserved_1[0x6]; 6695 u8 reserved_at_8[0x6];
6696 u8 tx_lane[0x2]; 6696 u8 tx_lane[0x2];
6697 u8 reserved_2[0x8]; 6697 u8 reserved_at_10[0x8];
6698 u8 module[0x8]; 6698 u8 module[0x8];
6699}; 6699};
6700 6700
6701struct mlx5_ifc_bufferx_reg_bits { 6701struct mlx5_ifc_bufferx_reg_bits {
6702 u8 reserved_0[0x6]; 6702 u8 reserved_at_0[0x6];
6703 u8 lossy[0x1]; 6703 u8 lossy[0x1];
6704 u8 epsb[0x1]; 6704 u8 epsb[0x1];
6705 u8 reserved_1[0xc]; 6705 u8 reserved_at_8[0xc];
6706 u8 size[0xc]; 6706 u8 size[0xc];
6707 6707
6708 u8 xoff_threshold[0x10]; 6708 u8 xoff_threshold[0x10];
@@ -6714,21 +6714,21 @@ struct mlx5_ifc_set_node_in_bits {
6714}; 6714};
6715 6715
6716struct mlx5_ifc_register_power_settings_bits { 6716struct mlx5_ifc_register_power_settings_bits {
6717 u8 reserved_0[0x18]; 6717 u8 reserved_at_0[0x18];
6718 u8 power_settings_level[0x8]; 6718 u8 power_settings_level[0x8];
6719 6719
6720 u8 reserved_1[0x60]; 6720 u8 reserved_at_20[0x60];
6721}; 6721};
6722 6722
6723struct mlx5_ifc_register_host_endianness_bits { 6723struct mlx5_ifc_register_host_endianness_bits {
6724 u8 he[0x1]; 6724 u8 he[0x1];
6725 u8 reserved_0[0x1f]; 6725 u8 reserved_at_1[0x1f];
6726 6726
6727 u8 reserved_1[0x60]; 6727 u8 reserved_at_20[0x60];
6728}; 6728};
6729 6729
6730struct mlx5_ifc_umr_pointer_desc_argument_bits { 6730struct mlx5_ifc_umr_pointer_desc_argument_bits {
6731 u8 reserved_0[0x20]; 6731 u8 reserved_at_0[0x20];
6732 6732
6733 u8 mkey[0x20]; 6733 u8 mkey[0x20];
6734 6734
@@ -6741,7 +6741,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6741 u8 dc_key[0x40]; 6741 u8 dc_key[0x40];
6742 6742
6743 u8 ext[0x1]; 6743 u8 ext[0x1];
6744 u8 reserved_0[0x7]; 6744 u8 reserved_at_41[0x7];
6745 u8 destination_qp_dct[0x18]; 6745 u8 destination_qp_dct[0x18];
6746 6746
6747 u8 static_rate[0x4]; 6747 u8 static_rate[0x4];
@@ -6750,7 +6750,7 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6750 u8 mlid[0x7]; 6750 u8 mlid[0x7];
6751 u8 rlid_udp_sport[0x10]; 6751 u8 rlid_udp_sport[0x10];
6752 6752
6753 u8 reserved_1[0x20]; 6753 u8 reserved_at_80[0x20];
6754 6754
6755 u8 rmac_47_16[0x20]; 6755 u8 rmac_47_16[0x20];
6756 6756
@@ -6758,9 +6758,9 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6758 u8 tclass[0x8]; 6758 u8 tclass[0x8];
6759 u8 hop_limit[0x8]; 6759 u8 hop_limit[0x8];
6760 6760
6761 u8 reserved_2[0x1]; 6761 u8 reserved_at_e0[0x1];
6762 u8 grh[0x1]; 6762 u8 grh[0x1];
6763 u8 reserved_3[0x2]; 6763 u8 reserved_at_e2[0x2];
6764 u8 src_addr_index[0x8]; 6764 u8 src_addr_index[0x8];
6765 u8 flow_label[0x14]; 6765 u8 flow_label[0x14];
6766 6766
@@ -6768,27 +6768,27 @@ struct mlx5_ifc_ud_adrs_vector_bits {
6768}; 6768};
6769 6769
6770struct mlx5_ifc_pages_req_event_bits { 6770struct mlx5_ifc_pages_req_event_bits {
6771 u8 reserved_0[0x10]; 6771 u8 reserved_at_0[0x10];
6772 u8 function_id[0x10]; 6772 u8 function_id[0x10];
6773 6773
6774 u8 num_pages[0x20]; 6774 u8 num_pages[0x20];
6775 6775
6776 u8 reserved_1[0xa0]; 6776 u8 reserved_at_40[0xa0];
6777}; 6777};
6778 6778
6779struct mlx5_ifc_eqe_bits { 6779struct mlx5_ifc_eqe_bits {
6780 u8 reserved_0[0x8]; 6780 u8 reserved_at_0[0x8];
6781 u8 event_type[0x8]; 6781 u8 event_type[0x8];
6782 u8 reserved_1[0x8]; 6782 u8 reserved_at_10[0x8];
6783 u8 event_sub_type[0x8]; 6783 u8 event_sub_type[0x8];
6784 6784
6785 u8 reserved_2[0xe0]; 6785 u8 reserved_at_20[0xe0];
6786 6786
6787 union mlx5_ifc_event_auto_bits event_data; 6787 union mlx5_ifc_event_auto_bits event_data;
6788 6788
6789 u8 reserved_3[0x10]; 6789 u8 reserved_at_1e0[0x10];
6790 u8 signature[0x8]; 6790 u8 signature[0x8];
6791 u8 reserved_4[0x7]; 6791 u8 reserved_at_1f8[0x7];
6792 u8 owner[0x1]; 6792 u8 owner[0x1];
6793}; 6793};
6794 6794
@@ -6798,14 +6798,14 @@ enum {
6798 6798
6799struct mlx5_ifc_cmd_queue_entry_bits { 6799struct mlx5_ifc_cmd_queue_entry_bits {
6800 u8 type[0x8]; 6800 u8 type[0x8];
6801 u8 reserved_0[0x18]; 6801 u8 reserved_at_8[0x18];
6802 6802
6803 u8 input_length[0x20]; 6803 u8 input_length[0x20];
6804 6804
6805 u8 input_mailbox_pointer_63_32[0x20]; 6805 u8 input_mailbox_pointer_63_32[0x20];
6806 6806
6807 u8 input_mailbox_pointer_31_9[0x17]; 6807 u8 input_mailbox_pointer_31_9[0x17];
6808 u8 reserved_1[0x9]; 6808 u8 reserved_at_77[0x9];
6809 6809
6810 u8 command_input_inline_data[16][0x8]; 6810 u8 command_input_inline_data[16][0x8];
6811 6811
@@ -6814,20 +6814,20 @@ struct mlx5_ifc_cmd_queue_entry_bits {
6814 u8 output_mailbox_pointer_63_32[0x20]; 6814 u8 output_mailbox_pointer_63_32[0x20];
6815 6815
6816 u8 output_mailbox_pointer_31_9[0x17]; 6816 u8 output_mailbox_pointer_31_9[0x17];
6817 u8 reserved_2[0x9]; 6817 u8 reserved_at_1b7[0x9];
6818 6818
6819 u8 output_length[0x20]; 6819 u8 output_length[0x20];
6820 6820
6821 u8 token[0x8]; 6821 u8 token[0x8];
6822 u8 signature[0x8]; 6822 u8 signature[0x8];
6823 u8 reserved_3[0x8]; 6823 u8 reserved_at_1f0[0x8];
6824 u8 status[0x7]; 6824 u8 status[0x7];
6825 u8 ownership[0x1]; 6825 u8 ownership[0x1];
6826}; 6826};
6827 6827
6828struct mlx5_ifc_cmd_out_bits { 6828struct mlx5_ifc_cmd_out_bits {
6829 u8 status[0x8]; 6829 u8 status[0x8];
6830 u8 reserved_0[0x18]; 6830 u8 reserved_at_8[0x18];
6831 6831
6832 u8 syndrome[0x20]; 6832 u8 syndrome[0x20];
6833 6833
@@ -6836,9 +6836,9 @@ struct mlx5_ifc_cmd_out_bits {
6836 6836
6837struct mlx5_ifc_cmd_in_bits { 6837struct mlx5_ifc_cmd_in_bits {
6838 u8 opcode[0x10]; 6838 u8 opcode[0x10];
6839 u8 reserved_0[0x10]; 6839 u8 reserved_at_10[0x10];
6840 6840
6841 u8 reserved_1[0x10]; 6841 u8 reserved_at_20[0x10];
6842 u8 op_mod[0x10]; 6842 u8 op_mod[0x10];
6843 6843
6844 u8 command[0][0x20]; 6844 u8 command[0][0x20];
@@ -6847,16 +6847,16 @@ struct mlx5_ifc_cmd_in_bits {
6847struct mlx5_ifc_cmd_if_box_bits { 6847struct mlx5_ifc_cmd_if_box_bits {
6848 u8 mailbox_data[512][0x8]; 6848 u8 mailbox_data[512][0x8];
6849 6849
6850 u8 reserved_0[0x180]; 6850 u8 reserved_at_1000[0x180];
6851 6851
6852 u8 next_pointer_63_32[0x20]; 6852 u8 next_pointer_63_32[0x20];
6853 6853
6854 u8 next_pointer_31_10[0x16]; 6854 u8 next_pointer_31_10[0x16];
6855 u8 reserved_1[0xa]; 6855 u8 reserved_at_11b6[0xa];
6856 6856
6857 u8 block_number[0x20]; 6857 u8 block_number[0x20];
6858 6858
6859 u8 reserved_2[0x8]; 6859 u8 reserved_at_11e0[0x8];
6860 u8 token[0x8]; 6860 u8 token[0x8];
6861 u8 ctrl_signature[0x8]; 6861 u8 ctrl_signature[0x8];
6862 u8 signature[0x8]; 6862 u8 signature[0x8];
@@ -6866,7 +6866,7 @@ struct mlx5_ifc_mtt_bits {
6866 u8 ptag_63_32[0x20]; 6866 u8 ptag_63_32[0x20];
6867 6867
6868 u8 ptag_31_8[0x18]; 6868 u8 ptag_31_8[0x18];
6869 u8 reserved_0[0x6]; 6869 u8 reserved_at_38[0x6];
6870 u8 wr_en[0x1]; 6870 u8 wr_en[0x1];
6871 u8 rd_en[0x1]; 6871 u8 rd_en[0x1];
6872}; 6872};
@@ -6904,38 +6904,38 @@ struct mlx5_ifc_initial_seg_bits {
6904 u8 cmd_interface_rev[0x10]; 6904 u8 cmd_interface_rev[0x10];
6905 u8 fw_rev_subminor[0x10]; 6905 u8 fw_rev_subminor[0x10];
6906 6906
6907 u8 reserved_0[0x40]; 6907 u8 reserved_at_40[0x40];
6908 6908
6909 u8 cmdq_phy_addr_63_32[0x20]; 6909 u8 cmdq_phy_addr_63_32[0x20];
6910 6910
6911 u8 cmdq_phy_addr_31_12[0x14]; 6911 u8 cmdq_phy_addr_31_12[0x14];
6912 u8 reserved_1[0x2]; 6912 u8 reserved_at_b4[0x2];
6913 u8 nic_interface[0x2]; 6913 u8 nic_interface[0x2];
6914 u8 log_cmdq_size[0x4]; 6914 u8 log_cmdq_size[0x4];
6915 u8 log_cmdq_stride[0x4]; 6915 u8 log_cmdq_stride[0x4];
6916 6916
6917 u8 command_doorbell_vector[0x20]; 6917 u8 command_doorbell_vector[0x20];
6918 6918
6919 u8 reserved_2[0xf00]; 6919 u8 reserved_at_e0[0xf00];
6920 6920
6921 u8 initializing[0x1]; 6921 u8 initializing[0x1];
6922 u8 reserved_3[0x4]; 6922 u8 reserved_at_fe1[0x4];
6923 u8 nic_interface_supported[0x3]; 6923 u8 nic_interface_supported[0x3];
6924 u8 reserved_4[0x18]; 6924 u8 reserved_at_fe8[0x18];
6925 6925
6926 struct mlx5_ifc_health_buffer_bits health_buffer; 6926 struct mlx5_ifc_health_buffer_bits health_buffer;
6927 6927
6928 u8 no_dram_nic_offset[0x20]; 6928 u8 no_dram_nic_offset[0x20];
6929 6929
6930 u8 reserved_5[0x6e40]; 6930 u8 reserved_at_1220[0x6e40];
6931 6931
6932 u8 reserved_6[0x1f]; 6932 u8 reserved_at_8060[0x1f];
6933 u8 clear_int[0x1]; 6933 u8 clear_int[0x1];
6934 6934
6935 u8 health_syndrome[0x8]; 6935 u8 health_syndrome[0x8];
6936 u8 health_counter[0x18]; 6936 u8 health_counter[0x18];
6937 6937
6938 u8 reserved_7[0x17fc0]; 6938 u8 reserved_at_80a0[0x17fc0];
6939}; 6939};
6940 6940
6941union mlx5_ifc_ports_control_registers_document_bits { 6941union mlx5_ifc_ports_control_registers_document_bits {
@@ -6980,44 +6980,44 @@ union mlx5_ifc_ports_control_registers_document_bits {
6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg; 6980 struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
6981 struct mlx5_ifc_slrg_reg_bits slrg_reg; 6981 struct mlx5_ifc_slrg_reg_bits slrg_reg;
6982 struct mlx5_ifc_sltp_reg_bits sltp_reg; 6982 struct mlx5_ifc_sltp_reg_bits sltp_reg;
6983 u8 reserved_0[0x60e0]; 6983 u8 reserved_at_0[0x60e0];
6984}; 6984};
6985 6985
6986union mlx5_ifc_debug_enhancements_document_bits { 6986union mlx5_ifc_debug_enhancements_document_bits {
6987 struct mlx5_ifc_health_buffer_bits health_buffer; 6987 struct mlx5_ifc_health_buffer_bits health_buffer;
6988 u8 reserved_0[0x200]; 6988 u8 reserved_at_0[0x200];
6989}; 6989};
6990 6990
6991union mlx5_ifc_uplink_pci_interface_document_bits { 6991union mlx5_ifc_uplink_pci_interface_document_bits {
6992 struct mlx5_ifc_initial_seg_bits initial_seg; 6992 struct mlx5_ifc_initial_seg_bits initial_seg;
6993 u8 reserved_0[0x20060]; 6993 u8 reserved_at_0[0x20060];
6994}; 6994};
6995 6995
6996struct mlx5_ifc_set_flow_table_root_out_bits { 6996struct mlx5_ifc_set_flow_table_root_out_bits {
6997 u8 status[0x8]; 6997 u8 status[0x8];
6998 u8 reserved_0[0x18]; 6998 u8 reserved_at_8[0x18];
6999 6999
7000 u8 syndrome[0x20]; 7000 u8 syndrome[0x20];
7001 7001
7002 u8 reserved_1[0x40]; 7002 u8 reserved_at_40[0x40];
7003}; 7003};
7004 7004
7005struct mlx5_ifc_set_flow_table_root_in_bits { 7005struct mlx5_ifc_set_flow_table_root_in_bits {
7006 u8 opcode[0x10]; 7006 u8 opcode[0x10];
7007 u8 reserved_0[0x10]; 7007 u8 reserved_at_10[0x10];
7008 7008
7009 u8 reserved_1[0x10]; 7009 u8 reserved_at_20[0x10];
7010 u8 op_mod[0x10]; 7010 u8 op_mod[0x10];
7011 7011
7012 u8 reserved_2[0x40]; 7012 u8 reserved_at_40[0x40];
7013 7013
7014 u8 table_type[0x8]; 7014 u8 table_type[0x8];
7015 u8 reserved_3[0x18]; 7015 u8 reserved_at_88[0x18];
7016 7016
7017 u8 reserved_4[0x8]; 7017 u8 reserved_at_a0[0x8];
7018 u8 table_id[0x18]; 7018 u8 table_id[0x18];
7019 7019
7020 u8 reserved_5[0x140]; 7020 u8 reserved_at_c0[0x140];
7021}; 7021};
7022 7022
7023enum { 7023enum {
@@ -7026,39 +7026,39 @@ enum {
7026 7026
7027struct mlx5_ifc_modify_flow_table_out_bits { 7027struct mlx5_ifc_modify_flow_table_out_bits {
7028 u8 status[0x8]; 7028 u8 status[0x8];
7029 u8 reserved_0[0x18]; 7029 u8 reserved_at_8[0x18];
7030 7030
7031 u8 syndrome[0x20]; 7031 u8 syndrome[0x20];
7032 7032
7033 u8 reserved_1[0x40]; 7033 u8 reserved_at_40[0x40];
7034}; 7034};
7035 7035
7036struct mlx5_ifc_modify_flow_table_in_bits { 7036struct mlx5_ifc_modify_flow_table_in_bits {
7037 u8 opcode[0x10]; 7037 u8 opcode[0x10];
7038 u8 reserved_0[0x10]; 7038 u8 reserved_at_10[0x10];
7039 7039
7040 u8 reserved_1[0x10]; 7040 u8 reserved_at_20[0x10];
7041 u8 op_mod[0x10]; 7041 u8 op_mod[0x10];
7042 7042
7043 u8 reserved_2[0x20]; 7043 u8 reserved_at_40[0x20];
7044 7044
7045 u8 reserved_3[0x10]; 7045 u8 reserved_at_60[0x10];
7046 u8 modify_field_select[0x10]; 7046 u8 modify_field_select[0x10];
7047 7047
7048 u8 table_type[0x8]; 7048 u8 table_type[0x8];
7049 u8 reserved_4[0x18]; 7049 u8 reserved_at_88[0x18];
7050 7050
7051 u8 reserved_5[0x8]; 7051 u8 reserved_at_a0[0x8];
7052 u8 table_id[0x18]; 7052 u8 table_id[0x18];
7053 7053
7054 u8 reserved_6[0x4]; 7054 u8 reserved_at_c0[0x4];
7055 u8 table_miss_mode[0x4]; 7055 u8 table_miss_mode[0x4];
7056 u8 reserved_7[0x18]; 7056 u8 reserved_at_c8[0x18];
7057 7057
7058 u8 reserved_8[0x8]; 7058 u8 reserved_at_e0[0x8];
7059 u8 table_miss_id[0x18]; 7059 u8 table_miss_id[0x18];
7060 7060
7061 u8 reserved_9[0x100]; 7061 u8 reserved_at_100[0x100];
7062}; 7062};
7063 7063
7064#endif /* MLX5_IFC_H */ 7064#endif /* MLX5_IFC_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 4560d8f1545d..2bb0c3085706 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -324,6 +324,12 @@ struct module_layout {
324#define __module_layout_align 324#define __module_layout_align
325#endif 325#endif
326 326
327struct mod_kallsyms {
328 Elf_Sym *symtab;
329 unsigned int num_symtab;
330 char *strtab;
331};
332
327struct module { 333struct module {
328 enum module_state state; 334 enum module_state state;
329 335
@@ -405,15 +411,10 @@ struct module {
405#endif 411#endif
406 412
407#ifdef CONFIG_KALLSYMS 413#ifdef CONFIG_KALLSYMS
408 /* 414 /* Protected by RCU and/or module_mutex: use rcu_dereference() */
409 * We keep the symbol and string tables for kallsyms. 415 struct mod_kallsyms *kallsyms;
410 * The core_* fields below are temporary, loader-only (they 416 struct mod_kallsyms core_kallsyms;
411 * could really be discarded after module init). 417
412 */
413 Elf_Sym *symtab, *core_symtab;
414 unsigned int num_symtab, core_num_syms;
415 char *strtab, *core_strtab;
416
417 /* Section attributes */ 418 /* Section attributes */
418 struct module_sect_attrs *sect_attrs; 419 struct module_sect_attrs *sect_attrs;
419 420
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 289c2314d766..5440b7b705eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3718,7 +3718,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
3718void *netdev_lower_get_next(struct net_device *dev, 3718void *netdev_lower_get_next(struct net_device *dev,
3719 struct list_head **iter); 3719 struct list_head **iter);
3720#define netdev_for_each_lower_dev(dev, ldev, iter) \ 3720#define netdev_for_each_lower_dev(dev, ldev, iter) \
3721 for (iter = &(dev)->adj_list.lower, \ 3721 for (iter = (dev)->adj_list.lower.next, \
3722 ldev = netdev_lower_get_next(dev, &(iter)); \ 3722 ldev = netdev_lower_get_next(dev, &(iter)); \
3723 ldev; \ 3723 ldev; \
3724 ldev = netdev_lower_get_next(dev, &(iter))) 3724 ldev = netdev_lower_get_next(dev, &(iter)))
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27df4a6585da..27716254dcc5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -988,23 +988,6 @@ static inline int pci_is_managed(struct pci_dev *pdev)
988 return pdev->is_managed; 988 return pdev->is_managed;
989} 989}
990 990
991static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
992{
993 pdev->irq = irq;
994 pdev->irq_managed = 1;
995}
996
997static inline void pci_reset_managed_irq(struct pci_dev *pdev)
998{
999 pdev->irq = 0;
1000 pdev->irq_managed = 0;
1001}
1002
1003static inline bool pci_has_managed_irq(struct pci_dev *pdev)
1004{
1005 return pdev->irq_managed && pdev->irq > 0;
1006}
1007
1008void pci_disable_device(struct pci_dev *dev); 991void pci_disable_device(struct pci_dev *dev);
1009 992
1010extern unsigned int pcibios_max_latency; 993extern unsigned int pcibios_max_latency;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b35a61a481fa..f5c5a3fa2c81 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -397,6 +397,7 @@ struct pmu {
397 * enum perf_event_active_state - the states of a event 397 * enum perf_event_active_state - the states of a event
398 */ 398 */
399enum perf_event_active_state { 399enum perf_event_active_state {
400 PERF_EVENT_STATE_DEAD = -4,
400 PERF_EVENT_STATE_EXIT = -3, 401 PERF_EVENT_STATE_EXIT = -3,
401 PERF_EVENT_STATE_ERROR = -2, 402 PERF_EVENT_STATE_ERROR = -2,
402 PERF_EVENT_STATE_OFF = -1, 403 PERF_EVENT_STATE_OFF = -1,
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
905 } 906 }
906} 907}
907 908
908extern struct static_key_deferred perf_sched_events; 909extern struct static_key_false perf_sched_events;
909 910
910static __always_inline bool 911static __always_inline bool
911perf_sw_migrate_enabled(void) 912perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task)
924static inline void perf_event_task_sched_in(struct task_struct *prev, 925static inline void perf_event_task_sched_in(struct task_struct *prev,
925 struct task_struct *task) 926 struct task_struct *task)
926{ 927{
927 if (static_key_false(&perf_sched_events.key)) 928 if (static_branch_unlikely(&perf_sched_events))
928 __perf_event_task_sched_in(prev, task); 929 __perf_event_task_sched_in(prev, task);
929 930
930 if (perf_sw_migrate_enabled() && task->sched_migrated) { 931 if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
941{ 942{
942 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); 943 perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
943 944
944 if (static_key_false(&perf_sched_events.key)) 945 if (static_branch_unlikely(&perf_sched_events))
945 __perf_event_task_sched_out(prev, next); 946 __perf_event_task_sched_out(prev, next);
946} 947}
947 948
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 2d8e49711b63..1132953235c0 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -10,7 +10,7 @@
10 * backing is indicated by flags in the high bits of the value. 10 * backing is indicated by flags in the high bits of the value.
11 */ 11 */
12typedef struct { 12typedef struct {
13 unsigned long val; 13 u64 val;
14} pfn_t; 14} pfn_t;
15#endif 15#endif
16 16
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 37448ab5fb5c..94994810c7c0 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -9,14 +9,13 @@
9 * PFN_DEV - pfn is not covered by system memmap by default 9 * PFN_DEV - pfn is not covered by system memmap by default
10 * PFN_MAP - pfn has a dynamic page mapping established by a device driver 10 * PFN_MAP - pfn has a dynamic page mapping established by a device driver
11 */ 11 */
12#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \ 12#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
13 << (BITS_PER_LONG - PAGE_SHIFT)) 13#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
14#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1)) 14#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
15#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2)) 15#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
16#define PFN_DEV (1UL << (BITS_PER_LONG - 3)) 16#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
17#define PFN_MAP (1UL << (BITS_PER_LONG - 4)) 17
18 18static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
19static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
20{ 19{
21 pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; 20 pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
22 21
@@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
29 return __pfn_to_pfn_t(pfn, 0); 28 return __pfn_to_pfn_t(pfn, 0);
30} 29}
31 30
32extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags); 31extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
33 32
34static inline bool pfn_t_has_page(pfn_t pfn) 33static inline bool pfn_t_has_page(pfn_t pfn)
35{ 34{
@@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
87#ifdef __HAVE_ARCH_PTE_DEVMAP 86#ifdef __HAVE_ARCH_PTE_DEVMAP
88static inline bool pfn_t_devmap(pfn_t pfn) 87static inline bool pfn_t_devmap(pfn_t pfn)
89{ 88{
90 const unsigned long flags = PFN_DEV|PFN_MAP; 89 const u64 flags = PFN_DEV|PFN_MAP;
91 90
92 return (pfn.val & flags) == flags; 91 return (pfn.val & flags) == flags;
93} 92}
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 998d8f1c3c91..b50c0492629d 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,6 +49,7 @@ struct bq27xxx_reg_cache {
49 49
50struct bq27xxx_device_info { 50struct bq27xxx_device_info {
51 struct device *dev; 51 struct device *dev;
52 int id;
52 enum bq27xxx_chip chip; 53 enum bq27xxx_chip chip;
53 const char *name; 54 const char *name;
54 struct bq27xxx_access_methods bus; 55 struct bq27xxx_access_methods bus;
diff --git a/include/linux/random.h b/include/linux/random.h
index a75840c1aa71..9c29122037f9 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
34#endif 34#endif
35 35
36unsigned int get_random_int(void); 36unsigned int get_random_int(void);
37unsigned long get_random_long(void);
37unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); 38unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
38 39
39u32 prandom_u32(void); 40u32 prandom_u32(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 11f935c1a090..4ce9ff7086f4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -299,6 +299,7 @@ struct sk_buff;
299#else 299#else
300#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) 300#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
301#endif 301#endif
302extern int sysctl_max_skb_frags;
302 303
303typedef struct skb_frag_struct skb_frag_t; 304typedef struct skb_frag_struct skb_frag_t;
304 305
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 343c13ac4f71..35cb9264e0d5 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -44,6 +44,7 @@
44 44
45#define KNAV_DMA_NUM_EPIB_WORDS 4 45#define KNAV_DMA_NUM_EPIB_WORDS 4
46#define KNAV_DMA_NUM_PS_WORDS 16 46#define KNAV_DMA_NUM_PS_WORDS 16
47#define KNAV_DMA_NUM_SW_DATA_WORDS 4
47#define KNAV_DMA_FDQ_PER_CHAN 4 48#define KNAV_DMA_FDQ_PER_CHAN 4
48 49
49/* Tx channel scheduling priority */ 50/* Tx channel scheduling priority */
@@ -142,6 +143,7 @@ struct knav_dma_cfg {
142 * @orig_buff: buff pointer since 'buff' can be overwritten 143 * @orig_buff: buff pointer since 'buff' can be overwritten
143 * @epib: Extended packet info block 144 * @epib: Extended packet info block
144 * @psdata: Protocol specific 145 * @psdata: Protocol specific
146 * @sw_data: Software private data not touched by h/w
145 */ 147 */
146struct knav_dma_desc { 148struct knav_dma_desc {
147 __le32 desc_info; 149 __le32 desc_info;
@@ -154,7 +156,7 @@ struct knav_dma_desc {
154 __le32 orig_buff; 156 __le32 orig_buff;
155 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS]; 157 __le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
156 __le32 psdata[KNAV_DMA_NUM_PS_WORDS]; 158 __le32 psdata[KNAV_DMA_NUM_PS_WORDS];
157 __le32 pad[4]; 159 u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS];
158} ____cacheline_aligned; 160} ____cacheline_aligned;
159 161
160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA) 162#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index f33c5a4d6fe4..3b1ff38f0c37 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -93,6 +93,12 @@ struct rpcrdma_msg {
93 __be32 rm_pempty[3]; /* 3 empty chunk lists */ 93 __be32 rm_pempty[3]; /* 3 empty chunk lists */
94 } rm_padded; 94 } rm_padded;
95 95
96 struct {
97 __be32 rm_err;
98 __be32 rm_vers_low;
99 __be32 rm_vers_high;
100 } rm_error;
101
96 __be32 rm_chunks[0]; /* read, write and reply chunks */ 102 __be32 rm_chunks[0]; /* read, write and reply chunks */
97 103
98 } rm_body; 104 } rm_body;
@@ -102,17 +108,13 @@ struct rpcrdma_msg {
102 * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks 108 * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
103 */ 109 */
104#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7) 110#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
111#define RPCRDMA_HDRLEN_ERR (sizeof(__be32) * 5)
105 112
106enum rpcrdma_errcode { 113enum rpcrdma_errcode {
107 ERR_VERS = 1, 114 ERR_VERS = 1,
108 ERR_CHUNK = 2 115 ERR_CHUNK = 2
109}; 116};
110 117
111struct rpcrdma_err_vers {
112 uint32_t rdma_vers_low; /* Version range supported by peer */
113 uint32_t rdma_vers_high;
114};
115
116enum rpcrdma_proc { 118enum rpcrdma_proc {
117 RDMA_MSG = 0, /* An RPC call or reply msg */ 119 RDMA_MSG = 0, /* An RPC call or reply msg */
118 RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */ 120 RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index b7b279b54504..767190b01363 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -54,8 +54,6 @@
54 54
55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */ 55#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
56 56
57#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
58
59/* Memory registration strategies, by number. 57/* Memory registration strategies, by number.
60 * This is part of a kernel / user space API. Do not remove. */ 58 * This is part of a kernel / user space API. Do not remove. */
61enum rpcrdma_memreg { 59enum rpcrdma_memreg {
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acd522a91539..acfdbf353a0b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
14 * See the file COPYING for more details. 14 * See the file COPYING for more details.
15 */ 15 */
16 16
17#include <linux/smp.h>
17#include <linux/errno.h> 18#include <linux/errno.h>
18#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/cpumask.h>
19#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
20#include <linux/tracepoint-defs.h> 22#include <linux/tracepoint-defs.h>
21 23
@@ -132,6 +134,9 @@ extern void syscall_unregfunc(void);
132 void *it_func; \ 134 void *it_func; \
133 void *__data; \ 135 void *__data; \
134 \ 136 \
137 if (!cpu_online(raw_smp_processor_id())) \
138 return; \
139 \
135 if (!(cond)) \ 140 if (!(cond)) \
136 return; \ 141 return; \
137 prercu; \ 142 prercu; \
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20afdbc01..bb679b48f408 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
11unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); 11unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
12int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); 12int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
13 13
14unsigned long ucs2_utf8size(const ucs2_char_t *src);
15unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
16 unsigned long maxlength);
17
14#endif /* _LINUX_UCS2_STRING_H_ */ 18#endif /* _LINUX_UCS2_STRING_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0e32bc71245e..ca73c503b92a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,6 +311,7 @@ enum {
311 311
312 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ 312 __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
313 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ 313 __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
314 __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
314 315
315 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ 316 WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
316 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 317 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
411 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) 412 alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
412 413
413#define create_workqueue(name) \ 414#define create_workqueue(name) \
414 alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) 415 alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
415#define create_freezable_workqueue(name) \ 416#define create_freezable_workqueue(name) \
416 alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ 417 alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
417 1, (name)) 418 WQ_MEM_RECLAIM, 1, (name))
418#define create_singlethread_workqueue(name) \ 419#define create_singlethread_workqueue(name) \
419 alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) 420 alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
420 421
421extern void destroy_workqueue(struct workqueue_struct *wq); 422extern void destroy_workqueue(struct workqueue_struct *wq);
422 423
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2a91a0561a47..9b4c418bebd8 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,8 +6,8 @@
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <net/sock.h> 7#include <net/sock.h>
8 8
9void unix_inflight(struct file *fp); 9void unix_inflight(struct user_struct *user, struct file *fp);
10void unix_notinflight(struct file *fp); 10void unix_notinflight(struct user_struct *user, struct file *fp);
11void unix_gc(void); 11void unix_gc(void);
12void wait_for_unix_gc(void); 12void wait_for_unix_gc(void);
13struct sock *unix_get_socket(struct file *filp); 13struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 481fe1c9044c..49dcad4fe99e 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
270 struct sock *newsk, 270 struct sock *newsk,
271 const struct request_sock *req); 271 const struct request_sock *req);
272 272
273void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, 273struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
274 struct sock *child); 274 struct request_sock *req,
275 struct sock *child);
275void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 276void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
276 unsigned long timeout); 277 unsigned long timeout);
277struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, 278struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7029527725dd..4079fc18ffe4 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -61,6 +61,7 @@ struct fib_nh_exception {
61 struct rtable __rcu *fnhe_rth_input; 61 struct rtable __rcu *fnhe_rth_input;
62 struct rtable __rcu *fnhe_rth_output; 62 struct rtable __rcu *fnhe_rth_output;
63 unsigned long fnhe_stamp; 63 unsigned long fnhe_stamp;
64 struct rcu_head rcu;
64}; 65};
65 66
66struct fnhe_hash_bucket { 67struct fnhe_hash_bucket {
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 6db96ea0144f..dda9abf6b89c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
230int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); 230int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
231int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, 231int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
232 u8 *protocol, struct flowi4 *fl4); 232 u8 *protocol, struct flowi4 *fl4);
233int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
233int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); 234int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
234 235
235struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, 236struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
diff --git a/include/net/scm.h b/include/net/scm.h
index 262532d111f5..59fa93c01d2a 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -21,6 +21,7 @@ struct scm_creds {
21struct scm_fp_list { 21struct scm_fp_list {
22 short count; 22 short count;
23 short max; 23 short max;
24 struct user_struct *user;
24 struct file *fp[SCM_MAX_FD]; 25 struct file *fp[SCM_MAX_FD];
25}; 26};
26 27
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6f8f032c73e..ae6468f5c9f3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -447,7 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
447 447
448void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); 448void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
449void tcp_v4_mtu_reduced(struct sock *sk); 449void tcp_v4_mtu_reduced(struct sock *sk);
450void tcp_req_err(struct sock *sk, u32 seq); 450void tcp_req_err(struct sock *sk, u32 seq, bool abort);
451int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); 451int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
452struct sock *tcp_create_openreq_child(const struct sock *sk, 452struct sock *tcp_create_openreq_child(const struct sock *sk,
453 struct request_sock *req, 453 struct request_sock *req,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index e2b712c90d3f..c21c38ce7450 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -343,7 +343,7 @@ void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
343void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus); 343void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
344 344
345void snd_hdac_bus_update_rirb(struct hdac_bus *bus); 345void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
346void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, 346int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
347 void (*ack)(struct hdac_bus *, 347 void (*ack)(struct hdac_bus *,
348 struct hdac_stream *)); 348 struct hdac_stream *));
349 349
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 56cf8e485ef2..28ee5c2e6bcd 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
94 sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); 94 sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
95 95
96bool target_sense_desc_format(struct se_device *dev); 96bool target_sense_desc_format(struct se_device *dev);
97sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
98bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
99 struct request_queue *q, int block_size);
97 100
98#endif /* TARGET_CORE_BACKEND_H */ 101#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5d82816cc4e3..e8c8c08bf575 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -140,6 +140,8 @@ enum se_cmd_flags_table {
140 SCF_COMPARE_AND_WRITE = 0x00080000, 140 SCF_COMPARE_AND_WRITE = 0x00080000,
141 SCF_COMPARE_AND_WRITE_POST = 0x00100000, 141 SCF_COMPARE_AND_WRITE_POST = 0x00100000,
142 SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, 142 SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
143 SCF_ACK_KREF = 0x00400000,
144 SCF_USE_CPUID = 0x00800000,
143}; 145};
144 146
145/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ 147/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -187,6 +189,7 @@ enum target_sc_flags_table {
187 TARGET_SCF_BIDI_OP = 0x01, 189 TARGET_SCF_BIDI_OP = 0x01,
188 TARGET_SCF_ACK_KREF = 0x02, 190 TARGET_SCF_ACK_KREF = 0x02,
189 TARGET_SCF_UNKNOWN_SIZE = 0x04, 191 TARGET_SCF_UNKNOWN_SIZE = 0x04,
192 TARGET_SCF_USE_CPUID = 0x08,
190}; 193};
191 194
192/* fabric independent task management function values */ 195/* fabric independent task management function values */
@@ -490,8 +493,9 @@ struct se_cmd {
490#define CMD_T_SENT (1 << 4) 493#define CMD_T_SENT (1 << 4)
491#define CMD_T_STOP (1 << 5) 494#define CMD_T_STOP (1 << 5)
492#define CMD_T_DEV_ACTIVE (1 << 7) 495#define CMD_T_DEV_ACTIVE (1 << 7)
493#define CMD_T_REQUEST_STOP (1 << 8)
494#define CMD_T_BUSY (1 << 9) 496#define CMD_T_BUSY (1 << 9)
497#define CMD_T_TAS (1 << 10)
498#define CMD_T_FABRIC_STOP (1 << 11)
495 spinlock_t t_state_lock; 499 spinlock_t t_state_lock;
496 struct kref cmd_kref; 500 struct kref cmd_kref;
497 struct completion t_transport_stop_comp; 501 struct completion t_transport_stop_comp;
@@ -511,9 +515,6 @@ struct se_cmd {
511 515
512 struct list_head state_list; 516 struct list_head state_list;
513 517
514 /* old task stop completion, consider merging with some of the above */
515 struct completion task_stop_comp;
516
517 /* backend private data */ 518 /* backend private data */
518 void *priv; 519 void *priv;
519 520
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 5b4a4be06e2b..cc68b92124d4 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -66,14 +66,18 @@ struct nd_cmd_ars_cap {
66 __u64 length; 66 __u64 length;
67 __u32 status; 67 __u32 status;
68 __u32 max_ars_out; 68 __u32 max_ars_out;
69 __u32 clear_err_unit;
70 __u32 reserved;
69} __packed; 71} __packed;
70 72
71struct nd_cmd_ars_start { 73struct nd_cmd_ars_start {
72 __u64 address; 74 __u64 address;
73 __u64 length; 75 __u64 length;
74 __u16 type; 76 __u16 type;
75 __u8 reserved[6]; 77 __u8 flags;
78 __u8 reserved[5];
76 __u32 status; 79 __u32 status;
80 __u32 scrub_time;
77} __packed; 81} __packed;
78 82
79struct nd_cmd_ars_status { 83struct nd_cmd_ars_status {
@@ -81,11 +85,14 @@ struct nd_cmd_ars_status {
81 __u32 out_length; 85 __u32 out_length;
82 __u64 address; 86 __u64 address;
83 __u64 length; 87 __u64 length;
88 __u64 restart_address;
89 __u64 restart_length;
84 __u16 type; 90 __u16 type;
91 __u16 flags;
85 __u32 num_records; 92 __u32 num_records;
86 struct nd_ars_record { 93 struct nd_ars_record {
87 __u32 handle; 94 __u32 handle;
88 __u32 flags; 95 __u32 reserved;
89 __u64 err_address; 96 __u64 err_address;
90 __u64 length; 97 __u64 length;
91 } __packed records[0]; 98 } __packed records[0];
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d0f277..331fc1b0b3c7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); 156 struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
157 157
158 /* 158 /*
159 * We raced in the idr lookup or with shm_destroy(). Either way, the 159 * Callers of shm_lock() must validate the status of the returned ipc
160 * ID is busted. 160 * object pointer (as returned by ipc_lock()), and error out as
161 * appropriate.
161 */ 162 */
162 WARN_ON(IS_ERR(ipcp)); 163 if (IS_ERR(ipcp))
163 164 return (void *)ipcp;
164 return container_of(ipcp, struct shmid_kernel, shm_perm); 165 return container_of(ipcp, struct shmid_kernel, shm_perm);
165} 166}
166 167
@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
186} 187}
187 188
188 189
189/* This is called by fork, once for every shm attach. */ 190static int __shm_open(struct vm_area_struct *vma)
190static void shm_open(struct vm_area_struct *vma)
191{ 191{
192 struct file *file = vma->vm_file; 192 struct file *file = vma->vm_file;
193 struct shm_file_data *sfd = shm_file_data(file); 193 struct shm_file_data *sfd = shm_file_data(file);
194 struct shmid_kernel *shp; 194 struct shmid_kernel *shp;
195 195
196 shp = shm_lock(sfd->ns, sfd->id); 196 shp = shm_lock(sfd->ns, sfd->id);
197
198 if (IS_ERR(shp))
199 return PTR_ERR(shp);
200
197 shp->shm_atim = get_seconds(); 201 shp->shm_atim = get_seconds();
198 shp->shm_lprid = task_tgid_vnr(current); 202 shp->shm_lprid = task_tgid_vnr(current);
199 shp->shm_nattch++; 203 shp->shm_nattch++;
200 shm_unlock(shp); 204 shm_unlock(shp);
205 return 0;
206}
207
208/* This is called by fork, once for every shm attach. */
209static void shm_open(struct vm_area_struct *vma)
210{
211 int err = __shm_open(vma);
212 /*
213 * We raced in the idr lookup or with shm_destroy().
214 * Either way, the ID is busted.
215 */
216 WARN_ON_ONCE(err);
201} 217}
202 218
203/* 219/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
260 down_write(&shm_ids(ns).rwsem); 276 down_write(&shm_ids(ns).rwsem);
261 /* remove from the list of attaches of the shm segment */ 277 /* remove from the list of attaches of the shm segment */
262 shp = shm_lock(ns, sfd->id); 278 shp = shm_lock(ns, sfd->id);
279
280 /*
281 * We raced in the idr lookup or with shm_destroy().
282 * Either way, the ID is busted.
283 */
284 if (WARN_ON_ONCE(IS_ERR(shp)))
285 goto done; /* no-op */
286
263 shp->shm_lprid = task_tgid_vnr(current); 287 shp->shm_lprid = task_tgid_vnr(current);
264 shp->shm_dtim = get_seconds(); 288 shp->shm_dtim = get_seconds();
265 shp->shm_nattch--; 289 shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
267 shm_destroy(ns, shp); 291 shm_destroy(ns, shp);
268 else 292 else
269 shm_unlock(shp); 293 shm_unlock(shp);
294done:
270 up_write(&shm_ids(ns).rwsem); 295 up_write(&shm_ids(ns).rwsem);
271} 296}
272 297
@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
388 struct shm_file_data *sfd = shm_file_data(file); 413 struct shm_file_data *sfd = shm_file_data(file);
389 int ret; 414 int ret;
390 415
416 /*
417 * In case of remap_file_pages() emulation, the file can represent
418 * removed IPC ID: propogate shm_lock() error to caller.
419 */
420 ret =__shm_open(vma);
421 if (ret)
422 return ret;
423
391 ret = sfd->file->f_op->mmap(sfd->file, vma); 424 ret = sfd->file->f_op->mmap(sfd->file, vma);
392 if (ret != 0) 425 if (ret) {
426 shm_close(vma);
393 return ret; 427 return ret;
428 }
394 sfd->vm_ops = vma->vm_ops; 429 sfd->vm_ops = vma->vm_ops;
395#ifdef CONFIG_MMU 430#ifdef CONFIG_MMU
396 WARN_ON(!sfd->vm_ops->fault); 431 WARN_ON(!sfd->vm_ops->fault);
397#endif 432#endif
398 vma->vm_ops = &shm_vm_ops; 433 vma->vm_ops = &shm_vm_ops;
399 shm_open(vma); 434 return 0;
400
401 return ret;
402} 435}
403 436
404static int shm_release(struct inode *ino, struct file *file) 437static int shm_release(struct inode *ino, struct file *file)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d1d3e8f57de9..2e7f7ab739e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
2082 /* adjust offset of jmps if necessary */ 2082 /* adjust offset of jmps if necessary */
2083 if (i < pos && i + insn->off + 1 > pos) 2083 if (i < pos && i + insn->off + 1 > pos)
2084 insn->off += delta; 2084 insn->off += delta;
2085 else if (i > pos && i + insn->off + 1 < pos) 2085 else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
2086 insn->off -= delta; 2086 insn->off -= delta;
2087 } 2087 }
2088} 2088}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c03a640ef6da..d27904c193da 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
58#include <linux/kthread.h> 58#include <linux/kthread.h>
59#include <linux/delay.h> 59#include <linux/delay.h>
60#include <linux/atomic.h> 60#include <linux/atomic.h>
61#include <linux/cpuset.h>
61#include <net/sock.h> 62#include <net/sock.h>
62 63
63/* 64/*
@@ -2739,6 +2740,7 @@ out_unlock_rcu:
2739out_unlock_threadgroup: 2740out_unlock_threadgroup:
2740 percpu_up_write(&cgroup_threadgroup_rwsem); 2741 percpu_up_write(&cgroup_threadgroup_rwsem);
2741 cgroup_kn_unlock(of->kn); 2742 cgroup_kn_unlock(of->kn);
2743 cpuset_post_attach_flush();
2742 return ret ?: nbytes; 2744 return ret ?: nbytes;
2743} 2745}
2744 2746
@@ -4655,14 +4657,15 @@ static void css_free_work_fn(struct work_struct *work)
4655 4657
4656 if (ss) { 4658 if (ss) {
4657 /* css free path */ 4659 /* css free path */
4660 struct cgroup_subsys_state *parent = css->parent;
4658 int id = css->id; 4661 int id = css->id;
4659 4662
4660 if (css->parent)
4661 css_put(css->parent);
4662
4663 ss->css_free(css); 4663 ss->css_free(css);
4664 cgroup_idr_remove(&ss->css_idr, id); 4664 cgroup_idr_remove(&ss->css_idr, id);
4665 cgroup_put(cgrp); 4665 cgroup_put(cgrp);
4666
4667 if (parent)
4668 css_put(parent);
4666 } else { 4669 } else {
4667 /* cgroup free path */ 4670 /* cgroup free path */
4668 atomic_dec(&cgrp->root->nr_cgrps); 4671 atomic_dec(&cgrp->root->nr_cgrps);
@@ -4758,6 +4761,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
4758 INIT_LIST_HEAD(&css->sibling); 4761 INIT_LIST_HEAD(&css->sibling);
4759 INIT_LIST_HEAD(&css->children); 4762 INIT_LIST_HEAD(&css->children);
4760 css->serial_nr = css_serial_nr_next++; 4763 css->serial_nr = css_serial_nr_next++;
4764 atomic_set(&css->online_cnt, 0);
4761 4765
4762 if (cgroup_parent(cgrp)) { 4766 if (cgroup_parent(cgrp)) {
4763 css->parent = cgroup_css(cgroup_parent(cgrp), ss); 4767 css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4780,6 +4784,10 @@ static int online_css(struct cgroup_subsys_state *css)
4780 if (!ret) { 4784 if (!ret) {
4781 css->flags |= CSS_ONLINE; 4785 css->flags |= CSS_ONLINE;
4782 rcu_assign_pointer(css->cgroup->subsys[ss->id], css); 4786 rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
4787
4788 atomic_inc(&css->online_cnt);
4789 if (css->parent)
4790 atomic_inc(&css->parent->online_cnt);
4783 } 4791 }
4784 return ret; 4792 return ret;
4785} 4793}
@@ -5017,10 +5025,15 @@ static void css_killed_work_fn(struct work_struct *work)
5017 container_of(work, struct cgroup_subsys_state, destroy_work); 5025 container_of(work, struct cgroup_subsys_state, destroy_work);
5018 5026
5019 mutex_lock(&cgroup_mutex); 5027 mutex_lock(&cgroup_mutex);
5020 offline_css(css);
5021 mutex_unlock(&cgroup_mutex);
5022 5028
5023 css_put(css); 5029 do {
5030 offline_css(css);
5031 css_put(css);
5032 /* @css can't go away while we're holding cgroup_mutex */
5033 css = css->parent;
5034 } while (css && atomic_dec_and_test(&css->online_cnt));
5035
5036 mutex_unlock(&cgroup_mutex);
5024} 5037}
5025 5038
5026/* css kill confirmation processing requires process context, bounce */ 5039/* css kill confirmation processing requires process context, bounce */
@@ -5029,8 +5042,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
5029 struct cgroup_subsys_state *css = 5042 struct cgroup_subsys_state *css =
5030 container_of(ref, struct cgroup_subsys_state, refcnt); 5043 container_of(ref, struct cgroup_subsys_state, refcnt);
5031 5044
5032 INIT_WORK(&css->destroy_work, css_killed_work_fn); 5045 if (atomic_dec_and_test(&css->online_cnt)) {
5033 queue_work(cgroup_destroy_wq, &css->destroy_work); 5046 INIT_WORK(&css->destroy_work, css_killed_work_fn);
5047 queue_work(cgroup_destroy_wq, &css->destroy_work);
5048 }
5034} 5049}
5035 5050
5036/** 5051/**
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e945fcd8179..41989ab4db57 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
287static DEFINE_MUTEX(cpuset_mutex); 287static DEFINE_MUTEX(cpuset_mutex);
288static DEFINE_SPINLOCK(callback_lock); 288static DEFINE_SPINLOCK(callback_lock);
289 289
290static struct workqueue_struct *cpuset_migrate_mm_wq;
291
290/* 292/*
291 * CPU / memory hotplug is handled asynchronously. 293 * CPU / memory hotplug is handled asynchronously.
292 */ 294 */
@@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
972} 974}
973 975
974/* 976/*
975 * cpuset_migrate_mm 977 * Migrate memory region from one set of nodes to another. This is
976 * 978 * performed asynchronously as it can be called from process migration path
977 * Migrate memory region from one set of nodes to another. 979 * holding locks involved in process management. All mm migrations are
978 * 980 * performed in the queued order and can be waited for by flushing
979 * Temporarilly set tasks mems_allowed to target nodes of migration, 981 * cpuset_migrate_mm_wq.
980 * so that the migration code can allocate pages on these nodes.
981 *
982 * While the mm_struct we are migrating is typically from some
983 * other task, the task_struct mems_allowed that we are hacking
984 * is for our current task, which must allocate new pages for that
985 * migrating memory region.
986 */ 982 */
987 983
984struct cpuset_migrate_mm_work {
985 struct work_struct work;
986 struct mm_struct *mm;
987 nodemask_t from;
988 nodemask_t to;
989};
990
991static void cpuset_migrate_mm_workfn(struct work_struct *work)
992{
993 struct cpuset_migrate_mm_work *mwork =
994 container_of(work, struct cpuset_migrate_mm_work, work);
995
996 /* on a wq worker, no need to worry about %current's mems_allowed */
997 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
998 mmput(mwork->mm);
999 kfree(mwork);
1000}
1001
988static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 1002static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
989 const nodemask_t *to) 1003 const nodemask_t *to)
990{ 1004{
991 struct task_struct *tsk = current; 1005 struct cpuset_migrate_mm_work *mwork;
992
993 tsk->mems_allowed = *to;
994 1006
995 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); 1007 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1008 if (mwork) {
1009 mwork->mm = mm;
1010 mwork->from = *from;
1011 mwork->to = *to;
1012 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1013 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1014 } else {
1015 mmput(mm);
1016 }
1017}
996 1018
997 rcu_read_lock(); 1019void cpuset_post_attach_flush(void)
998 guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed); 1020{
999 rcu_read_unlock(); 1021 flush_workqueue(cpuset_migrate_mm_wq);
1000} 1022}
1001 1023
1002/* 1024/*
@@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
1097 mpol_rebind_mm(mm, &cs->mems_allowed); 1119 mpol_rebind_mm(mm, &cs->mems_allowed);
1098 if (migrate) 1120 if (migrate)
1099 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); 1121 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1100 mmput(mm); 1122 else
1123 mmput(mm);
1101 } 1124 }
1102 css_task_iter_end(&it); 1125 css_task_iter_end(&it);
1103 1126
@@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
1545 * @old_mems_allowed is the right nodesets that we 1568 * @old_mems_allowed is the right nodesets that we
1546 * migrate mm from. 1569 * migrate mm from.
1547 */ 1570 */
1548 if (is_memory_migrate(cs)) { 1571 if (is_memory_migrate(cs))
1549 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 1572 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
1550 &cpuset_attach_nodemask_to); 1573 &cpuset_attach_nodemask_to);
1551 } 1574 else
1552 mmput(mm); 1575 mmput(mm);
1553 } 1576 }
1554 } 1577 }
1555 1578
@@ -1714,6 +1737,7 @@ out_unlock:
1714 mutex_unlock(&cpuset_mutex); 1737 mutex_unlock(&cpuset_mutex);
1715 kernfs_unbreak_active_protection(of->kn); 1738 kernfs_unbreak_active_protection(of->kn);
1716 css_put(&cs->css); 1739 css_put(&cs->css);
1740 flush_workqueue(cpuset_migrate_mm_wq);
1717 return retval ?: nbytes; 1741 return retval ?: nbytes;
1718} 1742}
1719 1743
@@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
2359 top_cpuset.effective_mems = node_states[N_MEMORY]; 2383 top_cpuset.effective_mems = node_states[N_MEMORY];
2360 2384
2361 register_hotmemory_notifier(&cpuset_track_online_nodes_nb); 2385 register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2386
2387 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
2388 BUG_ON(!cpuset_migrate_mm_wq);
2362} 2389}
2363 2390
2364/** 2391/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5946460b2425..614614821f00 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -64,8 +64,17 @@ static void remote_function(void *data)
64 struct task_struct *p = tfc->p; 64 struct task_struct *p = tfc->p;
65 65
66 if (p) { 66 if (p) {
67 tfc->ret = -EAGAIN; 67 /* -EAGAIN */
68 if (task_cpu(p) != smp_processor_id() || !task_curr(p)) 68 if (task_cpu(p) != smp_processor_id())
69 return;
70
71 /*
72 * Now that we're on right CPU with IRQs disabled, we can test
73 * if we hit the right task without races.
74 */
75
76 tfc->ret = -ESRCH; /* No such (running) process */
77 if (p != current)
69 return; 78 return;
70 } 79 }
71 80
@@ -92,13 +101,17 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info)
92 .p = p, 101 .p = p,
93 .func = func, 102 .func = func,
94 .info = info, 103 .info = info,
95 .ret = -ESRCH, /* No such (running) process */ 104 .ret = -EAGAIN,
96 }; 105 };
106 int ret;
97 107
98 if (task_curr(p)) 108 do {
99 smp_call_function_single(task_cpu(p), remote_function, &data, 1); 109 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
110 if (!ret)
111 ret = data.ret;
112 } while (ret == -EAGAIN);
100 113
101 return data.ret; 114 return ret;
102} 115}
103 116
104/** 117/**
@@ -169,19 +182,6 @@ static bool is_kernel_event(struct perf_event *event)
169 * rely on ctx->is_active and therefore cannot use event_function_call(). 182 * rely on ctx->is_active and therefore cannot use event_function_call().
170 * See perf_install_in_context(). 183 * See perf_install_in_context().
171 * 184 *
172 * This is because we need a ctx->lock serialized variable (ctx->is_active)
173 * to reliably determine if a particular task/context is scheduled in. The
174 * task_curr() use in task_function_call() is racy in that a remote context
175 * switch is not a single atomic operation.
176 *
177 * As is, the situation is 'safe' because we set rq->curr before we do the
178 * actual context switch. This means that task_curr() will fail early, but
179 * we'll continue spinning on ctx->is_active until we've passed
180 * perf_event_task_sched_out().
181 *
182 * Without this ctx->lock serialized variable we could have race where we find
183 * the task (and hence the context) would not be active while in fact they are.
184 *
185 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set. 185 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
186 */ 186 */
187 187
@@ -212,7 +212,7 @@ static int event_function(void *info)
212 */ 212 */
213 if (ctx->task) { 213 if (ctx->task) {
214 if (ctx->task != current) { 214 if (ctx->task != current) {
215 ret = -EAGAIN; 215 ret = -ESRCH;
216 goto unlock; 216 goto unlock;
217 } 217 }
218 218
@@ -276,10 +276,10 @@ static void event_function_call(struct perf_event *event, event_f func, void *da
276 return; 276 return;
277 } 277 }
278 278
279again:
280 if (task == TASK_TOMBSTONE) 279 if (task == TASK_TOMBSTONE)
281 return; 280 return;
282 281
282again:
283 if (!task_function_call(task, event_function, &efs)) 283 if (!task_function_call(task, event_function, &efs))
284 return; 284 return;
285 285
@@ -289,13 +289,15 @@ again:
289 * a concurrent perf_event_context_sched_out(). 289 * a concurrent perf_event_context_sched_out().
290 */ 290 */
291 task = ctx->task; 291 task = ctx->task;
292 if (task != TASK_TOMBSTONE) { 292 if (task == TASK_TOMBSTONE) {
293 if (ctx->is_active) { 293 raw_spin_unlock_irq(&ctx->lock);
294 raw_spin_unlock_irq(&ctx->lock); 294 return;
295 goto again;
296 }
297 func(event, NULL, ctx, data);
298 } 295 }
296 if (ctx->is_active) {
297 raw_spin_unlock_irq(&ctx->lock);
298 goto again;
299 }
300 func(event, NULL, ctx, data);
299 raw_spin_unlock_irq(&ctx->lock); 301 raw_spin_unlock_irq(&ctx->lock);
300} 302}
301 303
@@ -314,6 +316,7 @@ again:
314enum event_type_t { 316enum event_type_t {
315 EVENT_FLEXIBLE = 0x1, 317 EVENT_FLEXIBLE = 0x1,
316 EVENT_PINNED = 0x2, 318 EVENT_PINNED = 0x2,
319 EVENT_TIME = 0x4,
317 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, 320 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
318}; 321};
319 322
@@ -321,7 +324,13 @@ enum event_type_t {
321 * perf_sched_events : >0 events exist 324 * perf_sched_events : >0 events exist
322 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu 325 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
323 */ 326 */
324struct static_key_deferred perf_sched_events __read_mostly; 327
328static void perf_sched_delayed(struct work_struct *work);
329DEFINE_STATIC_KEY_FALSE(perf_sched_events);
330static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
331static DEFINE_MUTEX(perf_sched_mutex);
332static atomic_t perf_sched_count;
333
325static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 334static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
326static DEFINE_PER_CPU(int, perf_sched_cb_usages); 335static DEFINE_PER_CPU(int, perf_sched_cb_usages);
327 336
@@ -1288,16 +1297,18 @@ static u64 perf_event_time(struct perf_event *event)
1288 1297
1289/* 1298/*
1290 * Update the total_time_enabled and total_time_running fields for a event. 1299 * Update the total_time_enabled and total_time_running fields for a event.
1291 * The caller of this function needs to hold the ctx->lock.
1292 */ 1300 */
1293static void update_event_times(struct perf_event *event) 1301static void update_event_times(struct perf_event *event)
1294{ 1302{
1295 struct perf_event_context *ctx = event->ctx; 1303 struct perf_event_context *ctx = event->ctx;
1296 u64 run_end; 1304 u64 run_end;
1297 1305
1306 lockdep_assert_held(&ctx->lock);
1307
1298 if (event->state < PERF_EVENT_STATE_INACTIVE || 1308 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1299 event->group_leader->state < PERF_EVENT_STATE_INACTIVE) 1309 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1300 return; 1310 return;
1311
1301 /* 1312 /*
1302 * in cgroup mode, time_enabled represents 1313 * in cgroup mode, time_enabled represents
1303 * the time the event was enabled AND active 1314 * the time the event was enabled AND active
@@ -1645,7 +1656,7 @@ out:
1645 1656
1646static bool is_orphaned_event(struct perf_event *event) 1657static bool is_orphaned_event(struct perf_event *event)
1647{ 1658{
1648 return event->state == PERF_EVENT_STATE_EXIT; 1659 return event->state == PERF_EVENT_STATE_DEAD;
1649} 1660}
1650 1661
1651static inline int pmu_filter_match(struct perf_event *event) 1662static inline int pmu_filter_match(struct perf_event *event)
@@ -1690,14 +1701,14 @@ event_sched_out(struct perf_event *event,
1690 1701
1691 perf_pmu_disable(event->pmu); 1702 perf_pmu_disable(event->pmu);
1692 1703
1704 event->tstamp_stopped = tstamp;
1705 event->pmu->del(event, 0);
1706 event->oncpu = -1;
1693 event->state = PERF_EVENT_STATE_INACTIVE; 1707 event->state = PERF_EVENT_STATE_INACTIVE;
1694 if (event->pending_disable) { 1708 if (event->pending_disable) {
1695 event->pending_disable = 0; 1709 event->pending_disable = 0;
1696 event->state = PERF_EVENT_STATE_OFF; 1710 event->state = PERF_EVENT_STATE_OFF;
1697 } 1711 }
1698 event->tstamp_stopped = tstamp;
1699 event->pmu->del(event, 0);
1700 event->oncpu = -1;
1701 1712
1702 if (!is_software_event(event)) 1713 if (!is_software_event(event))
1703 cpuctx->active_oncpu--; 1714 cpuctx->active_oncpu--;
@@ -1732,7 +1743,6 @@ group_sched_out(struct perf_event *group_event,
1732} 1743}
1733 1744
1734#define DETACH_GROUP 0x01UL 1745#define DETACH_GROUP 0x01UL
1735#define DETACH_STATE 0x02UL
1736 1746
1737/* 1747/*
1738 * Cross CPU call to remove a performance event 1748 * Cross CPU call to remove a performance event
@@ -1752,8 +1762,6 @@ __perf_remove_from_context(struct perf_event *event,
1752 if (flags & DETACH_GROUP) 1762 if (flags & DETACH_GROUP)
1753 perf_group_detach(event); 1763 perf_group_detach(event);
1754 list_del_event(event, ctx); 1764 list_del_event(event, ctx);
1755 if (flags & DETACH_STATE)
1756 event->state = PERF_EVENT_STATE_EXIT;
1757 1765
1758 if (!ctx->nr_events && ctx->is_active) { 1766 if (!ctx->nr_events && ctx->is_active) {
1759 ctx->is_active = 0; 1767 ctx->is_active = 0;
@@ -2063,14 +2071,27 @@ static void add_event_to_ctx(struct perf_event *event,
2063 event->tstamp_stopped = tstamp; 2071 event->tstamp_stopped = tstamp;
2064} 2072}
2065 2073
2066static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, 2074static void ctx_sched_out(struct perf_event_context *ctx,
2067 struct perf_event_context *ctx); 2075 struct perf_cpu_context *cpuctx,
2076 enum event_type_t event_type);
2068static void 2077static void
2069ctx_sched_in(struct perf_event_context *ctx, 2078ctx_sched_in(struct perf_event_context *ctx,
2070 struct perf_cpu_context *cpuctx, 2079 struct perf_cpu_context *cpuctx,
2071 enum event_type_t event_type, 2080 enum event_type_t event_type,
2072 struct task_struct *task); 2081 struct task_struct *task);
2073 2082
2083static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2084 struct perf_event_context *ctx)
2085{
2086 if (!cpuctx->task_ctx)
2087 return;
2088
2089 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2090 return;
2091
2092 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2093}
2094
2074static void perf_event_sched_in(struct perf_cpu_context *cpuctx, 2095static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2075 struct perf_event_context *ctx, 2096 struct perf_event_context *ctx,
2076 struct task_struct *task) 2097 struct task_struct *task)
@@ -2097,49 +2118,68 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
2097/* 2118/*
2098 * Cross CPU call to install and enable a performance event 2119 * Cross CPU call to install and enable a performance event
2099 * 2120 *
2100 * Must be called with ctx->mutex held 2121 * Very similar to remote_function() + event_function() but cannot assume that
2122 * things like ctx->is_active and cpuctx->task_ctx are set.
2101 */ 2123 */
2102static int __perf_install_in_context(void *info) 2124static int __perf_install_in_context(void *info)
2103{ 2125{
2104 struct perf_event_context *ctx = info; 2126 struct perf_event *event = info;
2127 struct perf_event_context *ctx = event->ctx;
2105 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); 2128 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2106 struct perf_event_context *task_ctx = cpuctx->task_ctx; 2129 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2130 bool activate = true;
2131 int ret = 0;
2107 2132
2108 raw_spin_lock(&cpuctx->ctx.lock); 2133 raw_spin_lock(&cpuctx->ctx.lock);
2109 if (ctx->task) { 2134 if (ctx->task) {
2110 raw_spin_lock(&ctx->lock); 2135 raw_spin_lock(&ctx->lock);
2111 /*
2112 * If we hit the 'wrong' task, we've since scheduled and
2113 * everything should be sorted, nothing to do!
2114 */
2115 task_ctx = ctx; 2136 task_ctx = ctx;
2116 if (ctx->task != current) 2137
2138 /* If we're on the wrong CPU, try again */
2139 if (task_cpu(ctx->task) != smp_processor_id()) {
2140 ret = -ESRCH;
2117 goto unlock; 2141 goto unlock;
2142 }
2118 2143
2119 /* 2144 /*
2120 * If task_ctx is set, it had better be to us. 2145 * If we're on the right CPU, see if the task we target is
2146 * current, if not we don't have to activate the ctx, a future
2147 * context switch will do that for us.
2121 */ 2148 */
2122 WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx); 2149 if (ctx->task != current)
2150 activate = false;
2151 else
2152 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2153
2123 } else if (task_ctx) { 2154 } else if (task_ctx) {
2124 raw_spin_lock(&task_ctx->lock); 2155 raw_spin_lock(&task_ctx->lock);
2125 } 2156 }
2126 2157
2127 ctx_resched(cpuctx, task_ctx); 2158 if (activate) {
2159 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2160 add_event_to_ctx(event, ctx);
2161 ctx_resched(cpuctx, task_ctx);
2162 } else {
2163 add_event_to_ctx(event, ctx);
2164 }
2165
2128unlock: 2166unlock:
2129 perf_ctx_unlock(cpuctx, task_ctx); 2167 perf_ctx_unlock(cpuctx, task_ctx);
2130 2168
2131 return 0; 2169 return ret;
2132} 2170}
2133 2171
2134/* 2172/*
2135 * Attach a performance event to a context 2173 * Attach a performance event to a context.
2174 *
2175 * Very similar to event_function_call, see comment there.
2136 */ 2176 */
2137static void 2177static void
2138perf_install_in_context(struct perf_event_context *ctx, 2178perf_install_in_context(struct perf_event_context *ctx,
2139 struct perf_event *event, 2179 struct perf_event *event,
2140 int cpu) 2180 int cpu)
2141{ 2181{
2142 struct task_struct *task = NULL; 2182 struct task_struct *task = READ_ONCE(ctx->task);
2143 2183
2144 lockdep_assert_held(&ctx->mutex); 2184 lockdep_assert_held(&ctx->mutex);
2145 2185
@@ -2147,40 +2187,46 @@ perf_install_in_context(struct perf_event_context *ctx,
2147 if (event->cpu != -1) 2187 if (event->cpu != -1)
2148 event->cpu = cpu; 2188 event->cpu = cpu;
2149 2189
2190 if (!task) {
2191 cpu_function_call(cpu, __perf_install_in_context, event);
2192 return;
2193 }
2194
2195 /*
2196 * Should not happen, we validate the ctx is still alive before calling.
2197 */
2198 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2199 return;
2200
2150 /* 2201 /*
2151 * Installing events is tricky because we cannot rely on ctx->is_active 2202 * Installing events is tricky because we cannot rely on ctx->is_active
2152 * to be set in case this is the nr_events 0 -> 1 transition. 2203 * to be set in case this is the nr_events 0 -> 1 transition.
2153 *
2154 * So what we do is we add the event to the list here, which will allow
2155 * a future context switch to DTRT and then send a racy IPI. If the IPI
2156 * fails to hit the right task, this means a context switch must have
2157 * happened and that will have taken care of business.
2158 */ 2204 */
2159 raw_spin_lock_irq(&ctx->lock); 2205again:
2160 task = ctx->task;
2161 /* 2206 /*
2162 * Worse, we cannot even rely on the ctx actually existing anymore. If 2207 * Cannot use task_function_call() because we need to run on the task's
2163 * between find_get_context() and perf_install_in_context() the task 2208 * CPU regardless of whether its current or not.
2164 * went through perf_event_exit_task() its dead and we should not be
2165 * adding new events.
2166 */ 2209 */
2167 if (task == TASK_TOMBSTONE) { 2210 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2211 return;
2212
2213 raw_spin_lock_irq(&ctx->lock);
2214 task = ctx->task;
2215 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2216 /*
2217 * Cannot happen because we already checked above (which also
2218 * cannot happen), and we hold ctx->mutex, which serializes us
2219 * against perf_event_exit_task_context().
2220 */
2168 raw_spin_unlock_irq(&ctx->lock); 2221 raw_spin_unlock_irq(&ctx->lock);
2169 return; 2222 return;
2170 } 2223 }
2171 update_context_time(ctx); 2224 raw_spin_unlock_irq(&ctx->lock);
2172 /* 2225 /*
2173 * Update cgrp time only if current cgrp matches event->cgrp. 2226 * Since !ctx->is_active doesn't mean anything, we must IPI
2174 * Must be done before calling add_event_to_ctx(). 2227 * unconditionally.
2175 */ 2228 */
2176 update_cgrp_time_from_event(event); 2229 goto again;
2177 add_event_to_ctx(event, ctx);
2178 raw_spin_unlock_irq(&ctx->lock);
2179
2180 if (task)
2181 task_function_call(task, __perf_install_in_context, ctx);
2182 else
2183 cpu_function_call(cpu, __perf_install_in_context, ctx);
2184} 2230}
2185 2231
2186/* 2232/*
@@ -2219,17 +2265,18 @@ static void __perf_event_enable(struct perf_event *event,
2219 event->state <= PERF_EVENT_STATE_ERROR) 2265 event->state <= PERF_EVENT_STATE_ERROR)
2220 return; 2266 return;
2221 2267
2222 update_context_time(ctx); 2268 if (ctx->is_active)
2269 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2270
2223 __perf_event_mark_enabled(event); 2271 __perf_event_mark_enabled(event);
2224 2272
2225 if (!ctx->is_active) 2273 if (!ctx->is_active)
2226 return; 2274 return;
2227 2275
2228 if (!event_filter_match(event)) { 2276 if (!event_filter_match(event)) {
2229 if (is_cgroup_event(event)) { 2277 if (is_cgroup_event(event))
2230 perf_cgroup_set_timestamp(current, ctx); // XXX ?
2231 perf_cgroup_defer_enabled(event); 2278 perf_cgroup_defer_enabled(event);
2232 } 2279 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2233 return; 2280 return;
2234 } 2281 }
2235 2282
@@ -2237,8 +2284,10 @@ static void __perf_event_enable(struct perf_event *event,
2237 * If the event is in a group and isn't the group leader, 2284 * If the event is in a group and isn't the group leader,
2238 * then don't put it on unless the group is on. 2285 * then don't put it on unless the group is on.
2239 */ 2286 */
2240 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 2287 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2288 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2241 return; 2289 return;
2290 }
2242 2291
2243 task_ctx = cpuctx->task_ctx; 2292 task_ctx = cpuctx->task_ctx;
2244 if (ctx->task) 2293 if (ctx->task)
@@ -2344,24 +2393,33 @@ static void ctx_sched_out(struct perf_event_context *ctx,
2344 } 2393 }
2345 2394
2346 ctx->is_active &= ~event_type; 2395 ctx->is_active &= ~event_type;
2396 if (!(ctx->is_active & EVENT_ALL))
2397 ctx->is_active = 0;
2398
2347 if (ctx->task) { 2399 if (ctx->task) {
2348 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 2400 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2349 if (!ctx->is_active) 2401 if (!ctx->is_active)
2350 cpuctx->task_ctx = NULL; 2402 cpuctx->task_ctx = NULL;
2351 } 2403 }
2352 2404
2353 update_context_time(ctx); 2405 is_active ^= ctx->is_active; /* changed bits */
2354 update_cgrp_time_from_cpuctx(cpuctx); 2406
2355 if (!ctx->nr_active) 2407 if (is_active & EVENT_TIME) {
2408 /* update (and stop) ctx time */
2409 update_context_time(ctx);
2410 update_cgrp_time_from_cpuctx(cpuctx);
2411 }
2412
2413 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2356 return; 2414 return;
2357 2415
2358 perf_pmu_disable(ctx->pmu); 2416 perf_pmu_disable(ctx->pmu);
2359 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { 2417 if (is_active & EVENT_PINNED) {
2360 list_for_each_entry(event, &ctx->pinned_groups, group_entry) 2418 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2361 group_sched_out(event, cpuctx, ctx); 2419 group_sched_out(event, cpuctx, ctx);
2362 } 2420 }
2363 2421
2364 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { 2422 if (is_active & EVENT_FLEXIBLE) {
2365 list_for_each_entry(event, &ctx->flexible_groups, group_entry) 2423 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2366 group_sched_out(event, cpuctx, ctx); 2424 group_sched_out(event, cpuctx, ctx);
2367 } 2425 }
@@ -2641,18 +2699,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
2641 perf_cgroup_sched_out(task, next); 2699 perf_cgroup_sched_out(task, next);
2642} 2700}
2643 2701
2644static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2645 struct perf_event_context *ctx)
2646{
2647 if (!cpuctx->task_ctx)
2648 return;
2649
2650 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2651 return;
2652
2653 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2654}
2655
2656/* 2702/*
2657 * Called with IRQs disabled 2703 * Called with IRQs disabled
2658 */ 2704 */
@@ -2735,7 +2781,7 @@ ctx_sched_in(struct perf_event_context *ctx,
2735 if (likely(!ctx->nr_events)) 2781 if (likely(!ctx->nr_events))
2736 return; 2782 return;
2737 2783
2738 ctx->is_active |= event_type; 2784 ctx->is_active |= (event_type | EVENT_TIME);
2739 if (ctx->task) { 2785 if (ctx->task) {
2740 if (!is_active) 2786 if (!is_active)
2741 cpuctx->task_ctx = ctx; 2787 cpuctx->task_ctx = ctx;
@@ -2743,18 +2789,24 @@ ctx_sched_in(struct perf_event_context *ctx,
2743 WARN_ON_ONCE(cpuctx->task_ctx != ctx); 2789 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2744 } 2790 }
2745 2791
2746 now = perf_clock(); 2792 is_active ^= ctx->is_active; /* changed bits */
2747 ctx->timestamp = now; 2793
2748 perf_cgroup_set_timestamp(task, ctx); 2794 if (is_active & EVENT_TIME) {
2795 /* start ctx time */
2796 now = perf_clock();
2797 ctx->timestamp = now;
2798 perf_cgroup_set_timestamp(task, ctx);
2799 }
2800
2749 /* 2801 /*
2750 * First go through the list and put on any pinned groups 2802 * First go through the list and put on any pinned groups
2751 * in order to give them the best chance of going on. 2803 * in order to give them the best chance of going on.
2752 */ 2804 */
2753 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) 2805 if (is_active & EVENT_PINNED)
2754 ctx_pinned_sched_in(ctx, cpuctx); 2806 ctx_pinned_sched_in(ctx, cpuctx);
2755 2807
2756 /* Then walk through the lower prio flexible groups */ 2808 /* Then walk through the lower prio flexible groups */
2757 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) 2809 if (is_active & EVENT_FLEXIBLE)
2758 ctx_flexible_sched_in(ctx, cpuctx); 2810 ctx_flexible_sched_in(ctx, cpuctx);
2759} 2811}
2760 2812
@@ -3120,6 +3172,7 @@ static void perf_event_enable_on_exec(int ctxn)
3120 3172
3121 cpuctx = __get_cpu_context(ctx); 3173 cpuctx = __get_cpu_context(ctx);
3122 perf_ctx_lock(cpuctx, ctx); 3174 perf_ctx_lock(cpuctx, ctx);
3175 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3123 list_for_each_entry(event, &ctx->event_list, event_entry) 3176 list_for_each_entry(event, &ctx->event_list, event_entry)
3124 enabled |= event_enable_on_exec(event, ctx); 3177 enabled |= event_enable_on_exec(event, ctx);
3125 3178
@@ -3537,12 +3590,22 @@ static void unaccount_event(struct perf_event *event)
3537 if (has_branch_stack(event)) 3590 if (has_branch_stack(event))
3538 dec = true; 3591 dec = true;
3539 3592
3540 if (dec) 3593 if (dec) {
3541 static_key_slow_dec_deferred(&perf_sched_events); 3594 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3595 schedule_delayed_work(&perf_sched_work, HZ);
3596 }
3542 3597
3543 unaccount_event_cpu(event, event->cpu); 3598 unaccount_event_cpu(event, event->cpu);
3544} 3599}
3545 3600
3601static void perf_sched_delayed(struct work_struct *work)
3602{
3603 mutex_lock(&perf_sched_mutex);
3604 if (atomic_dec_and_test(&perf_sched_count))
3605 static_branch_disable(&perf_sched_events);
3606 mutex_unlock(&perf_sched_mutex);
3607}
3608
3546/* 3609/*
3547 * The following implement mutual exclusion of events on "exclusive" pmus 3610 * The following implement mutual exclusion of events on "exclusive" pmus
3548 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled 3611 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -3752,30 +3815,42 @@ static void put_event(struct perf_event *event)
3752 */ 3815 */
3753int perf_event_release_kernel(struct perf_event *event) 3816int perf_event_release_kernel(struct perf_event *event)
3754{ 3817{
3755 struct perf_event_context *ctx; 3818 struct perf_event_context *ctx = event->ctx;
3756 struct perf_event *child, *tmp; 3819 struct perf_event *child, *tmp;
3757 3820
3821 /*
3822 * If we got here through err_file: fput(event_file); we will not have
3823 * attached to a context yet.
3824 */
3825 if (!ctx) {
3826 WARN_ON_ONCE(event->attach_state &
3827 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
3828 goto no_ctx;
3829 }
3830
3758 if (!is_kernel_event(event)) 3831 if (!is_kernel_event(event))
3759 perf_remove_from_owner(event); 3832 perf_remove_from_owner(event);
3760 3833
3761 ctx = perf_event_ctx_lock(event); 3834 ctx = perf_event_ctx_lock(event);
3762 WARN_ON_ONCE(ctx->parent_ctx); 3835 WARN_ON_ONCE(ctx->parent_ctx);
3763 perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE); 3836 perf_remove_from_context(event, DETACH_GROUP);
3764 perf_event_ctx_unlock(event, ctx);
3765 3837
3838 raw_spin_lock_irq(&ctx->lock);
3766 /* 3839 /*
3767 * At this point we must have event->state == PERF_EVENT_STATE_EXIT, 3840 * Mark this even as STATE_DEAD, there is no external reference to it
3768 * either from the above perf_remove_from_context() or through 3841 * anymore.
3769 * perf_event_exit_event().
3770 * 3842 *
3771 * Therefore, anybody acquiring event->child_mutex after the below 3843 * Anybody acquiring event->child_mutex after the below loop _must_
3772 * loop _must_ also see this, most importantly inherit_event() which 3844 * also see this, most importantly inherit_event() which will avoid
3773 * will avoid placing more children on the list. 3845 * placing more children on the list.
3774 * 3846 *
3775 * Thus this guarantees that we will in fact observe and kill _ALL_ 3847 * Thus this guarantees that we will in fact observe and kill _ALL_
3776 * child events. 3848 * child events.
3777 */ 3849 */
3778 WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT); 3850 event->state = PERF_EVENT_STATE_DEAD;
3851 raw_spin_unlock_irq(&ctx->lock);
3852
3853 perf_event_ctx_unlock(event, ctx);
3779 3854
3780again: 3855again:
3781 mutex_lock(&event->child_mutex); 3856 mutex_lock(&event->child_mutex);
@@ -3830,8 +3905,8 @@ again:
3830 } 3905 }
3831 mutex_unlock(&event->child_mutex); 3906 mutex_unlock(&event->child_mutex);
3832 3907
3833 /* Must be the last reference */ 3908no_ctx:
3834 put_event(event); 3909 put_event(event); /* Must be the 'last' reference */
3835 return 0; 3910 return 0;
3836} 3911}
3837EXPORT_SYMBOL_GPL(perf_event_release_kernel); 3912EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -3988,7 +4063,7 @@ static bool is_event_hup(struct perf_event *event)
3988{ 4063{
3989 bool no_children; 4064 bool no_children;
3990 4065
3991 if (event->state != PERF_EVENT_STATE_EXIT) 4066 if (event->state > PERF_EVENT_STATE_EXIT)
3992 return false; 4067 return false;
3993 4068
3994 mutex_lock(&event->child_mutex); 4069 mutex_lock(&event->child_mutex);
@@ -7769,8 +7844,28 @@ static void account_event(struct perf_event *event)
7769 if (is_cgroup_event(event)) 7844 if (is_cgroup_event(event))
7770 inc = true; 7845 inc = true;
7771 7846
7772 if (inc) 7847 if (inc) {
7773 static_key_slow_inc(&perf_sched_events.key); 7848 if (atomic_inc_not_zero(&perf_sched_count))
7849 goto enabled;
7850
7851 mutex_lock(&perf_sched_mutex);
7852 if (!atomic_read(&perf_sched_count)) {
7853 static_branch_enable(&perf_sched_events);
7854 /*
7855 * Guarantee that all CPUs observe they key change and
7856 * call the perf scheduling hooks before proceeding to
7857 * install events that need them.
7858 */
7859 synchronize_sched();
7860 }
7861 /*
7862 * Now that we have waited for the sync_sched(), allow further
7863 * increments to by-pass the mutex.
7864 */
7865 atomic_inc(&perf_sched_count);
7866 mutex_unlock(&perf_sched_mutex);
7867 }
7868enabled:
7774 7869
7775 account_event_cpu(event, event->cpu); 7870 account_event_cpu(event, event->cpu);
7776} 7871}
@@ -8389,10 +8484,19 @@ SYSCALL_DEFINE5(perf_event_open,
8389 if (move_group) { 8484 if (move_group) {
8390 gctx = group_leader->ctx; 8485 gctx = group_leader->ctx;
8391 mutex_lock_double(&gctx->mutex, &ctx->mutex); 8486 mutex_lock_double(&gctx->mutex, &ctx->mutex);
8487 if (gctx->task == TASK_TOMBSTONE) {
8488 err = -ESRCH;
8489 goto err_locked;
8490 }
8392 } else { 8491 } else {
8393 mutex_lock(&ctx->mutex); 8492 mutex_lock(&ctx->mutex);
8394 } 8493 }
8395 8494
8495 if (ctx->task == TASK_TOMBSTONE) {
8496 err = -ESRCH;
8497 goto err_locked;
8498 }
8499
8396 if (!perf_event_validate_size(event)) { 8500 if (!perf_event_validate_size(event)) {
8397 err = -E2BIG; 8501 err = -E2BIG;
8398 goto err_locked; 8502 goto err_locked;
@@ -8509,7 +8613,12 @@ err_context:
8509 perf_unpin_context(ctx); 8613 perf_unpin_context(ctx);
8510 put_ctx(ctx); 8614 put_ctx(ctx);
8511err_alloc: 8615err_alloc:
8512 free_event(event); 8616 /*
8617 * If event_file is set, the fput() above will have called ->release()
8618 * and that will take care of freeing the event.
8619 */
8620 if (!event_file)
8621 free_event(event);
8513err_cpus: 8622err_cpus:
8514 put_online_cpus(); 8623 put_online_cpus();
8515err_task: 8624err_task:
@@ -8563,12 +8672,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8563 8672
8564 WARN_ON_ONCE(ctx->parent_ctx); 8673 WARN_ON_ONCE(ctx->parent_ctx);
8565 mutex_lock(&ctx->mutex); 8674 mutex_lock(&ctx->mutex);
8675 if (ctx->task == TASK_TOMBSTONE) {
8676 err = -ESRCH;
8677 goto err_unlock;
8678 }
8679
8566 if (!exclusive_event_installable(event, ctx)) { 8680 if (!exclusive_event_installable(event, ctx)) {
8567 mutex_unlock(&ctx->mutex);
8568 perf_unpin_context(ctx);
8569 put_ctx(ctx);
8570 err = -EBUSY; 8681 err = -EBUSY;
8571 goto err_free; 8682 goto err_unlock;
8572 } 8683 }
8573 8684
8574 perf_install_in_context(ctx, event, cpu); 8685 perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8688,10 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
8577 8688
8578 return event; 8689 return event;
8579 8690
8691err_unlock:
8692 mutex_unlock(&ctx->mutex);
8693 perf_unpin_context(ctx);
8694 put_ctx(ctx);
8580err_free: 8695err_free:
8581 free_event(event); 8696 free_event(event);
8582err: 8697err:
@@ -8695,7 +8810,7 @@ perf_event_exit_event(struct perf_event *child_event,
8695 if (parent_event) 8810 if (parent_event)
8696 perf_group_detach(child_event); 8811 perf_group_detach(child_event);
8697 list_del_event(child_event, child_ctx); 8812 list_del_event(child_event, child_ctx);
8698 child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */ 8813 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
8699 raw_spin_unlock_irq(&child_ctx->lock); 8814 raw_spin_unlock_irq(&child_ctx->lock);
8700 8815
8701 /* 8816 /*
@@ -9206,7 +9321,7 @@ static void perf_event_init_cpu(int cpu)
9206 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); 9321 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
9207 9322
9208 mutex_lock(&swhash->hlist_mutex); 9323 mutex_lock(&swhash->hlist_mutex);
9209 if (swhash->hlist_refcount > 0) { 9324 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
9210 struct swevent_hlist *hlist; 9325 struct swevent_hlist *hlist;
9211 9326
9212 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); 9327 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -9282,11 +9397,9 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
9282 switch (action & ~CPU_TASKS_FROZEN) { 9397 switch (action & ~CPU_TASKS_FROZEN) {
9283 9398
9284 case CPU_UP_PREPARE: 9399 case CPU_UP_PREPARE:
9285 case CPU_DOWN_FAILED:
9286 perf_event_init_cpu(cpu); 9400 perf_event_init_cpu(cpu);
9287 break; 9401 break;
9288 9402
9289 case CPU_UP_CANCELED:
9290 case CPU_DOWN_PREPARE: 9403 case CPU_DOWN_PREPARE:
9291 perf_event_exit_cpu(cpu); 9404 perf_event_exit_cpu(cpu);
9292 break; 9405 break;
@@ -9315,9 +9428,6 @@ void __init perf_event_init(void)
9315 ret = init_hw_breakpoint(); 9428 ret = init_hw_breakpoint();
9316 WARN(ret, "hw_breakpoint initialization failed with: %d", ret); 9429 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
9317 9430
9318 /* do not patch jump label more than once per second */
9319 jump_label_rate_limit(&perf_sched_events, HZ);
9320
9321 /* 9431 /*
9322 * Build time assertion that we keep the data_head at the intended 9432 * Build time assertion that we keep the data_head at the intended
9323 * location. IOW, validation we got the __reserved[] size right. 9433 * location. IOW, validation we got the __reserved[] size right.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56618f6..716547fdb873 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
292#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) 292#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
293#define classhashentry(key) (classhash_table + __classhashfn((key))) 293#define classhashentry(key) (classhash_table + __classhashfn((key)))
294 294
295static struct list_head classhash_table[CLASSHASH_SIZE]; 295static struct hlist_head classhash_table[CLASSHASH_SIZE];
296 296
297/* 297/*
298 * We put the lock dependency chains into a hash-table as well, to cache 298 * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
303#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) 303#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
304#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) 304#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
305 305
306static struct list_head chainhash_table[CHAINHASH_SIZE]; 306static struct hlist_head chainhash_table[CHAINHASH_SIZE];
307 307
308/* 308/*
309 * The hash key of the lock dependency chains is a hash itself too: 309 * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
666look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) 666look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
667{ 667{
668 struct lockdep_subclass_key *key; 668 struct lockdep_subclass_key *key;
669 struct list_head *hash_head; 669 struct hlist_head *hash_head;
670 struct lock_class *class; 670 struct lock_class *class;
671 671
672#ifdef CONFIG_DEBUG_LOCKDEP 672#ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
719 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 719 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
720 return NULL; 720 return NULL;
721 721
722 list_for_each_entry_rcu(class, hash_head, hash_entry) { 722 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
723 if (class->key == key) { 723 if (class->key == key) {
724 /* 724 /*
725 * Huh! same key, different name? Did someone trample 725 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
742register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 742register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
743{ 743{
744 struct lockdep_subclass_key *key; 744 struct lockdep_subclass_key *key;
745 struct list_head *hash_head; 745 struct hlist_head *hash_head;
746 struct lock_class *class; 746 struct lock_class *class;
747 747
748 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 748 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
774 * We have to do the hash-walk again, to avoid races 774 * We have to do the hash-walk again, to avoid races
775 * with another CPU: 775 * with another CPU:
776 */ 776 */
777 list_for_each_entry_rcu(class, hash_head, hash_entry) { 777 hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
778 if (class->key == key) 778 if (class->key == key)
779 goto out_unlock_set; 779 goto out_unlock_set;
780 } 780 }
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
805 * We use RCU's safe list-add method to make 805 * We use RCU's safe list-add method to make
806 * parallel walking of the hash-list safe: 806 * parallel walking of the hash-list safe:
807 */ 807 */
808 list_add_tail_rcu(&class->hash_entry, hash_head); 808 hlist_add_head_rcu(&class->hash_entry, hash_head);
809 /* 809 /*
810 * Add it to the global list of classes: 810 * Add it to the global list of classes:
811 */ 811 */
@@ -1822,7 +1822,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
1822 */ 1822 */
1823static int 1823static int
1824check_prev_add(struct task_struct *curr, struct held_lock *prev, 1824check_prev_add(struct task_struct *curr, struct held_lock *prev,
1825 struct held_lock *next, int distance, int trylock_loop) 1825 struct held_lock *next, int distance, int *stack_saved)
1826{ 1826{
1827 struct lock_list *entry; 1827 struct lock_list *entry;
1828 int ret; 1828 int ret;
@@ -1883,8 +1883,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1883 } 1883 }
1884 } 1884 }
1885 1885
1886 if (!trylock_loop && !save_trace(&trace)) 1886 if (!*stack_saved) {
1887 return 0; 1887 if (!save_trace(&trace))
1888 return 0;
1889 *stack_saved = 1;
1890 }
1888 1891
1889 /* 1892 /*
1890 * Ok, all validations passed, add the new lock 1893 * Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1907 * Debugging printouts: 1910 * Debugging printouts:
1908 */ 1911 */
1909 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { 1912 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1913 /* We drop graph lock, so another thread can overwrite trace. */
1914 *stack_saved = 0;
1910 graph_unlock(); 1915 graph_unlock();
1911 printk("\n new dependency: "); 1916 printk("\n new dependency: ");
1912 print_lock_name(hlock_class(prev)); 1917 print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@ static int
1929check_prevs_add(struct task_struct *curr, struct held_lock *next) 1934check_prevs_add(struct task_struct *curr, struct held_lock *next)
1930{ 1935{
1931 int depth = curr->lockdep_depth; 1936 int depth = curr->lockdep_depth;
1932 int trylock_loop = 0; 1937 int stack_saved = 0;
1933 struct held_lock *hlock; 1938 struct held_lock *hlock;
1934 1939
1935 /* 1940 /*
@@ -1956,7 +1961,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1956 */ 1961 */
1957 if (hlock->read != 2 && hlock->check) { 1962 if (hlock->read != 2 && hlock->check) {
1958 if (!check_prev_add(curr, hlock, next, 1963 if (!check_prev_add(curr, hlock, next,
1959 distance, trylock_loop)) 1964 distance, &stack_saved))
1960 return 0; 1965 return 0;
1961 /* 1966 /*
1962 * Stop after the first non-trylock entry, 1967 * Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
1979 if (curr->held_locks[depth].irq_context != 1984 if (curr->held_locks[depth].irq_context !=
1980 curr->held_locks[depth-1].irq_context) 1985 curr->held_locks[depth-1].irq_context)
1981 break; 1986 break;
1982 trylock_loop = 1;
1983 } 1987 }
1984 return 1; 1988 return 1;
1985out_bug: 1989out_bug:
@@ -2017,7 +2021,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
2017 u64 chain_key) 2021 u64 chain_key)
2018{ 2022{
2019 struct lock_class *class = hlock_class(hlock); 2023 struct lock_class *class = hlock_class(hlock);
2020 struct list_head *hash_head = chainhashentry(chain_key); 2024 struct hlist_head *hash_head = chainhashentry(chain_key);
2021 struct lock_chain *chain; 2025 struct lock_chain *chain;
2022 struct held_lock *hlock_curr; 2026 struct held_lock *hlock_curr;
2023 int i, j; 2027 int i, j;
@@ -2033,7 +2037,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
2033 * We can walk it lock-free, because entries only get added 2037 * We can walk it lock-free, because entries only get added
2034 * to the hash: 2038 * to the hash:
2035 */ 2039 */
2036 list_for_each_entry_rcu(chain, hash_head, entry) { 2040 hlist_for_each_entry_rcu(chain, hash_head, entry) {
2037 if (chain->chain_key == chain_key) { 2041 if (chain->chain_key == chain_key) {
2038cache_hit: 2042cache_hit:
2039 debug_atomic_inc(chain_lookup_hits); 2043 debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2061,7 @@ cache_hit:
2057 /* 2061 /*
2058 * We have to walk the chain again locked - to avoid duplicates: 2062 * We have to walk the chain again locked - to avoid duplicates:
2059 */ 2063 */
2060 list_for_each_entry(chain, hash_head, entry) { 2064 hlist_for_each_entry(chain, hash_head, entry) {
2061 if (chain->chain_key == chain_key) { 2065 if (chain->chain_key == chain_key) {
2062 graph_unlock(); 2066 graph_unlock();
2063 goto cache_hit; 2067 goto cache_hit;
@@ -2091,7 +2095,7 @@ cache_hit:
2091 } 2095 }
2092 chain_hlocks[chain->base + j] = class - lock_classes; 2096 chain_hlocks[chain->base + j] = class - lock_classes;
2093 } 2097 }
2094 list_add_tail_rcu(&chain->entry, hash_head); 2098 hlist_add_head_rcu(&chain->entry, hash_head);
2095 debug_atomic_inc(chain_lookup_misses); 2099 debug_atomic_inc(chain_lookup_misses);
2096 inc_chains(); 2100 inc_chains();
2097 2101
@@ -3875,7 +3879,7 @@ void lockdep_reset(void)
3875 nr_process_chains = 0; 3879 nr_process_chains = 0;
3876 debug_locks = 1; 3880 debug_locks = 1;
3877 for (i = 0; i < CHAINHASH_SIZE; i++) 3881 for (i = 0; i < CHAINHASH_SIZE; i++)
3878 INIT_LIST_HEAD(chainhash_table + i); 3882 INIT_HLIST_HEAD(chainhash_table + i);
3879 raw_local_irq_restore(flags); 3883 raw_local_irq_restore(flags);
3880} 3884}
3881 3885
@@ -3894,7 +3898,7 @@ static void zap_class(struct lock_class *class)
3894 /* 3898 /*
3895 * Unhash the class and remove it from the all_lock_classes list: 3899 * Unhash the class and remove it from the all_lock_classes list:
3896 */ 3900 */
3897 list_del_rcu(&class->hash_entry); 3901 hlist_del_rcu(&class->hash_entry);
3898 list_del_rcu(&class->lock_entry); 3902 list_del_rcu(&class->lock_entry);
3899 3903
3900 RCU_INIT_POINTER(class->key, NULL); 3904 RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3921,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
3917void lockdep_free_key_range(void *start, unsigned long size) 3921void lockdep_free_key_range(void *start, unsigned long size)
3918{ 3922{
3919 struct lock_class *class; 3923 struct lock_class *class;
3920 struct list_head *head; 3924 struct hlist_head *head;
3921 unsigned long flags; 3925 unsigned long flags;
3922 int i; 3926 int i;
3923 int locked; 3927 int locked;
@@ -3930,9 +3934,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
3930 */ 3934 */
3931 for (i = 0; i < CLASSHASH_SIZE; i++) { 3935 for (i = 0; i < CLASSHASH_SIZE; i++) {
3932 head = classhash_table + i; 3936 head = classhash_table + i;
3933 if (list_empty(head)) 3937 hlist_for_each_entry_rcu(class, head, hash_entry) {
3934 continue;
3935 list_for_each_entry_rcu(class, head, hash_entry) {
3936 if (within(class->key, start, size)) 3938 if (within(class->key, start, size))
3937 zap_class(class); 3939 zap_class(class);
3938 else if (within(class->name, start, size)) 3940 else if (within(class->name, start, size))
@@ -3962,7 +3964,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
3962void lockdep_reset_lock(struct lockdep_map *lock) 3964void lockdep_reset_lock(struct lockdep_map *lock)
3963{ 3965{
3964 struct lock_class *class; 3966 struct lock_class *class;
3965 struct list_head *head; 3967 struct hlist_head *head;
3966 unsigned long flags; 3968 unsigned long flags;
3967 int i, j; 3969 int i, j;
3968 int locked; 3970 int locked;
@@ -3987,9 +3989,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
3987 locked = graph_lock(); 3989 locked = graph_lock();
3988 for (i = 0; i < CLASSHASH_SIZE; i++) { 3990 for (i = 0; i < CLASSHASH_SIZE; i++) {
3989 head = classhash_table + i; 3991 head = classhash_table + i;
3990 if (list_empty(head)) 3992 hlist_for_each_entry_rcu(class, head, hash_entry) {
3991 continue;
3992 list_for_each_entry_rcu(class, head, hash_entry) {
3993 int match = 0; 3993 int match = 0;
3994 3994
3995 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 3995 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4027,10 @@ void lockdep_init(void)
4027 return; 4027 return;
4028 4028
4029 for (i = 0; i < CLASSHASH_SIZE; i++) 4029 for (i = 0; i < CLASSHASH_SIZE; i++)
4030 INIT_LIST_HEAD(classhash_table + i); 4030 INIT_HLIST_HEAD(classhash_table + i);
4031 4031
4032 for (i = 0; i < CHAINHASH_SIZE; i++) 4032 for (i = 0; i < CHAINHASH_SIZE; i++)
4033 INIT_LIST_HEAD(chainhash_table + i); 4033 INIT_HLIST_HEAD(chainhash_table + i);
4034 4034
4035 lockdep_initialized = 1; 4035 lockdep_initialized = 1;
4036} 4036}
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 70ee3775de24..b981a7b023f0 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(memunmap);
114 114
115static void devm_memremap_release(struct device *dev, void *res) 115static void devm_memremap_release(struct device *dev, void *res)
116{ 116{
117 memunmap(res); 117 memunmap(*(void **)res);
118} 118}
119 119
120static int devm_memremap_match(struct device *dev, void *res, void *match_data) 120static int devm_memremap_match(struct device *dev, void *res, void *match_data)
@@ -136,8 +136,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
136 if (addr) { 136 if (addr) {
137 *ptr = addr; 137 *ptr = addr;
138 devres_add(dev, ptr); 138 devres_add(dev, ptr);
139 } else 139 } else {
140 devres_free(ptr); 140 devres_free(ptr);
141 return ERR_PTR(-ENXIO);
142 }
141 143
142 return addr; 144 return addr;
143} 145}
@@ -150,7 +152,7 @@ void devm_memunmap(struct device *dev, void *addr)
150} 152}
151EXPORT_SYMBOL(devm_memunmap); 153EXPORT_SYMBOL(devm_memunmap);
152 154
153pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags) 155pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
154{ 156{
155 return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); 157 return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
156} 158}
diff --git a/kernel/module.c b/kernel/module.c
index 8358f4697c0c..794ebe8e878d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -303,6 +303,9 @@ struct load_info {
303 struct _ddebug *debug; 303 struct _ddebug *debug;
304 unsigned int num_debug; 304 unsigned int num_debug;
305 bool sig_ok; 305 bool sig_ok;
306#ifdef CONFIG_KALLSYMS
307 unsigned long mod_kallsyms_init_off;
308#endif
306 struct { 309 struct {
307 unsigned int sym, str, mod, vers, info, pcpu; 310 unsigned int sym, str, mod, vers, info, pcpu;
308 } index; 311 } index;
@@ -981,6 +984,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
981 mod->exit(); 984 mod->exit();
982 blocking_notifier_call_chain(&module_notify_list, 985 blocking_notifier_call_chain(&module_notify_list,
983 MODULE_STATE_GOING, mod); 986 MODULE_STATE_GOING, mod);
987 ftrace_release_mod(mod);
988
984 async_synchronize_full(); 989 async_synchronize_full();
985 990
986 /* Store the name of the last unloaded module for diagnostic purposes */ 991 /* Store the name of the last unloaded module for diagnostic purposes */
@@ -2480,10 +2485,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
2480 strsect->sh_flags |= SHF_ALLOC; 2485 strsect->sh_flags |= SHF_ALLOC;
2481 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, 2486 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2482 info->index.str) | INIT_OFFSET_MASK; 2487 info->index.str) | INIT_OFFSET_MASK;
2483 mod->init_layout.size = debug_align(mod->init_layout.size);
2484 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2488 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2489
2490 /* We'll tack temporary mod_kallsyms on the end. */
2491 mod->init_layout.size = ALIGN(mod->init_layout.size,
2492 __alignof__(struct mod_kallsyms));
2493 info->mod_kallsyms_init_off = mod->init_layout.size;
2494 mod->init_layout.size += sizeof(struct mod_kallsyms);
2495 mod->init_layout.size = debug_align(mod->init_layout.size);
2485} 2496}
2486 2497
2498/*
2499 * We use the full symtab and strtab which layout_symtab arranged to
2500 * be appended to the init section. Later we switch to the cut-down
2501 * core-only ones.
2502 */
2487static void add_kallsyms(struct module *mod, const struct load_info *info) 2503static void add_kallsyms(struct module *mod, const struct load_info *info)
2488{ 2504{
2489 unsigned int i, ndst; 2505 unsigned int i, ndst;
@@ -2492,29 +2508,34 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
2492 char *s; 2508 char *s;
2493 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 2509 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2494 2510
2495 mod->symtab = (void *)symsec->sh_addr; 2511 /* Set up to point into init section. */
2496 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); 2512 mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2513
2514 mod->kallsyms->symtab = (void *)symsec->sh_addr;
2515 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2497 /* Make sure we get permanent strtab: don't use info->strtab. */ 2516 /* Make sure we get permanent strtab: don't use info->strtab. */
2498 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; 2517 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2499 2518
2500 /* Set types up while we still have access to sections. */ 2519 /* Set types up while we still have access to sections. */
2501 for (i = 0; i < mod->num_symtab; i++) 2520 for (i = 0; i < mod->kallsyms->num_symtab; i++)
2502 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); 2521 mod->kallsyms->symtab[i].st_info
2503 2522 = elf_type(&mod->kallsyms->symtab[i], info);
2504 mod->core_symtab = dst = mod->core_layout.base + info->symoffs; 2523
2505 mod->core_strtab = s = mod->core_layout.base + info->stroffs; 2524 /* Now populate the cut down core kallsyms for after init. */
2506 src = mod->symtab; 2525 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2507 for (ndst = i = 0; i < mod->num_symtab; i++) { 2526 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2527 src = mod->kallsyms->symtab;
2528 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2508 if (i == 0 || 2529 if (i == 0 ||
2509 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2530 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2510 info->index.pcpu)) { 2531 info->index.pcpu)) {
2511 dst[ndst] = src[i]; 2532 dst[ndst] = src[i];
2512 dst[ndst++].st_name = s - mod->core_strtab; 2533 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2513 s += strlcpy(s, &mod->strtab[src[i].st_name], 2534 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2514 KSYM_NAME_LEN) + 1; 2535 KSYM_NAME_LEN) + 1;
2515 } 2536 }
2516 } 2537 }
2517 mod->core_num_syms = ndst; 2538 mod->core_kallsyms.num_symtab = ndst;
2518} 2539}
2519#else 2540#else
2520static inline void layout_symtab(struct module *mod, struct load_info *info) 2541static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3263,9 +3284,8 @@ static noinline int do_init_module(struct module *mod)
3263 module_put(mod); 3284 module_put(mod);
3264 trim_init_extable(mod); 3285 trim_init_extable(mod);
3265#ifdef CONFIG_KALLSYMS 3286#ifdef CONFIG_KALLSYMS
3266 mod->num_symtab = mod->core_num_syms; 3287 /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3267 mod->symtab = mod->core_symtab; 3288 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3268 mod->strtab = mod->core_strtab;
3269#endif 3289#endif
3270 mod_tree_remove_init(mod); 3290 mod_tree_remove_init(mod);
3271 disable_ro_nx(&mod->init_layout); 3291 disable_ro_nx(&mod->init_layout);
@@ -3295,6 +3315,7 @@ fail:
3295 module_put(mod); 3315 module_put(mod);
3296 blocking_notifier_call_chain(&module_notify_list, 3316 blocking_notifier_call_chain(&module_notify_list,
3297 MODULE_STATE_GOING, mod); 3317 MODULE_STATE_GOING, mod);
3318 ftrace_release_mod(mod);
3298 free_module(mod); 3319 free_module(mod);
3299 wake_up_all(&module_wq); 3320 wake_up_all(&module_wq);
3300 return ret; 3321 return ret;
@@ -3371,6 +3392,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
3371 mod->state = MODULE_STATE_COMING; 3392 mod->state = MODULE_STATE_COMING;
3372 mutex_unlock(&module_mutex); 3393 mutex_unlock(&module_mutex);
3373 3394
3395 ftrace_module_enable(mod);
3374 blocking_notifier_call_chain(&module_notify_list, 3396 blocking_notifier_call_chain(&module_notify_list,
3375 MODULE_STATE_COMING, mod); 3397 MODULE_STATE_COMING, mod);
3376 return 0; 3398 return 0;
@@ -3496,7 +3518,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3496 3518
3497 /* Module is ready to execute: parsing args may do that. */ 3519 /* Module is ready to execute: parsing args may do that. */
3498 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3520 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3499 -32768, 32767, NULL, 3521 -32768, 32767, mod,
3500 unknown_module_param_cb); 3522 unknown_module_param_cb);
3501 if (IS_ERR(after_dashes)) { 3523 if (IS_ERR(after_dashes)) {
3502 err = PTR_ERR(after_dashes); 3524 err = PTR_ERR(after_dashes);
@@ -3627,6 +3649,11 @@ static inline int is_arm_mapping_symbol(const char *str)
3627 && (str[2] == '\0' || str[2] == '.'); 3649 && (str[2] == '\0' || str[2] == '.');
3628} 3650}
3629 3651
3652static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
3653{
3654 return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
3655}
3656
3630static const char *get_ksymbol(struct module *mod, 3657static const char *get_ksymbol(struct module *mod,
3631 unsigned long addr, 3658 unsigned long addr,
3632 unsigned long *size, 3659 unsigned long *size,
@@ -3634,6 +3661,7 @@ static const char *get_ksymbol(struct module *mod,
3634{ 3661{
3635 unsigned int i, best = 0; 3662 unsigned int i, best = 0;
3636 unsigned long nextval; 3663 unsigned long nextval;
3664 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3637 3665
3638 /* At worse, next value is at end of module */ 3666 /* At worse, next value is at end of module */
3639 if (within_module_init(addr, mod)) 3667 if (within_module_init(addr, mod))
@@ -3643,32 +3671,32 @@ static const char *get_ksymbol(struct module *mod,
3643 3671
3644 /* Scan for closest preceding symbol, and next symbol. (ELF 3672 /* Scan for closest preceding symbol, and next symbol. (ELF
3645 starts real symbols at 1). */ 3673 starts real symbols at 1). */
3646 for (i = 1; i < mod->num_symtab; i++) { 3674 for (i = 1; i < kallsyms->num_symtab; i++) {
3647 if (mod->symtab[i].st_shndx == SHN_UNDEF) 3675 if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
3648 continue; 3676 continue;
3649 3677
3650 /* We ignore unnamed symbols: they're uninformative 3678 /* We ignore unnamed symbols: they're uninformative
3651 * and inserted at a whim. */ 3679 * and inserted at a whim. */
3652 if (mod->symtab[i].st_value <= addr 3680 if (*symname(kallsyms, i) == '\0'
3653 && mod->symtab[i].st_value > mod->symtab[best].st_value 3681 || is_arm_mapping_symbol(symname(kallsyms, i)))
3654 && *(mod->strtab + mod->symtab[i].st_name) != '\0' 3682 continue;
3655 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) 3683
3684 if (kallsyms->symtab[i].st_value <= addr
3685 && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
3656 best = i; 3686 best = i;
3657 if (mod->symtab[i].st_value > addr 3687 if (kallsyms->symtab[i].st_value > addr
3658 && mod->symtab[i].st_value < nextval 3688 && kallsyms->symtab[i].st_value < nextval)
3659 && *(mod->strtab + mod->symtab[i].st_name) != '\0' 3689 nextval = kallsyms->symtab[i].st_value;
3660 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3661 nextval = mod->symtab[i].st_value;
3662 } 3690 }
3663 3691
3664 if (!best) 3692 if (!best)
3665 return NULL; 3693 return NULL;
3666 3694
3667 if (size) 3695 if (size)
3668 *size = nextval - mod->symtab[best].st_value; 3696 *size = nextval - kallsyms->symtab[best].st_value;
3669 if (offset) 3697 if (offset)
3670 *offset = addr - mod->symtab[best].st_value; 3698 *offset = addr - kallsyms->symtab[best].st_value;
3671 return mod->strtab + mod->symtab[best].st_name; 3699 return symname(kallsyms, best);
3672} 3700}
3673 3701
3674/* For kallsyms to ask for address resolution. NULL means not found. Careful 3702/* For kallsyms to ask for address resolution. NULL means not found. Careful
@@ -3758,19 +3786,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3758 3786
3759 preempt_disable(); 3787 preempt_disable();
3760 list_for_each_entry_rcu(mod, &modules, list) { 3788 list_for_each_entry_rcu(mod, &modules, list) {
3789 struct mod_kallsyms *kallsyms;
3790
3761 if (mod->state == MODULE_STATE_UNFORMED) 3791 if (mod->state == MODULE_STATE_UNFORMED)
3762 continue; 3792 continue;
3763 if (symnum < mod->num_symtab) { 3793 kallsyms = rcu_dereference_sched(mod->kallsyms);
3764 *value = mod->symtab[symnum].st_value; 3794 if (symnum < kallsyms->num_symtab) {
3765 *type = mod->symtab[symnum].st_info; 3795 *value = kallsyms->symtab[symnum].st_value;
3766 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, 3796 *type = kallsyms->symtab[symnum].st_info;
3767 KSYM_NAME_LEN); 3797 strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
3768 strlcpy(module_name, mod->name, MODULE_NAME_LEN); 3798 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3769 *exported = is_exported(name, *value, mod); 3799 *exported = is_exported(name, *value, mod);
3770 preempt_enable(); 3800 preempt_enable();
3771 return 0; 3801 return 0;
3772 } 3802 }
3773 symnum -= mod->num_symtab; 3803 symnum -= kallsyms->num_symtab;
3774 } 3804 }
3775 preempt_enable(); 3805 preempt_enable();
3776 return -ERANGE; 3806 return -ERANGE;
@@ -3779,11 +3809,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3779static unsigned long mod_find_symname(struct module *mod, const char *name) 3809static unsigned long mod_find_symname(struct module *mod, const char *name)
3780{ 3810{
3781 unsigned int i; 3811 unsigned int i;
3812 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3782 3813
3783 for (i = 0; i < mod->num_symtab; i++) 3814 for (i = 0; i < kallsyms->num_symtab; i++)
3784 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && 3815 if (strcmp(name, symname(kallsyms, i)) == 0 &&
3785 mod->symtab[i].st_info != 'U') 3816 kallsyms->symtab[i].st_info != 'U')
3786 return mod->symtab[i].st_value; 3817 return kallsyms->symtab[i].st_value;
3787 return 0; 3818 return 0;
3788} 3819}
3789 3820
@@ -3822,11 +3853,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3822 module_assert_mutex(); 3853 module_assert_mutex();
3823 3854
3824 list_for_each_entry(mod, &modules, list) { 3855 list_for_each_entry(mod, &modules, list) {
3856 /* We hold module_mutex: no need for rcu_dereference_sched */
3857 struct mod_kallsyms *kallsyms = mod->kallsyms;
3858
3825 if (mod->state == MODULE_STATE_UNFORMED) 3859 if (mod->state == MODULE_STATE_UNFORMED)
3826 continue; 3860 continue;
3827 for (i = 0; i < mod->num_symtab; i++) { 3861 for (i = 0; i < kallsyms->num_symtab; i++) {
3828 ret = fn(data, mod->strtab + mod->symtab[i].st_name, 3862 ret = fn(data, symname(kallsyms, i),
3829 mod, mod->symtab[i].st_value); 3863 mod, kallsyms->symtab[i].st_value);
3830 if (ret != 0) 3864 if (ret != 0)
3831 return ret; 3865 return ret;
3832 } 3866 }
diff --git a/kernel/resource.c b/kernel/resource.c
index 09c0597840b0..3669d1bfc425 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
1083 if (!conflict) 1083 if (!conflict)
1084 break; 1084 break;
1085 if (conflict != parent) { 1085 if (conflict != parent) {
1086 parent = conflict; 1086 if (!(conflict->flags & IORESOURCE_BUSY)) {
1087 if (!(conflict->flags & IORESOURCE_BUSY)) 1087 parent = conflict;
1088 continue; 1088 continue;
1089 }
1089 } 1090 }
1090 if (conflict->flags & flags & IORESOURCE_MUXED) { 1091 if (conflict->flags & flags & IORESOURCE_MUXED) {
1091 add_wait_queue(&muxed_resource_wait, &wait); 1092 add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cd64c979d0e1..57b939c81bce 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -420,7 +420,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
420 * entity. 420 * entity.
421 */ 421 */
422 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { 422 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
423 printk_deferred_once("sched: DL replenish lagged to much\n"); 423 printk_deferred_once("sched: DL replenish lagged too much\n");
424 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; 424 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
425 dl_se->runtime = pi_se->dl_runtime; 425 dl_se->runtime = pi_se->dl_runtime;
426 } 426 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca592f977b2..57a6eea84694 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4961,7 +4961,7 @@ void ftrace_release_mod(struct module *mod)
4961 mutex_unlock(&ftrace_lock); 4961 mutex_unlock(&ftrace_lock);
4962} 4962}
4963 4963
4964static void ftrace_module_enable(struct module *mod) 4964void ftrace_module_enable(struct module *mod)
4965{ 4965{
4966 struct dyn_ftrace *rec; 4966 struct dyn_ftrace *rec;
4967 struct ftrace_page *pg; 4967 struct ftrace_page *pg;
@@ -5038,38 +5038,8 @@ void ftrace_module_init(struct module *mod)
5038 ftrace_process_locs(mod, mod->ftrace_callsites, 5038 ftrace_process_locs(mod, mod->ftrace_callsites,
5039 mod->ftrace_callsites + mod->num_ftrace_callsites); 5039 mod->ftrace_callsites + mod->num_ftrace_callsites);
5040} 5040}
5041
5042static int ftrace_module_notify(struct notifier_block *self,
5043 unsigned long val, void *data)
5044{
5045 struct module *mod = data;
5046
5047 switch (val) {
5048 case MODULE_STATE_COMING:
5049 ftrace_module_enable(mod);
5050 break;
5051 case MODULE_STATE_GOING:
5052 ftrace_release_mod(mod);
5053 break;
5054 default:
5055 break;
5056 }
5057
5058 return 0;
5059}
5060#else
5061static int ftrace_module_notify(struct notifier_block *self,
5062 unsigned long val, void *data)
5063{
5064 return 0;
5065}
5066#endif /* CONFIG_MODULES */ 5041#endif /* CONFIG_MODULES */
5067 5042
5068struct notifier_block ftrace_module_nb = {
5069 .notifier_call = ftrace_module_notify,
5070 .priority = INT_MIN, /* Run after anything that can remove kprobes */
5071};
5072
5073void __init ftrace_init(void) 5043void __init ftrace_init(void)
5074{ 5044{
5075 extern unsigned long __start_mcount_loc[]; 5045 extern unsigned long __start_mcount_loc[];
@@ -5098,10 +5068,6 @@ void __init ftrace_init(void)
5098 __start_mcount_loc, 5068 __start_mcount_loc,
5099 __stop_mcount_loc); 5069 __stop_mcount_loc);
5100 5070
5101 ret = register_module_notifier(&ftrace_module_nb);
5102 if (ret)
5103 pr_warning("Failed to register trace ftrace module exit notifier\n");
5104
5105 set_ftrace_early_filters(); 5071 set_ftrace_early_filters();
5106 5072
5107 return; 5073 return;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f333e57c4614..ab09829d3b97 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
869 * The ftrace subsystem is for showing formats only. 869 * The ftrace subsystem is for showing formats only.
870 * They can not be enabled or disabled via the event files. 870 * They can not be enabled or disabled via the event files.
871 */ 871 */
872 if (call->class && call->class->reg) 872 if (call->class && call->class->reg &&
873 !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
873 return file; 874 return file;
874 } 875 }
875 876
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 202df6cffcca..2a1abbaca10e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -156,7 +156,11 @@ check_stack(unsigned long ip, unsigned long *stack)
156 for (; p < top && i < stack_trace_max.nr_entries; p++) { 156 for (; p < top && i < stack_trace_max.nr_entries; p++) {
157 if (stack_dump_trace[i] == ULONG_MAX) 157 if (stack_dump_trace[i] == ULONG_MAX)
158 break; 158 break;
159 if (*p == stack_dump_trace[i]) { 159 /*
160 * The READ_ONCE_NOCHECK is used to let KASAN know that
161 * this is not a stack-out-of-bounds error.
162 */
163 if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
160 stack_dump_trace[x] = stack_dump_trace[i++]; 164 stack_dump_trace[x] = stack_dump_trace[i++];
161 this_size = stack_trace_index[x++] = 165 this_size = stack_trace_index[x++] =
162 (top - p) * sizeof(unsigned long); 166 (top - p) * sizeof(unsigned long);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 61a0264e28f9..7ff5dc7d2ac5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -301,7 +301,23 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
301static LIST_HEAD(workqueues); /* PR: list of all workqueues */ 301static LIST_HEAD(workqueues); /* PR: list of all workqueues */
302static bool workqueue_freezing; /* PL: have wqs started freezing? */ 302static bool workqueue_freezing; /* PL: have wqs started freezing? */
303 303
304static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */ 304/* PL: allowable cpus for unbound wqs and work items */
305static cpumask_var_t wq_unbound_cpumask;
306
307/* CPU where unbound work was last round robin scheduled from this CPU */
308static DEFINE_PER_CPU(int, wq_rr_cpu_last);
309
310/*
311 * Local execution of unbound work items is no longer guaranteed. The
312 * following always forces round-robin CPU selection on unbound work items
313 * to uncover usages which depend on it.
314 */
315#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
316static bool wq_debug_force_rr_cpu = true;
317#else
318static bool wq_debug_force_rr_cpu = false;
319#endif
320module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
305 321
306/* the per-cpu worker pools */ 322/* the per-cpu worker pools */
307static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], 323static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -570,6 +586,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
570 int node) 586 int node)
571{ 587{
572 assert_rcu_or_wq_mutex_or_pool_mutex(wq); 588 assert_rcu_or_wq_mutex_or_pool_mutex(wq);
589
590 /*
591 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
592 * delayed item is pending. The plan is to keep CPU -> NODE
593 * mapping valid and stable across CPU on/offlines. Once that
594 * happens, this workaround can be removed.
595 */
596 if (unlikely(node == NUMA_NO_NODE))
597 return wq->dfl_pwq;
598
573 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); 599 return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
574} 600}
575 601
@@ -1298,6 +1324,39 @@ static bool is_chained_work(struct workqueue_struct *wq)
1298 return worker && worker->current_pwq->wq == wq; 1324 return worker && worker->current_pwq->wq == wq;
1299} 1325}
1300 1326
1327/*
1328 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1329 * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
1330 * avoid perturbing sensitive tasks.
1331 */
1332static int wq_select_unbound_cpu(int cpu)
1333{
1334 static bool printed_dbg_warning;
1335 int new_cpu;
1336
1337 if (likely(!wq_debug_force_rr_cpu)) {
1338 if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1339 return cpu;
1340 } else if (!printed_dbg_warning) {
1341 pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1342 printed_dbg_warning = true;
1343 }
1344
1345 if (cpumask_empty(wq_unbound_cpumask))
1346 return cpu;
1347
1348 new_cpu = __this_cpu_read(wq_rr_cpu_last);
1349 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1350 if (unlikely(new_cpu >= nr_cpu_ids)) {
1351 new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1352 if (unlikely(new_cpu >= nr_cpu_ids))
1353 return cpu;
1354 }
1355 __this_cpu_write(wq_rr_cpu_last, new_cpu);
1356
1357 return new_cpu;
1358}
1359
1301static void __queue_work(int cpu, struct workqueue_struct *wq, 1360static void __queue_work(int cpu, struct workqueue_struct *wq,
1302 struct work_struct *work) 1361 struct work_struct *work)
1303{ 1362{
@@ -1323,7 +1382,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
1323 return; 1382 return;
1324retry: 1383retry:
1325 if (req_cpu == WORK_CPU_UNBOUND) 1384 if (req_cpu == WORK_CPU_UNBOUND)
1326 cpu = raw_smp_processor_id(); 1385 cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1327 1386
1328 /* pwq which will be used unless @work is executing elsewhere */ 1387 /* pwq which will be used unless @work is executing elsewhere */
1329 if (!(wq->flags & WQ_UNBOUND)) 1388 if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1464 timer_stats_timer_set_start_info(&dwork->timer); 1523 timer_stats_timer_set_start_info(&dwork->timer);
1465 1524
1466 dwork->wq = wq; 1525 dwork->wq = wq;
1467 /* timer isn't guaranteed to run in this cpu, record earlier */
1468 if (cpu == WORK_CPU_UNBOUND)
1469 cpu = raw_smp_processor_id();
1470 dwork->cpu = cpu; 1526 dwork->cpu = cpu;
1471 timer->expires = jiffies + delay; 1527 timer->expires = jiffies + delay;
1472 1528
1473 add_timer_on(timer, cpu); 1529 if (unlikely(cpu != WORK_CPU_UNBOUND))
1530 add_timer_on(timer, cpu);
1531 else
1532 add_timer(timer);
1474} 1533}
1475 1534
1476/** 1535/**
@@ -2355,7 +2414,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
2355 WARN_ONCE(current->flags & PF_MEMALLOC, 2414 WARN_ONCE(current->flags & PF_MEMALLOC,
2356 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf", 2415 "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2357 current->pid, current->comm, target_wq->name, target_func); 2416 current->pid, current->comm, target_wq->name, target_func);
2358 WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM), 2417 WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2418 (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2359 "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf", 2419 "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2360 worker->current_pwq->wq->name, worker->current_func, 2420 worker->current_pwq->wq->name, worker->current_func,
2361 target_wq->name, target_func); 2421 target_wq->name, target_func);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecb9e75614bf..8bfd1aca7a3d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1400,6 +1400,21 @@ config RCU_EQS_DEBUG
1400 1400
1401endmenu # "RCU Debugging" 1401endmenu # "RCU Debugging"
1402 1402
1403config DEBUG_WQ_FORCE_RR_CPU
1404 bool "Force round-robin CPU selection for unbound work items"
1405 depends on DEBUG_KERNEL
1406 default n
1407 help
1408 Workqueue used to implicitly guarantee that work items queued
1409 without explicit CPU specified are put on the local CPU. This
1410 guarantee is no longer true and while local CPU is still
1411 preferred work items may be put on foreign CPUs. Kernel
1412 parameter "workqueue.debug_force_rr_cpu" is added to force
1413 round-robin CPU selection to flush out usages which depend on the
1414 now broken guarantee. This config option enables the debug
1415 feature by default. When enabled, memory and cache locality will
1416 be impacted.
1417
1403config DEBUG_BLOCK_EXT_DEVT 1418config DEBUG_BLOCK_EXT_DEVT
1404 bool "Force extended block device numbers and spread them" 1419 bool "Force extended block device numbers and spread them"
1405 depends on DEBUG_KERNEL 1420 depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 49518fb48cab..e07c1ba9ba13 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -18,6 +18,8 @@ config UBSAN_SANITIZE_ALL
18 This option activates instrumentation for the entire kernel. 18 This option activates instrumentation for the entire kernel.
19 If you don't enable this option, you have to explicitly specify 19 If you don't enable this option, you have to explicitly specify
20 UBSAN_SANITIZE := y for the files/directories you want to check for UB. 20 UBSAN_SANITIZE := y for the files/directories you want to check for UB.
21 Enabling this option will get kernel image size increased
22 significantly.
21 23
22config UBSAN_ALIGNMENT 24config UBSAN_ALIGNMENT
23 bool "Enable checking of pointers alignment" 25 bool "Enable checking of pointers alignment"
@@ -25,5 +27,5 @@ config UBSAN_ALIGNMENT
25 default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS 27 default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
26 help 28 help
27 This option enables detection of unaligned memory accesses. 29 This option enables detection of unaligned memory accesses.
28 Enabling this option on architectures that support unalligned 30 Enabling this option on architectures that support unaligned
29 accesses may produce a lot of false positives. 31 accesses may produce a lot of false positives.
diff --git a/lib/klist.c b/lib/klist.c
index d74cf7a29afd..0507fa5d84c5 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
282 struct klist_node *n) 282 struct klist_node *n)
283{ 283{
284 i->i_klist = k; 284 i->i_klist = k;
285 i->i_cur = n; 285 i->i_cur = NULL;
286 if (n) 286 if (n && kref_get_unless_zero(&n->n_ref))
287 kref_get(&n->n_ref); 287 i->i_cur = n;
288} 288}
289EXPORT_SYMBOL_GPL(klist_iter_init_node); 289EXPORT_SYMBOL_GPL(klist_iter_init_node);
290 290
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index bafa9933fa76..004fc70fc56a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -598,9 +598,9 @@ EXPORT_SYMBOL(sg_miter_next);
598 * 598 *
599 * Description: 599 * Description:
600 * Stops mapping iterator @miter. @miter should have been started 600 * Stops mapping iterator @miter. @miter should have been started
601 * started using sg_miter_start(). A stopped iteration can be 601 * using sg_miter_start(). A stopped iteration can be resumed by
602 * resumed by calling sg_miter_next() on it. This is useful when 602 * calling sg_miter_next() on it. This is useful when resources (kmap)
603 * resources (kmap) need to be released during iteration. 603 * need to be released during iteration.
604 * 604 *
605 * Context: 605 * Context:
606 * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care 606 * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef2301d..f0b323abb4c6 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
49 } 49 }
50} 50}
51EXPORT_SYMBOL(ucs2_strncmp); 51EXPORT_SYMBOL(ucs2_strncmp);
52
53unsigned long
54ucs2_utf8size(const ucs2_char_t *src)
55{
56 unsigned long i;
57 unsigned long j = 0;
58
59 for (i = 0; i < ucs2_strlen(src); i++) {
60 u16 c = src[i];
61
62 if (c >= 0x800)
63 j += 3;
64 else if (c >= 0x80)
65 j += 2;
66 else
67 j += 1;
68 }
69
70 return j;
71}
72EXPORT_SYMBOL(ucs2_utf8size);
73
74/*
75 * copy at most maxlength bytes of whole utf8 characters to dest from the
76 * ucs2 string src.
77 *
78 * The return value is the number of characters copied, not including the
79 * final NUL character.
80 */
81unsigned long
82ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
83{
84 unsigned int i;
85 unsigned long j = 0;
86 unsigned long limit = ucs2_strnlen(src, maxlength);
87
88 for (i = 0; maxlength && i < limit; i++) {
89 u16 c = src[i];
90
91 if (c >= 0x800) {
92 if (maxlength < 3)
93 break;
94 maxlength -= 3;
95 dest[j++] = 0xe0 | (c & 0xf000) >> 12;
96 dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
97 dest[j++] = 0x80 | (c & 0x003f);
98 } else if (c >= 0x80) {
99 if (maxlength < 2)
100 break;
101 maxlength -= 2;
102 dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
103 dest[j++] = 0x80 | (c & 0x03f);
104 } else {
105 maxlength -= 1;
106 dest[j++] = c & 0x7f;
107 }
108 }
109 if (maxlength)
110 dest[j] = '\0';
111 return j;
112}
113EXPORT_SYMBOL(ucs2_as_utf8);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 48ff9c36644d..f44e178e6ede 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1590,22 +1590,23 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1590 return buf; 1590 return buf;
1591 } 1591 }
1592 case 'K': 1592 case 'K':
1593 /*
1594 * %pK cannot be used in IRQ context because its test
1595 * for CAP_SYSLOG would be meaningless.
1596 */
1597 if (kptr_restrict && (in_irq() || in_serving_softirq() ||
1598 in_nmi())) {
1599 if (spec.field_width == -1)
1600 spec.field_width = default_width;
1601 return string(buf, end, "pK-error", spec);
1602 }
1603
1604 switch (kptr_restrict) { 1593 switch (kptr_restrict) {
1605 case 0: 1594 case 0:
1606 /* Always print %pK values */ 1595 /* Always print %pK values */
1607 break; 1596 break;
1608 case 1: { 1597 case 1: {
1598 const struct cred *cred;
1599
1600 /*
1601 * kptr_restrict==1 cannot be used in IRQ context
1602 * because its test for CAP_SYSLOG would be meaningless.
1603 */
1604 if (in_irq() || in_serving_softirq() || in_nmi()) {
1605 if (spec.field_width == -1)
1606 spec.field_width = default_width;
1607 return string(buf, end, "pK-error", spec);
1608 }
1609
1609 /* 1610 /*
1610 * Only print the real pointer value if the current 1611 * Only print the real pointer value if the current
1611 * process has CAP_SYSLOG and is running with the 1612 * process has CAP_SYSLOG and is running with the
@@ -1615,8 +1616,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1615 * leak pointer values if a binary opens a file using 1616 * leak pointer values if a binary opens a file using
1616 * %pK and then elevates privileges before reading it. 1617 * %pK and then elevates privileges before reading it.
1617 */ 1618 */
1618 const struct cred *cred = current_cred(); 1619 cred = current_cred();
1619
1620 if (!has_capability_noaudit(current, CAP_SYSLOG) || 1620 if (!has_capability_noaudit(current, CAP_SYSLOG) ||
1621 !uid_eq(cred->euid, cred->uid) || 1621 !uid_eq(cred->euid, cred->uid) ||
1622 !gid_eq(cred->egid, cred->gid)) 1622 !gid_eq(cred->egid, cred->gid))
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 926c76d56388..c554d173a65f 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -328,7 +328,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
328 return 0; 328 return 0;
329 329
330out_destroy_stat: 330out_destroy_stat:
331 while (--i) 331 while (i--)
332 percpu_counter_destroy(&wb->stat[i]); 332 percpu_counter_destroy(&wb->stat[i]);
333 fprop_local_destroy_percpu(&wb->completions); 333 fprop_local_destroy_percpu(&wb->completions);
334out_put_cong: 334out_put_cong:
diff --git a/mm/filemap.c b/mm/filemap.c
index bc943867d68c..3461d97ecb30 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -446,7 +446,8 @@ int filemap_write_and_wait(struct address_space *mapping)
446{ 446{
447 int err = 0; 447 int err = 0;
448 448
449 if (mapping->nrpages) { 449 if ((!dax_mapping(mapping) && mapping->nrpages) ||
450 (dax_mapping(mapping) && mapping->nrexceptional)) {
450 err = filemap_fdatawrite(mapping); 451 err = filemap_fdatawrite(mapping);
451 /* 452 /*
452 * Even if the above returned error, the pages may be 453 * Even if the above returned error, the pages may be
@@ -482,13 +483,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
482{ 483{
483 int err = 0; 484 int err = 0;
484 485
485 if (dax_mapping(mapping) && mapping->nrexceptional) { 486 if ((!dax_mapping(mapping) && mapping->nrpages) ||
486 err = dax_writeback_mapping_range(mapping, lstart, lend); 487 (dax_mapping(mapping) && mapping->nrexceptional)) {
487 if (err)
488 return err;
489 }
490
491 if (mapping->nrpages) {
492 err = __filemap_fdatawrite_range(mapping, lstart, lend, 488 err = __filemap_fdatawrite_range(mapping, lstart, lend,
493 WB_SYNC_ALL); 489 WB_SYNC_ALL);
494 /* See comment of filemap_write_and_wait() */ 490 /* See comment of filemap_write_and_wait() */
@@ -1890,6 +1886,7 @@ EXPORT_SYMBOL(generic_file_read_iter);
1890 * page_cache_read - adds requested page to the page cache if not already there 1886 * page_cache_read - adds requested page to the page cache if not already there
1891 * @file: file to read 1887 * @file: file to read
1892 * @offset: page index 1888 * @offset: page index
1889 * @gfp_mask: memory allocation flags
1893 * 1890 *
1894 * This adds the requested page to the page cache if it isn't already there, 1891 * This adds the requested page to the page cache if it isn't already there,
1895 * and schedules an I/O to read in its contents from disk. 1892 * and schedules an I/O to read in its contents from disk.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 08fc0ba2207e..e10a4fee88d2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1700,7 +1700,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1700 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1701 VM_BUG_ON(!pmd_none(*new_pmd)); 1701 VM_BUG_ON(!pmd_none(*new_pmd));
1702 1702
1703 if (pmd_move_must_withdraw(new_ptl, old_ptl)) { 1703 if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
1704 vma_is_anonymous(vma)) {
1704 pgtable_t pgtable; 1705 pgtable_t pgtable;
1705 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); 1706 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1706 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); 1707 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2835,6 +2836,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2835 pgtable_t pgtable; 2836 pgtable_t pgtable;
2836 pmd_t _pmd; 2837 pmd_t _pmd;
2837 bool young, write, dirty; 2838 bool young, write, dirty;
2839 unsigned long addr;
2838 int i; 2840 int i;
2839 2841
2840 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); 2842 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2860,10 +2862,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2860 young = pmd_young(*pmd); 2862 young = pmd_young(*pmd);
2861 dirty = pmd_dirty(*pmd); 2863 dirty = pmd_dirty(*pmd);
2862 2864
2865 pmdp_huge_split_prepare(vma, haddr, pmd);
2863 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2866 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2864 pmd_populate(mm, &_pmd, pgtable); 2867 pmd_populate(mm, &_pmd, pgtable);
2865 2868
2866 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2869 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2867 pte_t entry, *pte; 2870 pte_t entry, *pte;
2868 /* 2871 /*
2869 * Note that NUMA hinting access restrictions are not 2872 * Note that NUMA hinting access restrictions are not
@@ -2884,9 +2887,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2884 } 2887 }
2885 if (dirty) 2888 if (dirty)
2886 SetPageDirty(page + i); 2889 SetPageDirty(page + i);
2887 pte = pte_offset_map(&_pmd, haddr); 2890 pte = pte_offset_map(&_pmd, addr);
2888 BUG_ON(!pte_none(*pte)); 2891 BUG_ON(!pte_none(*pte));
2889 set_pte_at(mm, haddr, pte, entry); 2892 set_pte_at(mm, addr, pte, entry);
2890 atomic_inc(&page[i]._mapcount); 2893 atomic_inc(&page[i]._mapcount);
2891 pte_unmap(pte); 2894 pte_unmap(pte);
2892 } 2895 }
@@ -2936,7 +2939,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2936 pmd_populate(mm, pmd, pgtable); 2939 pmd_populate(mm, pmd, pgtable);
2937 2940
2938 if (freeze) { 2941 if (freeze) {
2939 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 2942 for (i = 0; i < HPAGE_PMD_NR; i++) {
2940 page_remove_rmap(page + i, false); 2943 page_remove_rmap(page + i, false);
2941 put_page(page + i); 2944 put_page(page + i);
2942 } 2945 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06ae13e869d0..01f2b48c8618 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2630,8 +2630,10 @@ static int __init hugetlb_init(void)
2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2631 } 2631 }
2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size)); 2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2633 if (default_hstate_max_huge_pages) 2633 if (default_hstate_max_huge_pages) {
2634 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 2634 if (!default_hstate.max_huge_pages)
2635 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2636 }
2635 2637
2636 hugetlb_init_hstates(); 2638 hugetlb_init_hstates();
2637 gather_bootmem_prealloc(); 2639 gather_bootmem_prealloc();
diff --git a/mm/memory.c b/mm/memory.c
index 635451abc8f7..8132787ae4d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3404,8 +3404,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3404 if (unlikely(pmd_none(*pmd)) && 3404 if (unlikely(pmd_none(*pmd)) &&
3405 unlikely(__pte_alloc(mm, vma, pmd, address))) 3405 unlikely(__pte_alloc(mm, vma, pmd, address)))
3406 return VM_FAULT_OOM; 3406 return VM_FAULT_OOM;
3407 /* if an huge pmd materialized from under us just retry later */ 3407 /*
3408 if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) 3408 * If a huge pmd materialized under us just retry later. Use
3409 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
3410 * didn't become pmd_trans_huge under us and then back to pmd_none, as
3411 * a result of MADV_DONTNEED running immediately after a huge pmd fault
3412 * in a different thread of this mm, in turn leading to a misleading
3413 * pmd_trans_huge() retval. All we have to ensure is that it is a
3414 * regular pmd that we can walk with pte_offset_map() and we can do that
3415 * through an atomic read in C, which is what pmd_trans_unstable()
3416 * provides.
3417 */
3418 if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
3409 return 0; 3419 return 0;
3410 /* 3420 /*
3411 * A regular pmd is established and it can't morph into a huge pmd 3421 * A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/migrate.c b/mm/migrate.c
index b1034f9c77e7..3ad0fea5c438 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1582,7 +1582,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1582 (GFP_HIGHUSER_MOVABLE | 1582 (GFP_HIGHUSER_MOVABLE |
1583 __GFP_THISNODE | __GFP_NOMEMALLOC | 1583 __GFP_THISNODE | __GFP_NOMEMALLOC |
1584 __GFP_NORETRY | __GFP_NOWARN) & 1584 __GFP_NORETRY | __GFP_NOWARN) &
1585 ~(__GFP_IO | __GFP_FS), 0); 1585 ~__GFP_RECLAIM, 0);
1586 1586
1587 return newpage; 1587 return newpage;
1588} 1588}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f2415a7a688..76d1ec29149b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2664,12 +2664,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2664 if (!vma || !(vma->vm_flags & VM_SHARED)) 2664 if (!vma || !(vma->vm_flags & VM_SHARED))
2665 goto out; 2665 goto out;
2666 2666
2667 if (start < vma->vm_start || start + size > vma->vm_end) 2667 if (start < vma->vm_start)
2668 goto out; 2668 goto out;
2669 2669
2670 if (pgoff == linear_page_index(vma, start)) { 2670 if (start + size > vma->vm_end) {
2671 ret = 0; 2671 struct vm_area_struct *next;
2672 goto out; 2672
2673 for (next = vma->vm_next; next; next = next->vm_next) {
2674 /* hole between vmas ? */
2675 if (next->vm_start != next->vm_prev->vm_end)
2676 goto out;
2677
2678 if (next->vm_file != vma->vm_file)
2679 goto out;
2680
2681 if (next->vm_flags != vma->vm_flags)
2682 goto out;
2683
2684 if (start + size <= next->vm_end)
2685 break;
2686 }
2687
2688 if (!next)
2689 goto out;
2673 } 2690 }
2674 2691
2675 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; 2692 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2679 flags &= MAP_NONBLOCK; 2696 flags &= MAP_NONBLOCK;
2680 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE; 2697 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2681 if (vma->vm_flags & VM_LOCKED) { 2698 if (vma->vm_flags & VM_LOCKED) {
2699 struct vm_area_struct *tmp;
2682 flags |= MAP_LOCKED; 2700 flags |= MAP_LOCKED;
2701
2683 /* drop PG_Mlocked flag for over-mapped range */ 2702 /* drop PG_Mlocked flag for over-mapped range */
2684 munlock_vma_pages_range(vma, start, start + size); 2703 for (tmp = vma; tmp->vm_start >= start + size;
2704 tmp = tmp->vm_next) {
2705 munlock_vma_pages_range(tmp,
2706 max(tmp->vm_start, start),
2707 min(tmp->vm_end, start + size));
2708 }
2685 } 2709 }
2686 2710
2687 file = get_file(vma->vm_file); 2711 file = get_file(vma->vm_file);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 8eb7bb40dc40..f7cb3d4d9c2e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -160,9 +160,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
160 } 160 }
161 161
162 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 162 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
163 if (next - addr != HPAGE_PMD_SIZE) 163 if (next - addr != HPAGE_PMD_SIZE) {
164 split_huge_pmd(vma, pmd, addr); 164 split_huge_pmd(vma, pmd, addr);
165 else { 165 if (pmd_none(*pmd))
166 continue;
167 } else {
166 int nr_ptes = change_huge_pmd(vma, pmd, addr, 168 int nr_ptes = change_huge_pmd(vma, pmd, addr,
167 newprot, prot_numa); 169 newprot, prot_numa);
168 170
diff --git a/mm/mremap.c b/mm/mremap.c
index d77946a997f7..8eeba02fc991 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -210,6 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
210 } 210 }
211 } 211 }
212 split_huge_pmd(vma, old_pmd, old_addr); 212 split_huge_pmd(vma, old_pmd, old_addr);
213 if (pmd_none(*old_pmd))
214 continue;
213 VM_BUG_ON(pmd_trans_huge(*old_pmd)); 215 VM_BUG_ON(pmd_trans_huge(*old_pmd));
214 } 216 }
215 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, 217 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 9d4767698a1c..06a005b979a7 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -90,9 +90,9 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
90 * ARCHes with special requirements for evicting THP backing TLB entries can 90 * ARCHes with special requirements for evicting THP backing TLB entries can
91 * implement this. Otherwise also, it can help optimize normal TLB flush in 91 * implement this. Otherwise also, it can help optimize normal TLB flush in
92 * THP regime. stock flush_tlb_range() typically has optimization to nuke the 92 * THP regime. stock flush_tlb_range() typically has optimization to nuke the
93 * entire TLB TLB if flush span is greater than a threshhold, which will 93 * entire TLB if flush span is greater than a threshold, which will
94 * likely be true for a single huge page. Thus a single thp flush will 94 * likely be true for a single huge page. Thus a single thp flush will
95 * invalidate the entire TLB which is not desitable. 95 * invalidate the entire TLB which is not desirable.
96 * e.g. see arch/arc: flush_pmd_tlb_range 96 * e.g. see arch/arc: flush_pmd_tlb_range
97 */ 97 */
98#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) 98#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
@@ -195,7 +195,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
195 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 195 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
196 VM_BUG_ON(pmd_trans_huge(*pmdp)); 196 VM_BUG_ON(pmd_trans_huge(*pmdp));
197 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 197 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
198 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 198
199 /* collapse entails shooting down ptes not pmd */
200 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
199 return pmd; 201 return pmd;
200} 202}
201#endif 203#endif
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697a8bc4..621fbcb35a36 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2275 2275
2276 err = setup_cpu_cache(cachep, gfp); 2276 err = setup_cpu_cache(cachep, gfp);
2277 if (err) { 2277 if (err) {
2278 __kmem_cache_shutdown(cachep); 2278 __kmem_cache_release(cachep);
2279 return err; 2279 return err;
2280 } 2280 }
2281 2281
@@ -2414,12 +2414,13 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
2414 2414
2415int __kmem_cache_shutdown(struct kmem_cache *cachep) 2415int __kmem_cache_shutdown(struct kmem_cache *cachep)
2416{ 2416{
2417 return __kmem_cache_shrink(cachep, false);
2418}
2419
2420void __kmem_cache_release(struct kmem_cache *cachep)
2421{
2417 int i; 2422 int i;
2418 struct kmem_cache_node *n; 2423 struct kmem_cache_node *n;
2419 int rc = __kmem_cache_shrink(cachep, false);
2420
2421 if (rc)
2422 return rc;
2423 2424
2424 free_percpu(cachep->cpu_cache); 2425 free_percpu(cachep->cpu_cache);
2425 2426
@@ -2430,7 +2431,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
2430 kfree(n); 2431 kfree(n);
2431 cachep->node[i] = NULL; 2432 cachep->node[i] = NULL;
2432 } 2433 }
2433 return 0;
2434} 2434}
2435 2435
2436/* 2436/*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad240c0bb..2eedacea439d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS) 140#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
141 141
142int __kmem_cache_shutdown(struct kmem_cache *); 142int __kmem_cache_shutdown(struct kmem_cache *);
143void __kmem_cache_release(struct kmem_cache *);
143int __kmem_cache_shrink(struct kmem_cache *, bool); 144int __kmem_cache_shrink(struct kmem_cache *, bool);
144void slab_kmem_cache_release(struct kmem_cache *); 145void slab_kmem_cache_release(struct kmem_cache *);
145 146
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef01ccf7..065b7bdabdc3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s,
693 693
694void slab_kmem_cache_release(struct kmem_cache *s) 694void slab_kmem_cache_release(struct kmem_cache *s)
695{ 695{
696 __kmem_cache_release(s);
696 destroy_memcg_params(s); 697 destroy_memcg_params(s);
697 kfree_const(s->name); 698 kfree_const(s->name);
698 kmem_cache_free(kmem_cache, s); 699 kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8cc7c53..5ec158054ffe 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
630 return 0; 630 return 0;
631} 631}
632 632
633void __kmem_cache_release(struct kmem_cache *c)
634{
635}
636
633int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate) 637int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
634{ 638{
635 return 0; 639 return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355ac056b..d8fbd4a6ed59 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@ static inline void add_partial(struct kmem_cache_node *n,
1592 __add_partial(n, page, tail); 1592 __add_partial(n, page, tail);
1593} 1593}
1594 1594
1595static inline void
1596__remove_partial(struct kmem_cache_node *n, struct page *page)
1597{
1598 list_del(&page->lru);
1599 n->nr_partial--;
1600}
1601
1602static inline void remove_partial(struct kmem_cache_node *n, 1595static inline void remove_partial(struct kmem_cache_node *n,
1603 struct page *page) 1596 struct page *page)
1604{ 1597{
1605 lockdep_assert_held(&n->list_lock); 1598 lockdep_assert_held(&n->list_lock);
1606 __remove_partial(n, page); 1599 list_del(&page->lru);
1600 n->nr_partial--;
1607} 1601}
1608 1602
1609/* 1603/*
@@ -3184,6 +3178,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
3184 } 3178 }
3185} 3179}
3186 3180
3181void __kmem_cache_release(struct kmem_cache *s)
3182{
3183 free_percpu(s->cpu_slab);
3184 free_kmem_cache_nodes(s);
3185}
3186
3187static int init_kmem_cache_nodes(struct kmem_cache *s) 3187static int init_kmem_cache_nodes(struct kmem_cache *s)
3188{ 3188{
3189 int node; 3189 int node;
@@ -3443,28 +3443,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
3443 3443
3444/* 3444/*
3445 * Attempt to free all partial slabs on a node. 3445 * Attempt to free all partial slabs on a node.
3446 * This is called from kmem_cache_close(). We must be the last thread 3446 * This is called from __kmem_cache_shutdown(). We must take list_lock
3447 * using the cache and therefore we do not need to lock anymore. 3447 * because sysfs file might still access partial list after the shutdowning.
3448 */ 3448 */
3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) 3449static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3450{ 3450{
3451 struct page *page, *h; 3451 struct page *page, *h;
3452 3452
3453 BUG_ON(irqs_disabled());
3454 spin_lock_irq(&n->list_lock);
3453 list_for_each_entry_safe(page, h, &n->partial, lru) { 3455 list_for_each_entry_safe(page, h, &n->partial, lru) {
3454 if (!page->inuse) { 3456 if (!page->inuse) {
3455 __remove_partial(n, page); 3457 remove_partial(n, page);
3456 discard_slab(s, page); 3458 discard_slab(s, page);
3457 } else { 3459 } else {
3458 list_slab_objects(s, page, 3460 list_slab_objects(s, page,
3459 "Objects remaining in %s on kmem_cache_close()"); 3461 "Objects remaining in %s on __kmem_cache_shutdown()");
3460 } 3462 }
3461 } 3463 }
3464 spin_unlock_irq(&n->list_lock);
3462} 3465}
3463 3466
3464/* 3467/*
3465 * Release all resources used by a slab cache. 3468 * Release all resources used by a slab cache.
3466 */ 3469 */
3467static inline int kmem_cache_close(struct kmem_cache *s) 3470int __kmem_cache_shutdown(struct kmem_cache *s)
3468{ 3471{
3469 int node; 3472 int node;
3470 struct kmem_cache_node *n; 3473 struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
3476 if (n->nr_partial || slabs_node(s, node)) 3479 if (n->nr_partial || slabs_node(s, node))
3477 return 1; 3480 return 1;
3478 } 3481 }
3479 free_percpu(s->cpu_slab);
3480 free_kmem_cache_nodes(s);
3481 return 0; 3482 return 0;
3482} 3483}
3483 3484
3484int __kmem_cache_shutdown(struct kmem_cache *s)
3485{
3486 return kmem_cache_close(s);
3487}
3488
3489/******************************************************************** 3485/********************************************************************
3490 * Kmalloc subsystem 3486 * Kmalloc subsystem
3491 *******************************************************************/ 3487 *******************************************************************/
@@ -3980,7 +3976,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
3980 memcg_propagate_slab_attrs(s); 3976 memcg_propagate_slab_attrs(s);
3981 err = sysfs_slab_add(s); 3977 err = sysfs_slab_add(s);
3982 if (err) 3978 if (err)
3983 kmem_cache_close(s); 3979 __kmem_cache_release(s);
3984 3980
3985 return err; 3981 return err;
3986} 3982}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d5871ac493eb..f066781be3c8 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1625,7 +1625,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1625 1625
1626 rt = atrtr_find(&at_hint); 1626 rt = atrtr_find(&at_hint);
1627 } 1627 }
1628 err = ENETUNREACH; 1628 err = -ENETUNREACH;
1629 if (!rt) 1629 if (!rt)
1630 goto out; 1630 goto out;
1631 1631
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e6c8382c79ba..ccf70bed0d0c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -527,11 +527,12 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
527 * gets dereferenced. 527 * gets dereferenced.
528 */ 528 */
529 spin_lock_bh(&bat_priv->gw.list_lock); 529 spin_lock_bh(&bat_priv->gw.list_lock);
530 hlist_del_init_rcu(&gw_node->list); 530 if (!hlist_unhashed(&gw_node->list)) {
531 hlist_del_init_rcu(&gw_node->list);
532 batadv_gw_node_free_ref(gw_node);
533 }
531 spin_unlock_bh(&bat_priv->gw.list_lock); 534 spin_unlock_bh(&bat_priv->gw.list_lock);
532 535
533 batadv_gw_node_free_ref(gw_node);
534
535 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 536 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
536 if (gw_node == curr_gw) 537 if (gw_node == curr_gw)
537 batadv_gw_reselect(bat_priv); 538 batadv_gw_reselect(bat_priv);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 01acccc4d218..57f7107169f5 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -76,6 +76,28 @@ out:
76} 76}
77 77
78/** 78/**
79 * batadv_mutual_parents - check if two devices are each others parent
80 * @dev1: 1st net_device
81 * @dev2: 2nd net_device
82 *
83 * veth devices come in pairs and each is the parent of the other!
84 *
85 * Return: true if the devices are each others parent, otherwise false
86 */
87static bool batadv_mutual_parents(const struct net_device *dev1,
88 const struct net_device *dev2)
89{
90 int dev1_parent_iflink = dev_get_iflink(dev1);
91 int dev2_parent_iflink = dev_get_iflink(dev2);
92
93 if (!dev1_parent_iflink || !dev2_parent_iflink)
94 return false;
95
96 return (dev1_parent_iflink == dev2->ifindex) &&
97 (dev2_parent_iflink == dev1->ifindex);
98}
99
100/**
79 * batadv_is_on_batman_iface - check if a device is a batman iface descendant 101 * batadv_is_on_batman_iface - check if a device is a batman iface descendant
80 * @net_dev: the device to check 102 * @net_dev: the device to check
81 * 103 *
@@ -108,6 +130,9 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
108 if (WARN(!parent_dev, "Cannot find parent device")) 130 if (WARN(!parent_dev, "Cannot find parent device"))
109 return false; 131 return false;
110 132
133 if (batadv_mutual_parents(net_dev, parent_dev))
134 return false;
135
111 ret = batadv_is_on_batman_iface(parent_dev); 136 ret = batadv_is_on_batman_iface(parent_dev);
112 137
113 return ret; 138 return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index cdfc85fa2743..0e80fd1461ab 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -303,9 +303,11 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
303 303
304 if (atomic_add_return(v, &vlan->tt.num_entries) == 0) { 304 if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
305 spin_lock_bh(&orig_node->vlan_list_lock); 305 spin_lock_bh(&orig_node->vlan_list_lock);
306 hlist_del_init_rcu(&vlan->list); 306 if (!hlist_unhashed(&vlan->list)) {
307 hlist_del_init_rcu(&vlan->list);
308 batadv_orig_node_vlan_free_ref(vlan);
309 }
307 spin_unlock_bh(&orig_node->vlan_list_lock); 310 spin_unlock_bh(&orig_node->vlan_list_lock);
308 batadv_orig_node_vlan_free_ref(vlan);
309 } 311 }
310 312
311 batadv_orig_node_vlan_free_ref(vlan); 313 batadv_orig_node_vlan_free_ref(vlan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 47bcef754796..883c821a9e78 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4112,8 +4112,10 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4112 break; 4112 break;
4113 } 4113 }
4114 4114
4115 *req_complete = bt_cb(skb)->hci.req_complete; 4115 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4116 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 4116 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4117 else
4118 *req_complete = bt_cb(skb)->hci.req_complete;
4117 kfree_skb(skb); 4119 kfree_skb(skb);
4118 } 4120 }
4119 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 4121 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 30e105f57f0d..74c278e00225 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -425,8 +425,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
425 mp = br_mdb_ip_get(mdb, group); 425 mp = br_mdb_ip_get(mdb, group);
426 if (!mp) { 426 if (!mp) {
427 mp = br_multicast_new_group(br, port, group); 427 mp = br_multicast_new_group(br, port, group);
428 err = PTR_ERR(mp); 428 err = PTR_ERR_OR_ZERO(mp);
429 if (IS_ERR(mp)) 429 if (err)
430 return err; 430 return err;
431 } 431 }
432 432
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 61d7617d9249..b82440e1fcb4 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -159,7 +159,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
159 tmppkt = NULL; 159 tmppkt = NULL;
160 160
161 /* Verify that length is correct */ 161 /* Verify that length is correct */
162 err = EPROTO; 162 err = -EPROTO;
163 if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1) 163 if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
164 goto out; 164 goto out;
165 } 165 }
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9cfedf565f5b..9382619a405b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1197,6 +1197,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1197 return new_piece; 1197 return new_piece;
1198} 1198}
1199 1199
1200static size_t sizeof_footer(struct ceph_connection *con)
1201{
1202 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1203 sizeof(struct ceph_msg_footer) :
1204 sizeof(struct ceph_msg_footer_old);
1205}
1206
1200static void prepare_message_data(struct ceph_msg *msg, u32 data_len) 1207static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1201{ 1208{
1202 BUG_ON(!msg); 1209 BUG_ON(!msg);
@@ -2335,9 +2342,9 @@ static int read_partial_message(struct ceph_connection *con)
2335 ceph_pr_addr(&con->peer_addr.in_addr), 2342 ceph_pr_addr(&con->peer_addr.in_addr),
2336 seq, con->in_seq + 1); 2343 seq, con->in_seq + 1);
2337 con->in_base_pos = -front_len - middle_len - data_len - 2344 con->in_base_pos = -front_len - middle_len - data_len -
2338 sizeof(m->footer); 2345 sizeof_footer(con);
2339 con->in_tag = CEPH_MSGR_TAG_READY; 2346 con->in_tag = CEPH_MSGR_TAG_READY;
2340 return 0; 2347 return 1;
2341 } else if ((s64)seq - (s64)con->in_seq > 1) { 2348 } else if ((s64)seq - (s64)con->in_seq > 1) {
2342 pr_err("read_partial_message bad seq %lld expected %lld\n", 2349 pr_err("read_partial_message bad seq %lld expected %lld\n",
2343 seq, con->in_seq + 1); 2350 seq, con->in_seq + 1);
@@ -2360,10 +2367,10 @@ static int read_partial_message(struct ceph_connection *con)
2360 /* skip this message */ 2367 /* skip this message */
2361 dout("alloc_msg said skip message\n"); 2368 dout("alloc_msg said skip message\n");
2362 con->in_base_pos = -front_len - middle_len - data_len - 2369 con->in_base_pos = -front_len - middle_len - data_len -
2363 sizeof(m->footer); 2370 sizeof_footer(con);
2364 con->in_tag = CEPH_MSGR_TAG_READY; 2371 con->in_tag = CEPH_MSGR_TAG_READY;
2365 con->in_seq++; 2372 con->in_seq++;
2366 return 0; 2373 return 1;
2367 } 2374 }
2368 2375
2369 BUG_ON(!con->in_msg); 2376 BUG_ON(!con->in_msg);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3534e12683d3..5bc053778fed 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2853,8 +2853,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2853 mutex_lock(&osdc->request_mutex); 2853 mutex_lock(&osdc->request_mutex);
2854 req = __lookup_request(osdc, tid); 2854 req = __lookup_request(osdc, tid);
2855 if (!req) { 2855 if (!req) {
2856 pr_warn("%s osd%d tid %llu unknown, skipping\n", 2856 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
2857 __func__, osd->o_osd, tid); 2857 osd->o_osd, tid);
2858 m = NULL; 2858 m = NULL;
2859 *skip = 1; 2859 *skip = 1;
2860 goto out; 2860 goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8cba3d852f25..0ef061b2badc 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5379,12 +5379,12 @@ void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5379{ 5379{
5380 struct netdev_adjacent *lower; 5380 struct netdev_adjacent *lower;
5381 5381
5382 lower = list_entry((*iter)->next, struct netdev_adjacent, list); 5382 lower = list_entry(*iter, struct netdev_adjacent, list);
5383 5383
5384 if (&lower->list == &dev->adj_list.lower) 5384 if (&lower->list == &dev->adj_list.lower)
5385 return NULL; 5385 return NULL;
5386 5386
5387 *iter = &lower->list; 5387 *iter = lower->list.next;
5388 5388
5389 return lower->dev; 5389 return lower->dev;
5390} 5390}
@@ -7422,8 +7422,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
7422 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; 7422 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
7423 setup(dev); 7423 setup(dev);
7424 7424
7425 if (!dev->tx_queue_len) 7425 if (!dev->tx_queue_len) {
7426 dev->priv_flags |= IFF_NO_QUEUE; 7426 dev->priv_flags |= IFF_NO_QUEUE;
7427 dev->tx_queue_len = 1;
7428 }
7427 7429
7428 dev->num_tx_queues = txqs; 7430 dev->num_tx_queues = txqs;
7429 dev->real_num_tx_queues = txqs; 7431 dev->real_num_tx_queues = txqs;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d79699c9d1b9..12e700332010 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -208,7 +208,6 @@ ip:
208 case htons(ETH_P_IPV6): { 208 case htons(ETH_P_IPV6): {
209 const struct ipv6hdr *iph; 209 const struct ipv6hdr *iph;
210 struct ipv6hdr _iph; 210 struct ipv6hdr _iph;
211 __be32 flow_label;
212 211
213ipv6: 212ipv6:
214 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); 213 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
230 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 229 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
231 } 230 }
232 231
233 flow_label = ip6_flowlabel(iph); 232 if ((dissector_uses_key(flow_dissector,
234 if (flow_label) { 233 FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
234 (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
235 ip6_flowlabel(iph)) {
236 __be32 flow_label = ip6_flowlabel(iph);
237
235 if (dissector_uses_key(flow_dissector, 238 if (dissector_uses_key(flow_dissector,
236 FLOW_DISSECTOR_KEY_FLOW_LABEL)) { 239 FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
237 key_tags = skb_flow_dissector_target(flow_dissector, 240 key_tags = skb_flow_dissector_target(flow_dissector,
@@ -396,6 +399,13 @@ ip_proto_again:
396 goto out_bad; 399 goto out_bad;
397 proto = eth->h_proto; 400 proto = eth->h_proto;
398 nhoff += sizeof(*eth); 401 nhoff += sizeof(*eth);
402
403 /* Cap headers that we access via pointers at the
404 * end of the Ethernet header as our maximum alignment
405 * at that point is only 2 bytes.
406 */
407 if (NET_IP_ALIGN)
408 hlen = nhoff;
399 } 409 }
400 410
401 key_control->flags |= FLOW_DIS_ENCAPSULATION; 411 key_control->flags |= FLOW_DIS_ENCAPSULATION;
diff --git a/net/core/scm.c b/net/core/scm.c
index 14596fb37172..2696aefdc148 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
87 *fplp = fpl; 87 *fplp = fpl;
88 fpl->count = 0; 88 fpl->count = 0;
89 fpl->max = SCM_MAX_FD; 89 fpl->max = SCM_MAX_FD;
90 fpl->user = NULL;
90 } 91 }
91 fpp = &fpl->fp[fpl->count]; 92 fpp = &fpl->fp[fpl->count];
92 93
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
107 *fpp++ = file; 108 *fpp++ = file;
108 fpl->count++; 109 fpl->count++;
109 } 110 }
111
112 if (!fpl->user)
113 fpl->user = get_uid(current_user());
114
110 return num; 115 return num;
111} 116}
112 117
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
119 scm->fp = NULL; 124 scm->fp = NULL;
120 for (i=fpl->count-1; i>=0; i--) 125 for (i=fpl->count-1; i>=0; i--)
121 fput(fpl->fp[i]); 126 fput(fpl->fp[i]);
127 free_uid(fpl->user);
122 kfree(fpl); 128 kfree(fpl);
123 } 129 }
124} 130}
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
336 for (i = 0; i < fpl->count; i++) 342 for (i = 0; i < fpl->count; i++)
337 get_file(fpl->fp[i]); 343 get_file(fpl->fp[i]);
338 new_fpl->max = new_fpl->count; 344 new_fpl->max = new_fpl->count;
345 new_fpl->user = get_uid(fpl->user);
339 } 346 }
340 return new_fpl; 347 return new_fpl;
341} 348}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2df375ec9c2..5bf88f58bee7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,8 @@
79 79
80struct kmem_cache *skbuff_head_cache __read_mostly; 80struct kmem_cache *skbuff_head_cache __read_mostly;
81static struct kmem_cache *skbuff_fclone_cache __read_mostly; 81static struct kmem_cache *skbuff_fclone_cache __read_mostly;
82int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
83EXPORT_SYMBOL(sysctl_max_skb_frags);
82 84
83/** 85/**
84 * skb_panic - private function for out-of-line support 86 * skb_panic - private function for out-of-line support
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 95b6139d710c..a6beb7b6ae55 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -26,6 +26,7 @@ static int zero = 0;
26static int one = 1; 26static int one = 1;
27static int min_sndbuf = SOCK_MIN_SNDBUF; 27static int min_sndbuf = SOCK_MIN_SNDBUF;
28static int min_rcvbuf = SOCK_MIN_RCVBUF; 28static int min_rcvbuf = SOCK_MIN_RCVBUF;
29static int max_skb_frags = MAX_SKB_FRAGS;
29 30
30static int net_msg_warn; /* Unused, but still a sysctl */ 31static int net_msg_warn; /* Unused, but still a sysctl */
31 32
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
392 .mode = 0644, 393 .mode = 0644,
393 .proc_handler = proc_dointvec 394 .proc_handler = proc_dointvec
394 }, 395 },
396 {
397 .procname = "max_skb_frags",
398 .data = &sysctl_max_skb_frags,
399 .maxlen = sizeof(int),
400 .mode = 0644,
401 .proc_handler = proc_dointvec_minmax,
402 .extra1 = &one,
403 .extra2 = &max_skb_frags,
404 },
395 { } 405 { }
396}; 406};
397 407
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5684e14932bd..902d606324a0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -824,26 +824,26 @@ lookup:
824 824
825 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 825 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
826 struct request_sock *req = inet_reqsk(sk); 826 struct request_sock *req = inet_reqsk(sk);
827 struct sock *nsk = NULL; 827 struct sock *nsk;
828 828
829 sk = req->rsk_listener; 829 sk = req->rsk_listener;
830 if (likely(sk->sk_state == DCCP_LISTEN)) { 830 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
831 nsk = dccp_check_req(sk, skb, req);
832 } else {
833 inet_csk_reqsk_queue_drop_and_put(sk, req); 831 inet_csk_reqsk_queue_drop_and_put(sk, req);
834 goto lookup; 832 goto lookup;
835 } 833 }
834 sock_hold(sk);
835 nsk = dccp_check_req(sk, skb, req);
836 if (!nsk) { 836 if (!nsk) {
837 reqsk_put(req); 837 reqsk_put(req);
838 goto discard_it; 838 goto discard_and_relse;
839 } 839 }
840 if (nsk == sk) { 840 if (nsk == sk) {
841 sock_hold(sk);
842 reqsk_put(req); 841 reqsk_put(req);
843 } else if (dccp_child_process(sk, nsk, skb)) { 842 } else if (dccp_child_process(sk, nsk, skb)) {
844 dccp_v4_ctl_send_reset(sk, skb); 843 dccp_v4_ctl_send_reset(sk, skb);
845 goto discard_it; 844 goto discard_and_relse;
846 } else { 845 } else {
846 sock_put(sk);
847 return 0; 847 return 0;
848 } 848 }
849 } 849 }
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c6d0508e63a..b8608b71a66d 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -691,26 +691,26 @@ lookup:
691 691
692 if (sk->sk_state == DCCP_NEW_SYN_RECV) { 692 if (sk->sk_state == DCCP_NEW_SYN_RECV) {
693 struct request_sock *req = inet_reqsk(sk); 693 struct request_sock *req = inet_reqsk(sk);
694 struct sock *nsk = NULL; 694 struct sock *nsk;
695 695
696 sk = req->rsk_listener; 696 sk = req->rsk_listener;
697 if (likely(sk->sk_state == DCCP_LISTEN)) { 697 if (unlikely(sk->sk_state != DCCP_LISTEN)) {
698 nsk = dccp_check_req(sk, skb, req);
699 } else {
700 inet_csk_reqsk_queue_drop_and_put(sk, req); 698 inet_csk_reqsk_queue_drop_and_put(sk, req);
701 goto lookup; 699 goto lookup;
702 } 700 }
701 sock_hold(sk);
702 nsk = dccp_check_req(sk, skb, req);
703 if (!nsk) { 703 if (!nsk) {
704 reqsk_put(req); 704 reqsk_put(req);
705 goto discard_it; 705 goto discard_and_relse;
706 } 706 }
707 if (nsk == sk) { 707 if (nsk == sk) {
708 sock_hold(sk);
709 reqsk_put(req); 708 reqsk_put(req);
710 } else if (dccp_child_process(sk, nsk, skb)) { 709 } else if (dccp_child_process(sk, nsk, skb)) {
711 dccp_v6_ctl_send_reset(sk, skb); 710 dccp_v6_ctl_send_reset(sk, skb);
712 goto discard_it; 711 goto discard_and_relse;
713 } else { 712 } else {
713 sock_put(sk);
714 return 0; 714 return 0;
715 } 715 }
716 } 716 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 40b9ca72aae3..ab24521beb4d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1194,7 +1194,6 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1194 if (ret) { 1194 if (ret) {
1195 netdev_err(master, "error %d registering interface %s\n", 1195 netdev_err(master, "error %d registering interface %s\n",
1196 ret, slave_dev->name); 1196 ret, slave_dev->name);
1197 phy_disconnect(p->phy);
1198 ds->ports[port] = NULL; 1197 ds->ports[port] = NULL;
1199 free_netdev(slave_dev); 1198 free_netdev(slave_dev);
1200 return ret; 1199 return ret;
@@ -1205,6 +1204,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1205 ret = dsa_slave_phy_setup(p, slave_dev); 1204 ret = dsa_slave_phy_setup(p, slave_dev);
1206 if (ret) { 1205 if (ret) {
1207 netdev_err(master, "error %d setting up slave phy\n", ret); 1206 netdev_err(master, "error %d setting up slave phy\n", ret);
1207 unregister_netdev(slave_dev);
1208 free_netdev(slave_dev); 1208 free_netdev(slave_dev);
1209 return ret; 1209 return ret;
1210 } 1210 }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d31e65a..f6303b17546b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1847 if (err < 0) 1847 if (err < 0)
1848 goto errout; 1848 goto errout;
1849 1849
1850 err = EINVAL; 1850 err = -EINVAL;
1851 if (!tb[NETCONFA_IFINDEX]) 1851 if (!tb[NETCONFA_IFINDEX])
1852 goto errout; 1852 goto errout;
1853 1853
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 46b9c887bede..64148914803a 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
789 reqsk_put(req); 789 reqsk_put(req);
790} 790}
791 791
792void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, 792struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
793 struct sock *child) 793 struct request_sock *req,
794 struct sock *child)
794{ 795{
795 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 796 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
796 797
797 spin_lock(&queue->rskq_lock); 798 spin_lock(&queue->rskq_lock);
798 if (unlikely(sk->sk_state != TCP_LISTEN)) { 799 if (unlikely(sk->sk_state != TCP_LISTEN)) {
799 inet_child_forget(sk, req, child); 800 inet_child_forget(sk, req, child);
801 child = NULL;
800 } else { 802 } else {
801 req->sk = child; 803 req->sk = child;
802 req->dl_next = NULL; 804 req->dl_next = NULL;
@@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
808 sk_acceptq_added(sk); 810 sk_acceptq_added(sk);
809 } 811 }
810 spin_unlock(&queue->rskq_lock); 812 spin_unlock(&queue->rskq_lock);
813 return child;
811} 814}
812EXPORT_SYMBOL(inet_csk_reqsk_queue_add); 815EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
813 816
@@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
817 if (own_req) { 820 if (own_req) {
818 inet_csk_reqsk_queue_drop(sk, req); 821 inet_csk_reqsk_queue_drop(sk, req);
819 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); 822 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
820 inet_csk_reqsk_queue_add(sk, req, child); 823 if (inet_csk_reqsk_queue_add(sk, req, child))
821 /* Warning: caller must not call reqsk_put(req); 824 return child;
822 * child stole last reference on it.
823 */
824 return child;
825 } 825 }
826 /* Too bad, another child took ownership of the request, undo. */ 826 /* Too bad, another child took ownership of the request, undo. */
827 bh_unlock_sock(child); 827 bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7c51c4e1661f..41ba68de46d8 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1054,8 +1054,9 @@ static const struct net_device_ops gre_tap_netdev_ops = {
1054static void ipgre_tap_setup(struct net_device *dev) 1054static void ipgre_tap_setup(struct net_device *dev)
1055{ 1055{
1056 ether_setup(dev); 1056 ether_setup(dev);
1057 dev->netdev_ops = &gre_tap_netdev_ops; 1057 dev->netdev_ops = &gre_tap_netdev_ops;
1058 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1058 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1059 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1059 ip_tunnel_setup(dev, gre_tap_net_id); 1060 ip_tunnel_setup(dev, gre_tap_net_id);
1060} 1061}
1061 1062
@@ -1240,6 +1241,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
1240 err = ipgre_newlink(net, dev, tb, NULL); 1241 err = ipgre_newlink(net, dev, tb, NULL);
1241 if (err < 0) 1242 if (err < 0)
1242 goto out; 1243 goto out;
1244
1245 /* openvswitch users expect packet sizes to be unrestricted,
1246 * so set the largest MTU we can.
1247 */
1248 err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
1249 if (err)
1250 goto out;
1251
1243 return dev; 1252 return dev;
1244out: 1253out:
1245 free_netdev(dev); 1254 free_netdev(dev);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c03e27..a50124260f5a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
249 switch (cmsg->cmsg_type) { 249 switch (cmsg->cmsg_type) {
250 case IP_RETOPTS: 250 case IP_RETOPTS:
251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); 251 err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
252
253 /* Our caller is responsible for freeing ipc->opt */
252 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), 254 err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
253 err < 40 ? err : 40); 255 err < 40 ? err : 40);
254 if (err) 256 if (err)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c7bd72e9b544..89e8861e05fc 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -943,17 +943,31 @@ done:
943} 943}
944EXPORT_SYMBOL_GPL(ip_tunnel_ioctl); 944EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
945 945
946int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu) 946int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
947{ 947{
948 struct ip_tunnel *tunnel = netdev_priv(dev); 948 struct ip_tunnel *tunnel = netdev_priv(dev);
949 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 949 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
950 int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
950 951
951 if (new_mtu < 68 || 952 if (new_mtu < 68)
952 new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
953 return -EINVAL; 953 return -EINVAL;
954
955 if (new_mtu > max_mtu) {
956 if (strict)
957 return -EINVAL;
958
959 new_mtu = max_mtu;
960 }
961
954 dev->mtu = new_mtu; 962 dev->mtu = new_mtu;
955 return 0; 963 return 0;
956} 964}
965EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
966
967int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
968{
969 return __ip_tunnel_change_mtu(dev, new_mtu, true);
970}
957EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu); 971EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
958 972
959static void ip_tunnel_dev_free(struct net_device *dev) 973static void ip_tunnel_dev_free(struct net_device *dev)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c117b21b937d..d3a27165f9cc 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
746 746
747 if (msg->msg_controllen) { 747 if (msg->msg_controllen) {
748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); 748 err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
749 if (err) 749 if (unlikely(err)) {
750 kfree(ipc.opt);
750 return err; 751 return err;
752 }
751 if (ipc.opt) 753 if (ipc.opt)
752 free = 1; 754 free = 1;
753 } 755 }
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bc35f1842512..7113bae4e6a0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
547 547
548 if (msg->msg_controllen) { 548 if (msg->msg_controllen) {
549 err = ip_cmsg_send(net, msg, &ipc, false); 549 err = ip_cmsg_send(net, msg, &ipc, false);
550 if (err) 550 if (unlikely(err)) {
551 kfree(ipc.opt);
551 goto out; 552 goto out;
553 }
552 if (ipc.opt) 554 if (ipc.opt)
553 free = 1; 555 free = 1;
554 } 556 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85f184e429c6..02c62299d717 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; 129static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130static int ip_rt_min_advmss __read_mostly = 256; 130static int ip_rt_min_advmss __read_mostly = 256;
131 131
132static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
132/* 133/*
133 * Interface to generic destination cache. 134 * Interface to generic destination cache.
134 */ 135 */
@@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
755 struct fib_nh *nh = &FIB_RES_NH(res); 756 struct fib_nh *nh = &FIB_RES_NH(res);
756 757
757 update_or_create_fnhe(nh, fl4->daddr, new_gw, 758 update_or_create_fnhe(nh, fl4->daddr, new_gw,
758 0, 0); 759 0, jiffies + ip_rt_gc_timeout);
759 } 760 }
760 if (kill_route) 761 if (kill_route)
761 rt->dst.obsolete = DST_OBSOLETE_KILL; 762 rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
1556#endif 1557#endif
1557} 1558}
1558 1559
1560static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1561{
1562 struct fnhe_hash_bucket *hash;
1563 struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1564 u32 hval = fnhe_hashfun(daddr);
1565
1566 spin_lock_bh(&fnhe_lock);
1567
1568 hash = rcu_dereference_protected(nh->nh_exceptions,
1569 lockdep_is_held(&fnhe_lock));
1570 hash += hval;
1571
1572 fnhe_p = &hash->chain;
1573 fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1574 while (fnhe) {
1575 if (fnhe->fnhe_daddr == daddr) {
1576 rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1577 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1578 fnhe_flush_routes(fnhe);
1579 kfree_rcu(fnhe, rcu);
1580 break;
1581 }
1582 fnhe_p = &fnhe->fnhe_next;
1583 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1584 lockdep_is_held(&fnhe_lock));
1585 }
1586
1587 spin_unlock_bh(&fnhe_lock);
1588}
1589
1559/* called in rcu_read_lock() section */ 1590/* called in rcu_read_lock() section */
1560static int __mkroute_input(struct sk_buff *skb, 1591static int __mkroute_input(struct sk_buff *skb,
1561 const struct fib_result *res, 1592 const struct fib_result *res,
@@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
1609 1640
1610 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1641 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1611 if (do_cache) { 1642 if (do_cache) {
1612 if (fnhe) 1643 if (fnhe) {
1613 rth = rcu_dereference(fnhe->fnhe_rth_input); 1644 rth = rcu_dereference(fnhe->fnhe_rth_input);
1614 else 1645 if (rth && rth->dst.expires &&
1615 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1646 time_after(jiffies, rth->dst.expires)) {
1647 ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1648 fnhe = NULL;
1649 } else {
1650 goto rt_cache;
1651 }
1652 }
1653
1654 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1616 1655
1656rt_cache:
1617 if (rt_cache_valid(rth)) { 1657 if (rt_cache_valid(rth)) {
1618 skb_dst_set_noref(skb, &rth->dst); 1658 skb_dst_set_noref(skb, &rth->dst);
1619 goto out; 1659 goto out;
@@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2014 struct fib_nh *nh = &FIB_RES_NH(*res); 2054 struct fib_nh *nh = &FIB_RES_NH(*res);
2015 2055
2016 fnhe = find_exception(nh, fl4->daddr); 2056 fnhe = find_exception(nh, fl4->daddr);
2017 if (fnhe) 2057 if (fnhe) {
2018 prth = &fnhe->fnhe_rth_output; 2058 prth = &fnhe->fnhe_rth_output;
2019 else { 2059 rth = rcu_dereference(*prth);
2020 if (unlikely(fl4->flowi4_flags & 2060 if (rth && rth->dst.expires &&
2021 FLOWI_FLAG_KNOWN_NH && 2061 time_after(jiffies, rth->dst.expires)) {
2022 !(nh->nh_gw && 2062 ip_del_fnhe(nh, fl4->daddr);
2023 nh->nh_scope == RT_SCOPE_LINK))) { 2063 fnhe = NULL;
2024 do_cache = false; 2064 } else {
2025 goto add; 2065 goto rt_cache;
2026 } 2066 }
2027 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2028 } 2067 }
2068
2069 if (unlikely(fl4->flowi4_flags &
2070 FLOWI_FLAG_KNOWN_NH &&
2071 !(nh->nh_gw &&
2072 nh->nh_scope == RT_SCOPE_LINK))) {
2073 do_cache = false;
2074 goto add;
2075 }
2076 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2029 rth = rcu_dereference(*prth); 2077 rth = rcu_dereference(*prth);
2078
2079rt_cache:
2030 if (rt_cache_valid(rth)) { 2080 if (rt_cache_valid(rth)) {
2031 dst_hold(&rth->dst); 2081 dst_hold(&rth->dst);
2032 return rth; 2082 return rth;
@@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
2569} 2619}
2570 2620
2571#ifdef CONFIG_SYSCTL 2621#ifdef CONFIG_SYSCTL
2572static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2573static int ip_rt_gc_interval __read_mostly = 60 * HZ; 2622static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2574static int ip_rt_gc_min_interval __read_mostly = HZ / 2; 2623static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2575static int ip_rt_gc_elasticity __read_mostly = 8; 2624static int ip_rt_gc_elasticity __read_mostly = 8;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19746b3fcbbe..483ffdf5aa4d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -940,7 +940,7 @@ new_segment:
940 940
941 i = skb_shinfo(skb)->nr_frags; 941 i = skb_shinfo(skb)->nr_frags;
942 can_coalesce = skb_can_coalesce(skb, i, page, offset); 942 can_coalesce = skb_can_coalesce(skb, i, page, offset);
943 if (!can_coalesce && i >= MAX_SKB_FRAGS) { 943 if (!can_coalesce && i >= sysctl_max_skb_frags) {
944 tcp_mark_push(tp, skb); 944 tcp_mark_push(tp, skb);
945 goto new_segment; 945 goto new_segment;
946 } 946 }
@@ -1213,7 +1213,7 @@ new_segment:
1213 1213
1214 if (!skb_can_coalesce(skb, i, pfrag->page, 1214 if (!skb_can_coalesce(skb, i, pfrag->page,
1215 pfrag->offset)) { 1215 pfrag->offset)) {
1216 if (i == MAX_SKB_FRAGS || !sg) { 1216 if (i == sysctl_max_skb_frags || !sg) {
1217 tcp_mark_push(tp, skb); 1217 tcp_mark_push(tp, skb);
1218 goto new_segment; 1218 goto new_segment;
1219 } 1219 }
@@ -2950,7 +2950,7 @@ static void __tcp_alloc_md5sig_pool(void)
2950 struct crypto_hash *hash; 2950 struct crypto_hash *hash;
2951 2951
2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); 2952 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2953 if (IS_ERR_OR_NULL(hash)) 2953 if (IS_ERR(hash))
2954 return; 2954 return;
2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; 2955 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
2956 } 2956 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1c2a73406261..3b2c8e90a475 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2896,7 +2896,10 @@ static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
2896{ 2896{
2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; 2897 const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min; 2898 struct rtt_meas *m = tcp_sk(sk)->rtt_min;
2899 struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now }; 2899 struct rtt_meas rttm = {
2900 .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
2901 .ts = now,
2902 };
2900 u32 elapsed; 2903 u32 elapsed;
2901 2904
2902 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ 2905 /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a4d523709ab3..487ac67059e2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
311 311
312 312
313/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ 313/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
314void tcp_req_err(struct sock *sk, u32 seq) 314void tcp_req_err(struct sock *sk, u32 seq, bool abort)
315{ 315{
316 struct request_sock *req = inet_reqsk(sk); 316 struct request_sock *req = inet_reqsk(sk);
317 struct net *net = sock_net(sk); 317 struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
323 323
324 if (seq != tcp_rsk(req)->snt_isn) { 324 if (seq != tcp_rsk(req)->snt_isn) {
325 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); 325 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
326 } else { 326 } else if (abort) {
327 /* 327 /*
328 * Still in SYN_RECV, just remove it silently. 328 * Still in SYN_RECV, just remove it silently.
329 * There is no good way to pass the error to the newly 329 * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
383 } 383 }
384 seq = ntohl(th->seq); 384 seq = ntohl(th->seq);
385 if (sk->sk_state == TCP_NEW_SYN_RECV) 385 if (sk->sk_state == TCP_NEW_SYN_RECV)
386 return tcp_req_err(sk, seq); 386 return tcp_req_err(sk, seq,
387 type == ICMP_PARAMETERPROB ||
388 type == ICMP_TIME_EXCEEDED ||
389 (type == ICMP_DEST_UNREACH &&
390 (code == ICMP_NET_UNREACH ||
391 code == ICMP_HOST_UNREACH)));
387 392
388 bh_lock_sock(sk); 393 bh_lock_sock(sk);
389 /* If too many ICMPs get dropped on busy 394 /* If too many ICMPs get dropped on busy
@@ -1592,28 +1597,30 @@ process:
1592 1597
1593 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1598 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1594 struct request_sock *req = inet_reqsk(sk); 1599 struct request_sock *req = inet_reqsk(sk);
1595 struct sock *nsk = NULL; 1600 struct sock *nsk;
1596 1601
1597 sk = req->rsk_listener; 1602 sk = req->rsk_listener;
1598 if (tcp_v4_inbound_md5_hash(sk, skb)) 1603 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1599 goto discard_and_relse; 1604 reqsk_put(req);
1600 if (likely(sk->sk_state == TCP_LISTEN)) { 1605 goto discard_it;
1601 nsk = tcp_check_req(sk, skb, req, false); 1606 }
1602 } else { 1607 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1603 inet_csk_reqsk_queue_drop_and_put(sk, req); 1608 inet_csk_reqsk_queue_drop_and_put(sk, req);
1604 goto lookup; 1609 goto lookup;
1605 } 1610 }
1611 sock_hold(sk);
1612 nsk = tcp_check_req(sk, skb, req, false);
1606 if (!nsk) { 1613 if (!nsk) {
1607 reqsk_put(req); 1614 reqsk_put(req);
1608 goto discard_it; 1615 goto discard_and_relse;
1609 } 1616 }
1610 if (nsk == sk) { 1617 if (nsk == sk) {
1611 sock_hold(sk);
1612 reqsk_put(req); 1618 reqsk_put(req);
1613 } else if (tcp_child_process(sk, nsk, skb)) { 1619 } else if (tcp_child_process(sk, nsk, skb)) {
1614 tcp_v4_send_reset(nsk, skb); 1620 tcp_v4_send_reset(nsk, skb);
1615 goto discard_it; 1621 goto discard_and_relse;
1616 } else { 1622 } else {
1623 sock_put(sk);
1617 return 0; 1624 return 0;
1618 } 1625 }
1619 } 1626 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index be0b21852b13..95d2f198017e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1048,8 +1048,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1048 if (msg->msg_controllen) { 1048 if (msg->msg_controllen) {
1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc, 1049 err = ip_cmsg_send(sock_net(sk), msg, &ipc,
1050 sk->sk_family == AF_INET6); 1050 sk->sk_family == AF_INET6);
1051 if (err) 1051 if (unlikely(err)) {
1052 kfree(ipc.opt);
1052 return err; 1053 return err;
1054 }
1053 if (ipc.opt) 1055 if (ipc.opt)
1054 free = 1; 1056 free = 1;
1055 connected = 0; 1057 connected = 0;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 38eeddedfc21..bdd7eac4307a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
583 if (err < 0) 583 if (err < 0)
584 goto errout; 584 goto errout;
585 585
586 err = EINVAL; 586 err = -EINVAL;
587 if (!tb[NETCONFA_IFINDEX]) 587 if (!tb[NETCONFA_IFINDEX])
588 goto errout; 588 goto errout;
589 589
@@ -3538,6 +3538,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3538{ 3538{
3539 struct inet6_dev *idev = ifp->idev; 3539 struct inet6_dev *idev = ifp->idev;
3540 struct net_device *dev = idev->dev; 3540 struct net_device *dev = idev->dev;
3541 bool notify = false;
3541 3542
3542 addrconf_join_solict(dev, &ifp->addr); 3543 addrconf_join_solict(dev, &ifp->addr);
3543 3544
@@ -3583,7 +3584,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3583 /* Because optimistic nodes can use this address, 3584 /* Because optimistic nodes can use this address,
3584 * notify listeners. If DAD fails, RTM_DELADDR is sent. 3585 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3585 */ 3586 */
3586 ipv6_ifa_notify(RTM_NEWADDR, ifp); 3587 notify = true;
3587 } 3588 }
3588 } 3589 }
3589 3590
@@ -3591,6 +3592,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3591out: 3592out:
3592 spin_unlock(&ifp->lock); 3593 spin_unlock(&ifp->lock);
3593 read_unlock_bh(&idev->lock); 3594 read_unlock_bh(&idev->lock);
3595 if (notify)
3596 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3594} 3597}
3595 3598
3596static void addrconf_dad_start(struct inet6_ifaddr *ifp) 3599static void addrconf_dad_start(struct inet6_ifaddr *ifp)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 1f9ebe3cbb4a..dc2db4f7b182 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
540 } 540 }
541 spin_lock_bh(&ip6_sk_fl_lock); 541 spin_lock_bh(&ip6_sk_fl_lock);
542 for (sflp = &np->ipv6_fl_list; 542 for (sflp = &np->ipv6_fl_list;
543 (sfl = rcu_dereference(*sflp)) != NULL; 543 (sfl = rcu_dereference_protected(*sflp,
544 lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
544 sflp = &sfl->next) { 545 sflp = &sfl->next) {
545 if (sfl->fl->label == freq.flr_label) { 546 if (sfl->fl->label == freq.flr_label) {
546 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) 547 if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
547 np->flow_label &= ~IPV6_FLOWLABEL_MASK; 548 np->flow_label &= ~IPV6_FLOWLABEL_MASK;
548 *sflp = rcu_dereference(sfl->next); 549 *sflp = sfl->next;
549 spin_unlock_bh(&ip6_sk_fl_lock); 550 spin_unlock_bh(&ip6_sk_fl_lock);
550 fl_release(sfl->fl); 551 fl_release(sfl->fl);
551 kfree_rcu(sfl, rcu); 552 kfree_rcu(sfl, rcu);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f37f18b6b40c..a69aad1e29d1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1512,6 +1512,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
1512 dev->destructor = ip6gre_dev_free; 1512 dev->destructor = ip6gre_dev_free;
1513 1513
1514 dev->features |= NETIF_F_NETNS_LOCAL; 1514 dev->features |= NETIF_F_NETNS_LOCAL;
1515 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1515} 1516}
1516 1517
1517static int ip6gre_newlink(struct net *src_net, struct net_device *dev, 1518static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 31ba7ca19757..051b6a6bfff6 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -21,6 +21,10 @@
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/netfilter/ipv6/nf_nat_masquerade.h> 22#include <net/netfilter/ipv6/nf_nat_masquerade.h>
23 23
24#define MAX_WORK_COUNT 16
25
26static atomic_t v6_worker_count;
27
24unsigned int 28unsigned int
25nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, 29nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
26 const struct net_device *out) 30 const struct net_device *out)
@@ -78,14 +82,78 @@ static struct notifier_block masq_dev_notifier = {
78 .notifier_call = masq_device_event, 82 .notifier_call = masq_device_event,
79}; 83};
80 84
85struct masq_dev_work {
86 struct work_struct work;
87 struct net *net;
88 int ifindex;
89};
90
91static void iterate_cleanup_work(struct work_struct *work)
92{
93 struct masq_dev_work *w;
94 long index;
95
96 w = container_of(work, struct masq_dev_work, work);
97
98 index = w->ifindex;
99 nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
100
101 put_net(w->net);
102 kfree(w);
103 atomic_dec(&v6_worker_count);
104 module_put(THIS_MODULE);
105}
106
107/* ipv6 inet notifier is an atomic notifier, i.e. we cannot
108 * schedule.
109 *
110 * Unfortunately, nf_ct_iterate_cleanup can run for a long
111 * time if there are lots of conntracks and the system
112 * handles high softirq load, so it frequently calls cond_resched
113 * while iterating the conntrack table.
114 *
115 * So we defer nf_ct_iterate_cleanup walk to the system workqueue.
116 *
117 * As we can have 'a lot' of inet_events (depending on amount
118 * of ipv6 addresses being deleted), we also need to add an upper
119 * limit to the number of queued work items.
120 */
81static int masq_inet_event(struct notifier_block *this, 121static int masq_inet_event(struct notifier_block *this,
82 unsigned long event, void *ptr) 122 unsigned long event, void *ptr)
83{ 123{
84 struct inet6_ifaddr *ifa = ptr; 124 struct inet6_ifaddr *ifa = ptr;
85 struct netdev_notifier_info info; 125 const struct net_device *dev;
126 struct masq_dev_work *w;
127 struct net *net;
128
129 if (event != NETDEV_DOWN ||
130 atomic_read(&v6_worker_count) >= MAX_WORK_COUNT)
131 return NOTIFY_DONE;
132
133 dev = ifa->idev->dev;
134 net = maybe_get_net(dev_net(dev));
135 if (!net)
136 return NOTIFY_DONE;
86 137
87 netdev_notifier_info_init(&info, ifa->idev->dev); 138 if (!try_module_get(THIS_MODULE))
88 return masq_device_event(this, event, &info); 139 goto err_module;
140
141 w = kmalloc(sizeof(*w), GFP_ATOMIC);
142 if (w) {
143 atomic_inc(&v6_worker_count);
144
145 INIT_WORK(&w->work, iterate_cleanup_work);
146 w->ifindex = dev->ifindex;
147 w->net = net;
148 schedule_work(&w->work);
149
150 return NOTIFY_DONE;
151 }
152
153 module_put(THIS_MODULE);
154 err_module:
155 put_net(net);
156 return NOTIFY_DONE;
89} 157}
90 158
91static struct notifier_block masq_inet_notifier = { 159static struct notifier_block masq_inet_notifier = {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 006396e31cb0..5c8c84273028 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
327 struct tcp_sock *tp; 327 struct tcp_sock *tp;
328 __u32 seq, snd_una; 328 __u32 seq, snd_una;
329 struct sock *sk; 329 struct sock *sk;
330 bool fatal;
330 int err; 331 int err;
331 332
332 sk = __inet6_lookup_established(net, &tcp_hashinfo, 333 sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
345 return; 346 return;
346 } 347 }
347 seq = ntohl(th->seq); 348 seq = ntohl(th->seq);
349 fatal = icmpv6_err_convert(type, code, &err);
348 if (sk->sk_state == TCP_NEW_SYN_RECV) 350 if (sk->sk_state == TCP_NEW_SYN_RECV)
349 return tcp_req_err(sk, seq); 351 return tcp_req_err(sk, seq, fatal);
350 352
351 bh_lock_sock(sk); 353 bh_lock_sock(sk);
352 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) 354 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
400 goto out; 402 goto out;
401 } 403 }
402 404
403 icmpv6_err_convert(type, code, &err);
404 405
405 /* Might be for an request_sock */ 406 /* Might be for an request_sock */
406 switch (sk->sk_state) { 407 switch (sk->sk_state) {
@@ -1386,7 +1387,7 @@ process:
1386 1387
1387 if (sk->sk_state == TCP_NEW_SYN_RECV) { 1388 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1388 struct request_sock *req = inet_reqsk(sk); 1389 struct request_sock *req = inet_reqsk(sk);
1389 struct sock *nsk = NULL; 1390 struct sock *nsk;
1390 1391
1391 sk = req->rsk_listener; 1392 sk = req->rsk_listener;
1392 tcp_v6_fill_cb(skb, hdr, th); 1393 tcp_v6_fill_cb(skb, hdr, th);
@@ -1394,24 +1395,24 @@ process:
1394 reqsk_put(req); 1395 reqsk_put(req);
1395 goto discard_it; 1396 goto discard_it;
1396 } 1397 }
1397 if (likely(sk->sk_state == TCP_LISTEN)) { 1398 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1398 nsk = tcp_check_req(sk, skb, req, false);
1399 } else {
1400 inet_csk_reqsk_queue_drop_and_put(sk, req); 1399 inet_csk_reqsk_queue_drop_and_put(sk, req);
1401 goto lookup; 1400 goto lookup;
1402 } 1401 }
1402 sock_hold(sk);
1403 nsk = tcp_check_req(sk, skb, req, false);
1403 if (!nsk) { 1404 if (!nsk) {
1404 reqsk_put(req); 1405 reqsk_put(req);
1405 goto discard_it; 1406 goto discard_and_relse;
1406 } 1407 }
1407 if (nsk == sk) { 1408 if (nsk == sk) {
1408 sock_hold(sk);
1409 reqsk_put(req); 1409 reqsk_put(req);
1410 tcp_v6_restore_cb(skb); 1410 tcp_v6_restore_cb(skb);
1411 } else if (tcp_child_process(sk, nsk, skb)) { 1411 } else if (tcp_child_process(sk, nsk, skb)) {
1412 tcp_v6_send_reset(nsk, skb); 1412 tcp_v6_send_reset(nsk, skb);
1413 goto discard_it; 1413 goto discard_and_relse;
1414 } else { 1414 } else {
1415 sock_put(sk);
1415 return 0; 1416 return 0;
1416 } 1417 }
1417 } 1418 }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f93c5be612a7..2caaa84ce92d 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq, 124 ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
125 NLM_F_ACK, tunnel, cmd); 125 NLM_F_ACK, tunnel, cmd);
126 126
127 if (ret >= 0) 127 if (ret >= 0) {
128 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 128 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
129 /* We don't care if no one is listening */
130 if (ret == -ESRCH)
131 ret = 0;
132 return ret;
133 }
129 134
130 nlmsg_free(msg); 135 nlmsg_free(msg);
131 136
@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
147 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 152 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
148 NLM_F_ACK, session, cmd); 153 NLM_F_ACK, session, cmd);
149 154
150 if (ret >= 0) 155 if (ret >= 0) {
151 return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC); 156 ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
157 /* We don't care if no one is listening */
158 if (ret == -ESRCH)
159 ret = 0;
160 return ret;
161 }
152 162
153 nlmsg_free(msg); 163 nlmsg_free(msg);
154 164
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8c067e6663a1..95e757c377f9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -891,7 +891,7 @@ config NETFILTER_XT_TARGET_TEE
891 depends on IPV6 || IPV6=n 891 depends on IPV6 || IPV6=n
892 depends on !NF_CONNTRACK || NF_CONNTRACK 892 depends on !NF_CONNTRACK || NF_CONNTRACK
893 select NF_DUP_IPV4 893 select NF_DUP_IPV4
894 select NF_DUP_IPV6 if IP6_NF_IPTABLES != n 894 select NF_DUP_IPV6 if IPV6
895 ---help--- 895 ---help---
896 This option adds a "TEE" target with which a packet can be cloned and 896 This option adds a "TEE" target with which a packet can be cloned and
897 this clone be rerouted to another nexthop. 897 this clone be rerouted to another nexthop.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 58882de06bd7..f60b4fdeeb8c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1412,6 +1412,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1412 } 1412 }
1413 spin_unlock(lockp); 1413 spin_unlock(lockp);
1414 local_bh_enable(); 1414 local_bh_enable();
1415 cond_resched();
1415 } 1416 }
1416 1417
1417 for_each_possible_cpu(cpu) { 1418 for_each_possible_cpu(cpu) {
@@ -1424,6 +1425,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1424 set_bit(IPS_DYING_BIT, &ct->status); 1425 set_bit(IPS_DYING_BIT, &ct->status);
1425 } 1426 }
1426 spin_unlock_bh(&pcpu->lock); 1427 spin_unlock_bh(&pcpu->lock);
1428 cond_resched();
1427 } 1429 }
1428 return NULL; 1430 return NULL;
1429found: 1431found:
@@ -1440,6 +1442,8 @@ void nf_ct_iterate_cleanup(struct net *net,
1440 struct nf_conn *ct; 1442 struct nf_conn *ct;
1441 unsigned int bucket = 0; 1443 unsigned int bucket = 0;
1442 1444
1445 might_sleep();
1446
1443 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { 1447 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1444 /* Time to push up daises... */ 1448 /* Time to push up daises... */
1445 if (del_timer(&ct->timeout)) 1449 if (del_timer(&ct->timeout))
@@ -1448,6 +1452,7 @@ void nf_ct_iterate_cleanup(struct net *net,
1448 /* ... else the timer will get him soon. */ 1452 /* ... else the timer will get him soon. */
1449 1453
1450 nf_ct_put(ct); 1454 nf_ct_put(ct);
1455 cond_resched();
1451 } 1456 }
1452} 1457}
1453EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup); 1458EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index a7ba23353dab..857ae89633af 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -311,14 +311,14 @@ replay:
311#endif 311#endif
312 { 312 {
313 nfnl_unlock(subsys_id); 313 nfnl_unlock(subsys_id);
314 netlink_ack(skb, nlh, -EOPNOTSUPP); 314 netlink_ack(oskb, nlh, -EOPNOTSUPP);
315 return kfree_skb(skb); 315 return kfree_skb(skb);
316 } 316 }
317 } 317 }
318 318
319 if (!ss->commit || !ss->abort) { 319 if (!ss->commit || !ss->abort) {
320 nfnl_unlock(subsys_id); 320 nfnl_unlock(subsys_id);
321 netlink_ack(skb, nlh, -EOPNOTSUPP); 321 netlink_ack(oskb, nlh, -EOPNOTSUPP);
322 return kfree_skb(skb); 322 return kfree_skb(skb);
323 } 323 }
324 324
@@ -328,10 +328,12 @@ replay:
328 nlh = nlmsg_hdr(skb); 328 nlh = nlmsg_hdr(skb);
329 err = 0; 329 err = 0;
330 330
331 if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || 331 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
332 skb->len < nlh->nlmsg_len) { 332 skb->len < nlh->nlmsg_len ||
333 err = -EINVAL; 333 nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
334 goto ack; 334 nfnl_err_reset(&err_list);
335 status |= NFNL_BATCH_FAILURE;
336 goto done;
335 } 337 }
336 338
337 /* Only requests are handled by the kernel */ 339 /* Only requests are handled by the kernel */
@@ -406,7 +408,7 @@ ack:
406 * pointing to the batch header. 408 * pointing to the batch header.
407 */ 409 */
408 nfnl_err_reset(&err_list); 410 nfnl_err_reset(&err_list);
409 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 411 netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
410 status |= NFNL_BATCH_FAILURE; 412 status |= NFNL_BATCH_FAILURE;
411 goto done; 413 goto done;
412 } 414 }
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 94837d236ab0..2671b9deb103 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -312,7 +312,7 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 312 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
313 untimeout(h, timeout); 313 untimeout(h, timeout);
314 } 314 }
315 nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); 315 spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
316 } 316 }
317 local_bh_enable(); 317 local_bh_enable();
318} 318}
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c7808fc19719..c9743f78f219 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -100,7 +100,7 @@ static int nft_counter_init(const struct nft_ctx *ctx,
100 100
101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu); 101 cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
102 if (cpu_stats == NULL) 102 if (cpu_stats == NULL)
103 return ENOMEM; 103 return -ENOMEM;
104 104
105 preempt_disable(); 105 preempt_disable();
106 this_cpu = this_cpu_ptr(cpu_stats); 106 this_cpu = this_cpu_ptr(cpu_stats);
@@ -138,7 +138,7 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu, 138 cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
139 GFP_ATOMIC); 139 GFP_ATOMIC);
140 if (cpu_stats == NULL) 140 if (cpu_stats == NULL)
141 return ENOMEM; 141 return -ENOMEM;
142 142
143 preempt_disable(); 143 preempt_disable();
144 this_cpu = this_cpu_ptr(cpu_stats); 144 this_cpu = this_cpu_ptr(cpu_stats);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3eff7b67cdf2..6e57a3966dc5 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -38,7 +38,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
38 return XT_CONTINUE; 38 return XT_CONTINUE;
39} 39}
40 40
41#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 41#if IS_ENABLED(CONFIG_IPV6)
42static unsigned int 42static unsigned int
43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par) 43tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
44{ 44{
@@ -131,7 +131,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
131 .destroy = tee_tg_destroy, 131 .destroy = tee_tg_destroy,
132 .me = THIS_MODULE, 132 .me = THIS_MODULE,
133 }, 133 },
134#if IS_ENABLED(CONFIG_NF_DUP_IPV6) 134#if IS_ENABLED(CONFIG_IPV6)
135 { 135 {
136 .name = "TEE", 136 .name = "TEE",
137 .revision = 1, 137 .revision = 1,
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 1605691d9414..5eb7694348b5 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -90,7 +90,9 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
90 int err; 90 int err;
91 struct vxlan_config conf = { 91 struct vxlan_config conf = {
92 .no_share = true, 92 .no_share = true,
93 .flags = VXLAN_F_COLLECT_METADATA, 93 .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
94 /* Don't restrict the packets that can be sent by MTU */
95 .mtu = IP_MAX_MTU,
94 }; 96 };
95 97
96 if (!options) { 98 if (!options) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b5c2cf2aa6d4..af1acf009866 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1852,6 +1852,7 @@ reset:
1852 } 1852 }
1853 1853
1854 tp = old_tp; 1854 tp = old_tp;
1855 protocol = tc_skb_protocol(skb);
1855 goto reclassify; 1856 goto reclassify;
1856#endif 1857#endif
1857} 1858}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ab0d538a74ed..1099e99a53c4 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -60,6 +60,8 @@
60#include <net/inet_common.h> 60#include <net/inet_common.h>
61#include <net/inet_ecn.h> 61#include <net/inet_ecn.h>
62 62
63#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
64
63/* Global data structures. */ 65/* Global data structures. */
64struct sctp_globals sctp_globals __read_mostly; 66struct sctp_globals sctp_globals __read_mostly;
65 67
@@ -1355,6 +1357,8 @@ static __init int sctp_init(void)
1355 unsigned long limit; 1357 unsigned long limit;
1356 int max_share; 1358 int max_share;
1357 int order; 1359 int order;
1360 int num_entries;
1361 int max_entry_order;
1358 1362
1359 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); 1363 sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
1360 1364
@@ -1407,14 +1411,24 @@ static __init int sctp_init(void)
1407 1411
1408 /* Size and allocate the association hash table. 1412 /* Size and allocate the association hash table.
1409 * The methodology is similar to that of the tcp hash tables. 1413 * The methodology is similar to that of the tcp hash tables.
1414 * Though not identical. Start by getting a goal size
1410 */ 1415 */
1411 if (totalram_pages >= (128 * 1024)) 1416 if (totalram_pages >= (128 * 1024))
1412 goal = totalram_pages >> (22 - PAGE_SHIFT); 1417 goal = totalram_pages >> (22 - PAGE_SHIFT);
1413 else 1418 else
1414 goal = totalram_pages >> (24 - PAGE_SHIFT); 1419 goal = totalram_pages >> (24 - PAGE_SHIFT);
1415 1420
1416 for (order = 0; (1UL << order) < goal; order++) 1421 /* Then compute the page order for said goal */
1417 ; 1422 order = get_order(goal);
1423
1424 /* Now compute the required page order for the maximum sized table we
1425 * want to create
1426 */
1427 max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
1428 sizeof(struct sctp_bind_hashbucket));
1429
1430 /* Limit the page order by that maximum hash table size */
1431 order = min(order, max_entry_order);
1418 1432
1419 /* Allocate and initialize the endpoint hash table. */ 1433 /* Allocate and initialize the endpoint hash table. */
1420 sctp_ep_hashsize = 64; 1434 sctp_ep_hashsize = 64;
@@ -1430,20 +1444,35 @@ static __init int sctp_init(void)
1430 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); 1444 INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
1431 } 1445 }
1432 1446
1433 /* Allocate and initialize the SCTP port hash table. */ 1447 /* Allocate and initialize the SCTP port hash table.
1448 * Note that order is initalized to start at the max sized
1449 * table we want to support. If we can't get that many pages
1450 * reduce the order and try again
1451 */
1434 do { 1452 do {
1435 sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
1436 sizeof(struct sctp_bind_hashbucket);
1437 if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
1438 continue;
1439 sctp_port_hashtable = (struct sctp_bind_hashbucket *) 1453 sctp_port_hashtable = (struct sctp_bind_hashbucket *)
1440 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order); 1454 __get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
1441 } while (!sctp_port_hashtable && --order > 0); 1455 } while (!sctp_port_hashtable && --order > 0);
1456
1442 if (!sctp_port_hashtable) { 1457 if (!sctp_port_hashtable) {
1443 pr_err("Failed bind hash alloc\n"); 1458 pr_err("Failed bind hash alloc\n");
1444 status = -ENOMEM; 1459 status = -ENOMEM;
1445 goto err_bhash_alloc; 1460 goto err_bhash_alloc;
1446 } 1461 }
1462
1463 /* Now compute the number of entries that will fit in the
1464 * port hash space we allocated
1465 */
1466 num_entries = (1UL << order) * PAGE_SIZE /
1467 sizeof(struct sctp_bind_hashbucket);
1468
1469 /* And finish by rounding it down to the nearest power of two
1470 * this wastes some memory of course, but its needed because
1471 * the hash function operates based on the assumption that
1472 * that the number of entries is a power of two
1473 */
1474 sctp_port_hashsize = rounddown_pow_of_two(num_entries);
1475
1447 for (i = 0; i < sctp_port_hashsize; i++) { 1476 for (i = 0; i < sctp_port_hashsize; i++) {
1448 spin_lock_init(&sctp_port_hashtable[i].lock); 1477 spin_lock_init(&sctp_port_hashtable[i].lock);
1449 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); 1478 INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
@@ -1452,7 +1481,8 @@ static __init int sctp_init(void)
1452 if (sctp_transport_hashtable_init()) 1481 if (sctp_transport_hashtable_init())
1453 goto err_thash_alloc; 1482 goto err_thash_alloc;
1454 1483
1455 pr_info("Hash tables configured (bind %d)\n", sctp_port_hashsize); 1484 pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
1485 num_entries);
1456 1486
1457 sctp_sysctl_register(); 1487 sctp_sysctl_register();
1458 1488
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca2ebfe0be8..e878da0949db 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5538,6 +5538,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5538 struct sctp_hmac_algo_param *hmacs; 5538 struct sctp_hmac_algo_param *hmacs;
5539 __u16 data_len = 0; 5539 __u16 data_len = 0;
5540 u32 num_idents; 5540 u32 num_idents;
5541 int i;
5541 5542
5542 if (!ep->auth_enable) 5543 if (!ep->auth_enable)
5543 return -EACCES; 5544 return -EACCES;
@@ -5555,8 +5556,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5555 return -EFAULT; 5556 return -EFAULT;
5556 if (put_user(num_idents, &p->shmac_num_idents)) 5557 if (put_user(num_idents, &p->shmac_num_idents))
5557 return -EFAULT; 5558 return -EFAULT;
5558 if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) 5559 for (i = 0; i < num_idents; i++) {
5559 return -EFAULT; 5560 __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
5561
5562 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
5563 return -EFAULT;
5564 }
5560 return 0; 5565 return 0;
5561} 5566}
5562 5567
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b32fd602669..273bc3a35425 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
1225 if (bp[0] == '\\' && bp[1] == 'x') { 1225 if (bp[0] == '\\' && bp[1] == 'x') {
1226 /* HEX STRING */ 1226 /* HEX STRING */
1227 bp += 2; 1227 bp += 2;
1228 while (len < bufsize) { 1228 while (len < bufsize - 1) {
1229 int h, l; 1229 int h, l;
1230 1230
1231 h = hex_to_bin(bp[0]); 1231 h = hex_to_bin(bp[0]);
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index c14f3a4bff68..b289e106540b 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -80,13 +80,13 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
80 if (!r) 80 if (!r)
81 goto out; 81 goto out;
82 82
83 r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES * 83 r->fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
84 sizeof(u64), GFP_KERNEL); 84 sizeof(u64), GFP_KERNEL);
85 if (!r->r.fmr.physaddrs) 85 if (!r->fmr.physaddrs)
86 goto out_free; 86 goto out_free;
87 87
88 r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr); 88 r->fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
89 if (IS_ERR(r->r.fmr.fmr)) 89 if (IS_ERR(r->fmr.fmr))
90 goto out_fmr_err; 90 goto out_fmr_err;
91 91
92 list_add(&r->mw_list, &buf->rb_mws); 92 list_add(&r->mw_list, &buf->rb_mws);
@@ -95,9 +95,9 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
95 return 0; 95 return 0;
96 96
97out_fmr_err: 97out_fmr_err:
98 rc = PTR_ERR(r->r.fmr.fmr); 98 rc = PTR_ERR(r->fmr.fmr);
99 dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc); 99 dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc);
100 kfree(r->r.fmr.physaddrs); 100 kfree(r->fmr.physaddrs);
101out_free: 101out_free:
102 kfree(r); 102 kfree(r);
103out: 103out:
@@ -109,7 +109,7 @@ __fmr_unmap(struct rpcrdma_mw *r)
109{ 109{
110 LIST_HEAD(l); 110 LIST_HEAD(l);
111 111
112 list_add(&r->r.fmr.fmr->list, &l); 112 list_add(&r->fmr.fmr->list, &l);
113 return ib_unmap_fmr(&l); 113 return ib_unmap_fmr(&l);
114} 114}
115 115
@@ -148,7 +148,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
148 nsegs = RPCRDMA_MAX_FMR_SGES; 148 nsegs = RPCRDMA_MAX_FMR_SGES;
149 for (i = 0; i < nsegs;) { 149 for (i = 0; i < nsegs;) {
150 rpcrdma_map_one(device, seg, direction); 150 rpcrdma_map_one(device, seg, direction);
151 mw->r.fmr.physaddrs[i] = seg->mr_dma; 151 mw->fmr.physaddrs[i] = seg->mr_dma;
152 len += seg->mr_len; 152 len += seg->mr_len;
153 ++seg; 153 ++seg;
154 ++i; 154 ++i;
@@ -158,13 +158,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
158 break; 158 break;
159 } 159 }
160 160
161 rc = ib_map_phys_fmr(mw->r.fmr.fmr, mw->r.fmr.physaddrs, 161 rc = ib_map_phys_fmr(mw->fmr.fmr, mw->fmr.physaddrs,
162 i, seg1->mr_dma); 162 i, seg1->mr_dma);
163 if (rc) 163 if (rc)
164 goto out_maperr; 164 goto out_maperr;
165 165
166 seg1->rl_mw = mw; 166 seg1->rl_mw = mw;
167 seg1->mr_rkey = mw->r.fmr.fmr->rkey; 167 seg1->mr_rkey = mw->fmr.fmr->rkey;
168 seg1->mr_base = seg1->mr_dma + pageoff; 168 seg1->mr_base = seg1->mr_dma + pageoff;
169 seg1->mr_nsegs = i; 169 seg1->mr_nsegs = i;
170 seg1->mr_len = len; 170 seg1->mr_len = len;
@@ -219,7 +219,7 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
219 seg = &req->rl_segments[i]; 219 seg = &req->rl_segments[i];
220 mw = seg->rl_mw; 220 mw = seg->rl_mw;
221 221
222 list_add(&mw->r.fmr.fmr->list, &unmap_list); 222 list_add(&mw->fmr.fmr->list, &unmap_list);
223 223
224 i += seg->mr_nsegs; 224 i += seg->mr_nsegs;
225 } 225 }
@@ -281,9 +281,9 @@ fmr_op_destroy(struct rpcrdma_buffer *buf)
281 while (!list_empty(&buf->rb_all)) { 281 while (!list_empty(&buf->rb_all)) {
282 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); 282 r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
283 list_del(&r->mw_all); 283 list_del(&r->mw_all);
284 kfree(r->r.fmr.physaddrs); 284 kfree(r->fmr.physaddrs);
285 285
286 rc = ib_dealloc_fmr(r->r.fmr.fmr); 286 rc = ib_dealloc_fmr(r->fmr.fmr);
287 if (rc) 287 if (rc)
288 dprintk("RPC: %s: ib_dealloc_fmr failed %i\n", 288 dprintk("RPC: %s: ib_dealloc_fmr failed %i\n",
289 __func__, rc); 289 __func__, rc);
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index e16567389e28..c250924a9fd3 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -109,20 +109,20 @@ static void
109__frwr_recovery_worker(struct work_struct *work) 109__frwr_recovery_worker(struct work_struct *work)
110{ 110{
111 struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw, 111 struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
112 r.frmr.fr_work); 112 frmr.fr_work);
113 struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt; 113 struct rpcrdma_xprt *r_xprt = r->frmr.fr_xprt;
114 unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; 114 unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
115 struct ib_pd *pd = r_xprt->rx_ia.ri_pd; 115 struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
116 116
117 if (ib_dereg_mr(r->r.frmr.fr_mr)) 117 if (ib_dereg_mr(r->frmr.fr_mr))
118 goto out_fail; 118 goto out_fail;
119 119
120 r->r.frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth); 120 r->frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
121 if (IS_ERR(r->r.frmr.fr_mr)) 121 if (IS_ERR(r->frmr.fr_mr))
122 goto out_fail; 122 goto out_fail;
123 123
124 dprintk("RPC: %s: recovered FRMR %p\n", __func__, r); 124 dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
125 r->r.frmr.fr_state = FRMR_IS_INVALID; 125 r->frmr.fr_state = FRMR_IS_INVALID;
126 rpcrdma_put_mw(r_xprt, r); 126 rpcrdma_put_mw(r_xprt, r);
127 return; 127 return;
128 128
@@ -137,15 +137,15 @@ out_fail:
137static void 137static void
138__frwr_queue_recovery(struct rpcrdma_mw *r) 138__frwr_queue_recovery(struct rpcrdma_mw *r)
139{ 139{
140 INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker); 140 INIT_WORK(&r->frmr.fr_work, __frwr_recovery_worker);
141 queue_work(frwr_recovery_wq, &r->r.frmr.fr_work); 141 queue_work(frwr_recovery_wq, &r->frmr.fr_work);
142} 142}
143 143
144static int 144static int
145__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device, 145__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
146 unsigned int depth) 146 unsigned int depth)
147{ 147{
148 struct rpcrdma_frmr *f = &r->r.frmr; 148 struct rpcrdma_frmr *f = &r->frmr;
149 int rc; 149 int rc;
150 150
151 f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth); 151 f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
@@ -158,6 +158,8 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
158 158
159 sg_init_table(f->sg, depth); 159 sg_init_table(f->sg, depth);
160 160
161 init_completion(&f->fr_linv_done);
162
161 return 0; 163 return 0;
162 164
163out_mr_err: 165out_mr_err:
@@ -179,11 +181,11 @@ __frwr_release(struct rpcrdma_mw *r)
179{ 181{
180 int rc; 182 int rc;
181 183
182 rc = ib_dereg_mr(r->r.frmr.fr_mr); 184 rc = ib_dereg_mr(r->frmr.fr_mr);
183 if (rc) 185 if (rc)
184 dprintk("RPC: %s: ib_dereg_mr status %i\n", 186 dprintk("RPC: %s: ib_dereg_mr status %i\n",
185 __func__, rc); 187 __func__, rc);
186 kfree(r->r.frmr.sg); 188 kfree(r->frmr.sg);
187} 189}
188 190
189static int 191static int
@@ -244,39 +246,76 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
244 rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth); 246 rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
245} 247}
246 248
247/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs 249static void
248 * to be reset. 250__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
251 const char *wr)
252{
253 frmr->fr_state = FRMR_IS_STALE;
254 if (wc->status != IB_WC_WR_FLUSH_ERR)
255 pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
256 wr, ib_wc_status_msg(wc->status),
257 wc->status, wc->vendor_err);
258}
259
260/**
261 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
262 * @cq: completion queue (ignored)
263 * @wc: completed WR
249 * 264 *
250 * WARNING: Only wr_id and status are reliable at this point
251 */ 265 */
252static void 266static void
253__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_mw *r) 267frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
254{ 268{
255 if (likely(wc->status == IB_WC_SUCCESS)) 269 struct rpcrdma_frmr *frmr;
256 return; 270 struct ib_cqe *cqe;
257
258 /* WARNING: Only wr_id and status are reliable at this point */
259 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
260 if (wc->status == IB_WC_WR_FLUSH_ERR)
261 dprintk("RPC: %s: frmr %p flushed\n", __func__, r);
262 else
263 pr_warn("RPC: %s: frmr %p error, status %s (%d)\n",
264 __func__, r, ib_wc_status_msg(wc->status), wc->status);
265 271
266 r->r.frmr.fr_state = FRMR_IS_STALE; 272 /* WARNING: Only wr_cqe and status are reliable at this point */
273 if (wc->status != IB_WC_SUCCESS) {
274 cqe = wc->wr_cqe;
275 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
276 __frwr_sendcompletion_flush(wc, frmr, "fastreg");
277 }
267} 278}
268 279
280/**
281 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
282 * @cq: completion queue (ignored)
283 * @wc: completed WR
284 *
285 */
269static void 286static void
270frwr_sendcompletion(struct ib_wc *wc) 287frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
271{ 288{
272 struct rpcrdma_mw *r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; 289 struct rpcrdma_frmr *frmr;
273 struct rpcrdma_frmr *f = &r->r.frmr; 290 struct ib_cqe *cqe;
274 291
275 if (unlikely(wc->status != IB_WC_SUCCESS)) 292 /* WARNING: Only wr_cqe and status are reliable at this point */
276 __frwr_sendcompletion_flush(wc, r); 293 if (wc->status != IB_WC_SUCCESS) {
294 cqe = wc->wr_cqe;
295 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
296 __frwr_sendcompletion_flush(wc, frmr, "localinv");
297 }
298}
277 299
278 if (f->fr_waiter) 300/**
279 complete(&f->fr_linv_done); 301 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
302 * @cq: completion queue (ignored)
303 * @wc: completed WR
304 *
305 * Awaken anyone waiting for an MR to finish being fenced.
306 */
307static void
308frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
309{
310 struct rpcrdma_frmr *frmr;
311 struct ib_cqe *cqe;
312
313 /* WARNING: Only wr_cqe and status are reliable at this point */
314 cqe = wc->wr_cqe;
315 frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
316 if (wc->status != IB_WC_SUCCESS)
317 __frwr_sendcompletion_flush(wc, frmr, "localinv");
318 complete_all(&frmr->fr_linv_done);
280} 319}
281 320
282static int 321static int
@@ -313,8 +352,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
313 352
314 list_add(&r->mw_list, &buf->rb_mws); 353 list_add(&r->mw_list, &buf->rb_mws);
315 list_add(&r->mw_all, &buf->rb_all); 354 list_add(&r->mw_all, &buf->rb_all);
316 r->mw_sendcompletion = frwr_sendcompletion; 355 r->frmr.fr_xprt = r_xprt;
317 r->r.frmr.fr_xprt = r_xprt;
318 } 356 }
319 357
320 return 0; 358 return 0;
@@ -347,10 +385,9 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
347 mw = rpcrdma_get_mw(r_xprt); 385 mw = rpcrdma_get_mw(r_xprt);
348 if (!mw) 386 if (!mw)
349 return -ENOMEM; 387 return -ENOMEM;
350 } while (mw->r.frmr.fr_state != FRMR_IS_INVALID); 388 } while (mw->frmr.fr_state != FRMR_IS_INVALID);
351 frmr = &mw->r.frmr; 389 frmr = &mw->frmr;
352 frmr->fr_state = FRMR_IS_VALID; 390 frmr->fr_state = FRMR_IS_VALID;
353 frmr->fr_waiter = false;
354 mr = frmr->fr_mr; 391 mr = frmr->fr_mr;
355 reg_wr = &frmr->fr_regwr; 392 reg_wr = &frmr->fr_regwr;
356 393
@@ -400,7 +437,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
400 437
401 reg_wr->wr.next = NULL; 438 reg_wr->wr.next = NULL;
402 reg_wr->wr.opcode = IB_WR_REG_MR; 439 reg_wr->wr.opcode = IB_WR_REG_MR;
403 reg_wr->wr.wr_id = (uintptr_t)mw; 440 frmr->fr_cqe.done = frwr_wc_fastreg;
441 reg_wr->wr.wr_cqe = &frmr->fr_cqe;
404 reg_wr->wr.num_sge = 0; 442 reg_wr->wr.num_sge = 0;
405 reg_wr->wr.send_flags = 0; 443 reg_wr->wr.send_flags = 0;
406 reg_wr->mr = mr; 444 reg_wr->mr = mr;
@@ -434,15 +472,15 @@ static struct ib_send_wr *
434__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg) 472__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
435{ 473{
436 struct rpcrdma_mw *mw = seg->rl_mw; 474 struct rpcrdma_mw *mw = seg->rl_mw;
437 struct rpcrdma_frmr *f = &mw->r.frmr; 475 struct rpcrdma_frmr *f = &mw->frmr;
438 struct ib_send_wr *invalidate_wr; 476 struct ib_send_wr *invalidate_wr;
439 477
440 f->fr_waiter = false;
441 f->fr_state = FRMR_IS_INVALID; 478 f->fr_state = FRMR_IS_INVALID;
442 invalidate_wr = &f->fr_invwr; 479 invalidate_wr = &f->fr_invwr;
443 480
444 memset(invalidate_wr, 0, sizeof(*invalidate_wr)); 481 memset(invalidate_wr, 0, sizeof(*invalidate_wr));
445 invalidate_wr->wr_id = (unsigned long)(void *)mw; 482 f->fr_cqe.done = frwr_wc_localinv;
483 invalidate_wr->wr_cqe = &f->fr_cqe;
446 invalidate_wr->opcode = IB_WR_LOCAL_INV; 484 invalidate_wr->opcode = IB_WR_LOCAL_INV;
447 invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey; 485 invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;
448 486
@@ -455,7 +493,7 @@ __frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
455{ 493{
456 struct ib_device *device = r_xprt->rx_ia.ri_device; 494 struct ib_device *device = r_xprt->rx_ia.ri_device;
457 struct rpcrdma_mw *mw = seg->rl_mw; 495 struct rpcrdma_mw *mw = seg->rl_mw;
458 struct rpcrdma_frmr *f = &mw->r.frmr; 496 struct rpcrdma_frmr *f = &mw->frmr;
459 497
460 seg->rl_mw = NULL; 498 seg->rl_mw = NULL;
461 499
@@ -504,15 +542,15 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
504 542
505 i += seg->mr_nsegs; 543 i += seg->mr_nsegs;
506 } 544 }
507 f = &seg->rl_mw->r.frmr; 545 f = &seg->rl_mw->frmr;
508 546
509 /* Strong send queue ordering guarantees that when the 547 /* Strong send queue ordering guarantees that when the
510 * last WR in the chain completes, all WRs in the chain 548 * last WR in the chain completes, all WRs in the chain
511 * are complete. 549 * are complete.
512 */ 550 */
513 f->fr_invwr.send_flags = IB_SEND_SIGNALED; 551 f->fr_invwr.send_flags = IB_SEND_SIGNALED;
514 f->fr_waiter = true; 552 f->fr_cqe.done = frwr_wc_localinv_wake;
515 init_completion(&f->fr_linv_done); 553 reinit_completion(&f->fr_linv_done);
516 INIT_CQCOUNT(&r_xprt->rx_ep); 554 INIT_CQCOUNT(&r_xprt->rx_ep);
517 555
518 /* Transport disconnect drains the receive CQ before it 556 /* Transport disconnect drains the receive CQ before it
@@ -520,14 +558,18 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
520 * unless ri_id->qp is a valid pointer. 558 * unless ri_id->qp is a valid pointer.
521 */ 559 */
522 rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr); 560 rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
523 if (rc) 561 if (rc) {
524 pr_warn("%s: ib_post_send failed %i\n", __func__, rc); 562 pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
563 rdma_disconnect(ia->ri_id);
564 goto unmap;
565 }
525 566
526 wait_for_completion(&f->fr_linv_done); 567 wait_for_completion(&f->fr_linv_done);
527 568
528 /* ORDER: Now DMA unmap all of the req's MRs, and return 569 /* ORDER: Now DMA unmap all of the req's MRs, and return
529 * them to the free MW list. 570 * them to the free MW list.
530 */ 571 */
572unmap:
531 for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) { 573 for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
532 seg = &req->rl_segments[i]; 574 seg = &req->rl_segments[i];
533 575
@@ -549,7 +591,7 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
549 struct rpcrdma_mr_seg *seg1 = seg; 591 struct rpcrdma_mr_seg *seg1 = seg;
550 struct rpcrdma_ia *ia = &r_xprt->rx_ia; 592 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
551 struct rpcrdma_mw *mw = seg1->rl_mw; 593 struct rpcrdma_mw *mw = seg1->rl_mw;
552 struct rpcrdma_frmr *frmr = &mw->r.frmr; 594 struct rpcrdma_frmr *frmr = &mw->frmr;
553 struct ib_send_wr *invalidate_wr, *bad_wr; 595 struct ib_send_wr *invalidate_wr, *bad_wr;
554 int rc, nsegs = seg->mr_nsegs; 596 int rc, nsegs = seg->mr_nsegs;
555 597
@@ -557,10 +599,11 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
557 599
558 seg1->rl_mw = NULL; 600 seg1->rl_mw = NULL;
559 frmr->fr_state = FRMR_IS_INVALID; 601 frmr->fr_state = FRMR_IS_INVALID;
560 invalidate_wr = &mw->r.frmr.fr_invwr; 602 invalidate_wr = &mw->frmr.fr_invwr;
561 603
562 memset(invalidate_wr, 0, sizeof(*invalidate_wr)); 604 memset(invalidate_wr, 0, sizeof(*invalidate_wr));
563 invalidate_wr->wr_id = (uintptr_t)mw; 605 frmr->fr_cqe.done = frwr_wc_localinv;
606 invalidate_wr->wr_cqe = &frmr->fr_cqe;
564 invalidate_wr->opcode = IB_WR_LOCAL_INV; 607 invalidate_wr->opcode = IB_WR_LOCAL_INV;
565 invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey; 608 invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
566 DECR_CQCOUNT(&r_xprt->rx_ep); 609 DECR_CQCOUNT(&r_xprt->rx_ep);
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index dbb302ecf590..481b9b6f4a15 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -68,7 +68,6 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
68 rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing)); 68 rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
69 seg->mr_rkey = ia->ri_dma_mr->rkey; 69 seg->mr_rkey = ia->ri_dma_mr->rkey;
70 seg->mr_base = seg->mr_dma; 70 seg->mr_base = seg->mr_dma;
71 seg->mr_nsegs = 1;
72 return 1; 71 return 1;
73} 72}
74 73
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 0f28f2d743ed..888823bb6dae 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -132,6 +132,33 @@ rpcrdma_tail_pullup(struct xdr_buf *buf)
132 return tlen; 132 return tlen;
133} 133}
134 134
135/* Split "vec" on page boundaries into segments. FMR registers pages,
136 * not a byte range. Other modes coalesce these segments into a single
137 * MR when they can.
138 */
139static int
140rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
141 int n, int nsegs)
142{
143 size_t page_offset;
144 u32 remaining;
145 char *base;
146
147 base = vec->iov_base;
148 page_offset = offset_in_page(base);
149 remaining = vec->iov_len;
150 while (remaining && n < nsegs) {
151 seg[n].mr_page = NULL;
152 seg[n].mr_offset = base;
153 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
154 remaining -= seg[n].mr_len;
155 base += seg[n].mr_len;
156 ++n;
157 page_offset = 0;
158 }
159 return n;
160}
161
135/* 162/*
136 * Chunk assembly from upper layer xdr_buf. 163 * Chunk assembly from upper layer xdr_buf.
137 * 164 *
@@ -150,11 +177,10 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
150 int page_base; 177 int page_base;
151 struct page **ppages; 178 struct page **ppages;
152 179
153 if (pos == 0 && xdrbuf->head[0].iov_len) { 180 if (pos == 0) {
154 seg[n].mr_page = NULL; 181 n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
155 seg[n].mr_offset = xdrbuf->head[0].iov_base; 182 if (n == nsegs)
156 seg[n].mr_len = xdrbuf->head[0].iov_len; 183 return -EIO;
157 ++n;
158 } 184 }
159 185
160 len = xdrbuf->page_len; 186 len = xdrbuf->page_len;
@@ -192,13 +218,9 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
192 * xdr pad bytes, saving the server an RDMA operation. */ 218 * xdr pad bytes, saving the server an RDMA operation. */
193 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) 219 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
194 return n; 220 return n;
221 n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
195 if (n == nsegs) 222 if (n == nsegs)
196 /* Tail remains, but we're out of segments */
197 return -EIO; 223 return -EIO;
198 seg[n].mr_page = NULL;
199 seg[n].mr_offset = xdrbuf->tail[0].iov_base;
200 seg[n].mr_len = xdrbuf->tail[0].iov_len;
201 ++n;
202 } 224 }
203 225
204 return n; 226 return n;
@@ -773,20 +795,17 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
773 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; 795 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
774 struct rpc_xprt *xprt = &r_xprt->rx_xprt; 796 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
775 __be32 *iptr; 797 __be32 *iptr;
776 int rdmalen, status; 798 int rdmalen, status, rmerr;
777 unsigned long cwnd; 799 unsigned long cwnd;
778 u32 credits;
779 800
780 dprintk("RPC: %s: incoming rep %p\n", __func__, rep); 801 dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
781 802
782 if (rep->rr_len == RPCRDMA_BAD_LEN) 803 if (rep->rr_len == RPCRDMA_BAD_LEN)
783 goto out_badstatus; 804 goto out_badstatus;
784 if (rep->rr_len < RPCRDMA_HDRLEN_MIN) 805 if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
785 goto out_shortreply; 806 goto out_shortreply;
786 807
787 headerp = rdmab_to_msg(rep->rr_rdmabuf); 808 headerp = rdmab_to_msg(rep->rr_rdmabuf);
788 if (headerp->rm_vers != rpcrdma_version)
789 goto out_badversion;
790#if defined(CONFIG_SUNRPC_BACKCHANNEL) 809#if defined(CONFIG_SUNRPC_BACKCHANNEL)
791 if (rpcrdma_is_bcall(headerp)) 810 if (rpcrdma_is_bcall(headerp))
792 goto out_bcall; 811 goto out_bcall;
@@ -809,15 +828,16 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
809 */ 828 */
810 list_del_init(&rqst->rq_list); 829 list_del_init(&rqst->rq_list);
811 spin_unlock_bh(&xprt->transport_lock); 830 spin_unlock_bh(&xprt->transport_lock);
812 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n" 831 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
813 " RPC request 0x%p xid 0x%08x\n", 832 __func__, rep, req, be32_to_cpu(headerp->rm_xid));
814 __func__, rep, req, rqst,
815 be32_to_cpu(headerp->rm_xid));
816 833
817 /* from here on, the reply is no longer an orphan */ 834 /* from here on, the reply is no longer an orphan */
818 req->rl_reply = rep; 835 req->rl_reply = rep;
819 xprt->reestablish_timeout = 0; 836 xprt->reestablish_timeout = 0;
820 837
838 if (headerp->rm_vers != rpcrdma_version)
839 goto out_badversion;
840
821 /* check for expected message types */ 841 /* check for expected message types */
822 /* The order of some of these tests is important. */ 842 /* The order of some of these tests is important. */
823 switch (headerp->rm_type) { 843 switch (headerp->rm_type) {
@@ -878,6 +898,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
878 status = rdmalen; 898 status = rdmalen;
879 break; 899 break;
880 900
901 case rdma_error:
902 goto out_rdmaerr;
903
881badheader: 904badheader:
882 default: 905 default:
883 dprintk("%s: invalid rpcrdma reply header (type %d):" 906 dprintk("%s: invalid rpcrdma reply header (type %d):"
@@ -893,6 +916,7 @@ badheader:
893 break; 916 break;
894 } 917 }
895 918
919out:
896 /* Invalidate and flush the data payloads before waking the 920 /* Invalidate and flush the data payloads before waking the
897 * waiting application. This guarantees the memory region is 921 * waiting application. This guarantees the memory region is
898 * properly fenced from the server before the application 922 * properly fenced from the server before the application
@@ -903,15 +927,9 @@ badheader:
903 if (req->rl_nchunks) 927 if (req->rl_nchunks)
904 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req); 928 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
905 929
906 credits = be32_to_cpu(headerp->rm_credit);
907 if (credits == 0)
908 credits = 1; /* don't deadlock */
909 else if (credits > r_xprt->rx_buf.rb_max_requests)
910 credits = r_xprt->rx_buf.rb_max_requests;
911
912 spin_lock_bh(&xprt->transport_lock); 930 spin_lock_bh(&xprt->transport_lock);
913 cwnd = xprt->cwnd; 931 cwnd = xprt->cwnd;
914 xprt->cwnd = credits << RPC_CWNDSHIFT; 932 xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
915 if (xprt->cwnd > cwnd) 933 if (xprt->cwnd > cwnd)
916 xprt_release_rqst_cong(rqst->rq_task); 934 xprt_release_rqst_cong(rqst->rq_task);
917 935
@@ -935,13 +953,43 @@ out_bcall:
935 return; 953 return;
936#endif 954#endif
937 955
938out_shortreply: 956/* If the incoming reply terminated a pending RPC, the next
939 dprintk("RPC: %s: short/invalid reply\n", __func__); 957 * RPC call will post a replacement receive buffer as it is
940 goto repost; 958 * being marshaled.
941 959 */
942out_badversion: 960out_badversion:
943 dprintk("RPC: %s: invalid version %d\n", 961 dprintk("RPC: %s: invalid version %d\n",
944 __func__, be32_to_cpu(headerp->rm_vers)); 962 __func__, be32_to_cpu(headerp->rm_vers));
963 status = -EIO;
964 r_xprt->rx_stats.bad_reply_count++;
965 goto out;
966
967out_rdmaerr:
968 rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
969 switch (rmerr) {
970 case ERR_VERS:
971 pr_err("%s: server reports header version error (%u-%u)\n",
972 __func__,
973 be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
974 be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
975 break;
976 case ERR_CHUNK:
977 pr_err("%s: server reports header decoding error\n",
978 __func__);
979 break;
980 default:
981 pr_err("%s: server reports unknown error %d\n",
982 __func__, rmerr);
983 }
984 status = -EREMOTEIO;
985 r_xprt->rx_stats.bad_reply_count++;
986 goto out;
987
988/* If no pending RPC transaction was matched, post a replacement
989 * receive buffer before returning.
990 */
991out_shortreply:
992 dprintk("RPC: %s: short/invalid reply\n", __func__);
945 goto repost; 993 goto repost;
946 994
947out_nomatch: 995out_nomatch:
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 878f1bfb1db9..f5ed9f982cd7 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -112,89 +112,65 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
112 } 112 }
113} 113}
114 114
115/**
116 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
117 * @cq: completion queue (ignored)
118 * @wc: completed WR
119 *
120 */
115static void 121static void
116rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context) 122rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
117{ 123{
118 struct rpcrdma_ep *ep = context; 124 /* WARNING: Only wr_cqe and status are reliable at this point */
119 125 if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
120 pr_err("RPC: %s: %s on device %s ep %p\n", 126 pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
121 __func__, ib_event_msg(event->event), 127 ib_wc_status_msg(wc->status),
122 event->device->name, context); 128 wc->status, wc->vendor_err);
123 if (ep->rep_connected == 1) {
124 ep->rep_connected = -EIO;
125 rpcrdma_conn_func(ep);
126 wake_up_all(&ep->rep_connect_wait);
127 }
128} 129}
129 130
130static void 131static void
131rpcrdma_sendcq_process_wc(struct ib_wc *wc) 132rpcrdma_receive_worker(struct work_struct *work)
132{ 133{
133 /* WARNING: Only wr_id and status are reliable at this point */ 134 struct rpcrdma_rep *rep =
134 if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) { 135 container_of(work, struct rpcrdma_rep, rr_work);
135 if (wc->status != IB_WC_SUCCESS &&
136 wc->status != IB_WC_WR_FLUSH_ERR)
137 pr_err("RPC: %s: SEND: %s\n",
138 __func__, ib_wc_status_msg(wc->status));
139 } else {
140 struct rpcrdma_mw *r;
141 136
142 r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; 137 rpcrdma_reply_handler(rep);
143 r->mw_sendcompletion(wc);
144 }
145} 138}
146 139
147/* The common case is a single send completion is waiting. By 140/* Perform basic sanity checking to avoid using garbage
148 * passing two WC entries to ib_poll_cq, a return code of 1 141 * to update the credit grant value.
149 * means there is exactly one WC waiting and no more. We don't
150 * have to invoke ib_poll_cq again to know that the CQ has been
151 * properly drained.
152 */ 142 */
153static void 143static void
154rpcrdma_sendcq_poll(struct ib_cq *cq) 144rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
155{ 145{
156 struct ib_wc *pos, wcs[2]; 146 struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
157 int count, rc; 147 struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
148 u32 credits;
158 149
159 do { 150 if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
160 pos = wcs; 151 return;
161 152
162 rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos); 153 credits = be32_to_cpu(rmsgp->rm_credit);
163 if (rc < 0) 154 if (credits == 0)
164 break; 155 credits = 1; /* don't deadlock */
156 else if (credits > buffer->rb_max_requests)
157 credits = buffer->rb_max_requests;
165 158
166 count = rc; 159 atomic_set(&buffer->rb_credits, credits);
167 while (count-- > 0)
168 rpcrdma_sendcq_process_wc(pos++);
169 } while (rc == ARRAY_SIZE(wcs));
170 return;
171} 160}
172 161
173/* Handle provider send completion upcalls. 162/**
163 * rpcrdma_receive_wc - Invoked by RDMA provider for each polled Receive WC
164 * @cq: completion queue (ignored)
165 * @wc: completed WR
166 *
174 */ 167 */
175static void 168static void
176rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context) 169rpcrdma_receive_wc(struct ib_cq *cq, struct ib_wc *wc)
177{ 170{
178 do { 171 struct ib_cqe *cqe = wc->wr_cqe;
179 rpcrdma_sendcq_poll(cq); 172 struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
180 } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | 173 rr_cqe);
181 IB_CQ_REPORT_MISSED_EVENTS) > 0);
182}
183
184static void
185rpcrdma_receive_worker(struct work_struct *work)
186{
187 struct rpcrdma_rep *rep =
188 container_of(work, struct rpcrdma_rep, rr_work);
189
190 rpcrdma_reply_handler(rep);
191}
192
193static void
194rpcrdma_recvcq_process_wc(struct ib_wc *wc)
195{
196 struct rpcrdma_rep *rep =
197 (struct rpcrdma_rep *)(unsigned long)wc->wr_id;
198 174
199 /* WARNING: Only wr_id and status are reliable at this point */ 175 /* WARNING: Only wr_id and status are reliable at this point */
200 if (wc->status != IB_WC_SUCCESS) 176 if (wc->status != IB_WC_SUCCESS)
@@ -211,7 +187,8 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc)
211 ib_dma_sync_single_for_cpu(rep->rr_device, 187 ib_dma_sync_single_for_cpu(rep->rr_device,
212 rdmab_addr(rep->rr_rdmabuf), 188 rdmab_addr(rep->rr_rdmabuf),
213 rep->rr_len, DMA_FROM_DEVICE); 189 rep->rr_len, DMA_FROM_DEVICE);
214 prefetch(rdmab_to_msg(rep->rr_rdmabuf)); 190
191 rpcrdma_update_granted_credits(rep);
215 192
216out_schedule: 193out_schedule:
217 queue_work(rpcrdma_receive_wq, &rep->rr_work); 194 queue_work(rpcrdma_receive_wq, &rep->rr_work);
@@ -219,57 +196,20 @@ out_schedule:
219 196
220out_fail: 197out_fail:
221 if (wc->status != IB_WC_WR_FLUSH_ERR) 198 if (wc->status != IB_WC_WR_FLUSH_ERR)
222 pr_err("RPC: %s: rep %p: %s\n", 199 pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
223 __func__, rep, ib_wc_status_msg(wc->status)); 200 ib_wc_status_msg(wc->status),
201 wc->status, wc->vendor_err);
224 rep->rr_len = RPCRDMA_BAD_LEN; 202 rep->rr_len = RPCRDMA_BAD_LEN;
225 goto out_schedule; 203 goto out_schedule;
226} 204}
227 205
228/* The wc array is on stack: automatic memory is always CPU-local.
229 *
230 * struct ib_wc is 64 bytes, making the poll array potentially
231 * large. But this is at the bottom of the call chain. Further
232 * substantial work is done in another thread.
233 */
234static void
235rpcrdma_recvcq_poll(struct ib_cq *cq)
236{
237 struct ib_wc *pos, wcs[4];
238 int count, rc;
239
240 do {
241 pos = wcs;
242
243 rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos);
244 if (rc < 0)
245 break;
246
247 count = rc;
248 while (count-- > 0)
249 rpcrdma_recvcq_process_wc(pos++);
250 } while (rc == ARRAY_SIZE(wcs));
251}
252
253/* Handle provider receive completion upcalls.
254 */
255static void
256rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
257{
258 do {
259 rpcrdma_recvcq_poll(cq);
260 } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
261 IB_CQ_REPORT_MISSED_EVENTS) > 0);
262}
263
264static void 206static void
265rpcrdma_flush_cqs(struct rpcrdma_ep *ep) 207rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
266{ 208{
267 struct ib_wc wc; 209 struct ib_wc wc;
268 210
269 while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0) 211 while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
270 rpcrdma_recvcq_process_wc(&wc); 212 rpcrdma_receive_wc(NULL, &wc);
271 while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
272 rpcrdma_sendcq_process_wc(&wc);
273} 213}
274 214
275static int 215static int
@@ -330,6 +270,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
330connected: 270connected:
331 dprintk("RPC: %s: %sconnected\n", 271 dprintk("RPC: %s: %sconnected\n",
332 __func__, connstate > 0 ? "" : "dis"); 272 __func__, connstate > 0 ? "" : "dis");
273 atomic_set(&xprt->rx_buf.rb_credits, 1);
333 ep->rep_connected = connstate; 274 ep->rep_connected = connstate;
334 rpcrdma_conn_func(ep); 275 rpcrdma_conn_func(ep);
335 wake_up_all(&ep->rep_connect_wait); 276 wake_up_all(&ep->rep_connect_wait);
@@ -560,9 +501,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
560 struct rpcrdma_create_data_internal *cdata) 501 struct rpcrdma_create_data_internal *cdata)
561{ 502{
562 struct ib_cq *sendcq, *recvcq; 503 struct ib_cq *sendcq, *recvcq;
563 struct ib_cq_init_attr cq_attr = {};
564 unsigned int max_qp_wr; 504 unsigned int max_qp_wr;
565 int rc, err; 505 int rc;
566 506
567 if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) { 507 if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_IOVS) {
568 dprintk("RPC: %s: insufficient sge's available\n", 508 dprintk("RPC: %s: insufficient sge's available\n",
@@ -614,9 +554,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
614 init_waitqueue_head(&ep->rep_connect_wait); 554 init_waitqueue_head(&ep->rep_connect_wait);
615 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); 555 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
616 556
617 cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1; 557 sendcq = ib_alloc_cq(ia->ri_device, NULL,
618 sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall, 558 ep->rep_attr.cap.max_send_wr + 1,
619 rpcrdma_cq_async_error_upcall, NULL, &cq_attr); 559 0, IB_POLL_SOFTIRQ);
620 if (IS_ERR(sendcq)) { 560 if (IS_ERR(sendcq)) {
621 rc = PTR_ERR(sendcq); 561 rc = PTR_ERR(sendcq);
622 dprintk("RPC: %s: failed to create send CQ: %i\n", 562 dprintk("RPC: %s: failed to create send CQ: %i\n",
@@ -624,16 +564,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
624 goto out1; 564 goto out1;
625 } 565 }
626 566
627 rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP); 567 recvcq = ib_alloc_cq(ia->ri_device, NULL,
628 if (rc) { 568 ep->rep_attr.cap.max_recv_wr + 1,
629 dprintk("RPC: %s: ib_req_notify_cq failed: %i\n", 569 0, IB_POLL_SOFTIRQ);
630 __func__, rc);
631 goto out2;
632 }
633
634 cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
635 recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
636 rpcrdma_cq_async_error_upcall, NULL, &cq_attr);
637 if (IS_ERR(recvcq)) { 570 if (IS_ERR(recvcq)) {
638 rc = PTR_ERR(recvcq); 571 rc = PTR_ERR(recvcq);
639 dprintk("RPC: %s: failed to create recv CQ: %i\n", 572 dprintk("RPC: %s: failed to create recv CQ: %i\n",
@@ -641,14 +574,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
641 goto out2; 574 goto out2;
642 } 575 }
643 576
644 rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
645 if (rc) {
646 dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
647 __func__, rc);
648 ib_destroy_cq(recvcq);
649 goto out2;
650 }
651
652 ep->rep_attr.send_cq = sendcq; 577 ep->rep_attr.send_cq = sendcq;
653 ep->rep_attr.recv_cq = recvcq; 578 ep->rep_attr.recv_cq = recvcq;
654 579
@@ -673,10 +598,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
673 return 0; 598 return 0;
674 599
675out2: 600out2:
676 err = ib_destroy_cq(sendcq); 601 ib_free_cq(sendcq);
677 if (err)
678 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
679 __func__, err);
680out1: 602out1:
681 if (ia->ri_dma_mr) 603 if (ia->ri_dma_mr)
682 ib_dereg_mr(ia->ri_dma_mr); 604 ib_dereg_mr(ia->ri_dma_mr);
@@ -711,15 +633,8 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
711 ia->ri_id->qp = NULL; 633 ia->ri_id->qp = NULL;
712 } 634 }
713 635
714 rc = ib_destroy_cq(ep->rep_attr.recv_cq); 636 ib_free_cq(ep->rep_attr.recv_cq);
715 if (rc) 637 ib_free_cq(ep->rep_attr.send_cq);
716 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
717 __func__, rc);
718
719 rc = ib_destroy_cq(ep->rep_attr.send_cq);
720 if (rc)
721 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
722 __func__, rc);
723 638
724 if (ia->ri_dma_mr) { 639 if (ia->ri_dma_mr) {
725 rc = ib_dereg_mr(ia->ri_dma_mr); 640 rc = ib_dereg_mr(ia->ri_dma_mr);
@@ -898,6 +813,7 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
898 spin_lock(&buffer->rb_reqslock); 813 spin_lock(&buffer->rb_reqslock);
899 list_add(&req->rl_all, &buffer->rb_allreqs); 814 list_add(&req->rl_all, &buffer->rb_allreqs);
900 spin_unlock(&buffer->rb_reqslock); 815 spin_unlock(&buffer->rb_reqslock);
816 req->rl_cqe.done = rpcrdma_wc_send;
901 req->rl_buffer = &r_xprt->rx_buf; 817 req->rl_buffer = &r_xprt->rx_buf;
902 return req; 818 return req;
903} 819}
@@ -923,6 +839,7 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
923 } 839 }
924 840
925 rep->rr_device = ia->ri_device; 841 rep->rr_device = ia->ri_device;
842 rep->rr_cqe.done = rpcrdma_receive_wc;
926 rep->rr_rxprt = r_xprt; 843 rep->rr_rxprt = r_xprt;
927 INIT_WORK(&rep->rr_work, rpcrdma_receive_worker); 844 INIT_WORK(&rep->rr_work, rpcrdma_receive_worker);
928 return rep; 845 return rep;
@@ -943,6 +860,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
943 buf->rb_max_requests = r_xprt->rx_data.max_requests; 860 buf->rb_max_requests = r_xprt->rx_data.max_requests;
944 buf->rb_bc_srv_max_requests = 0; 861 buf->rb_bc_srv_max_requests = 0;
945 spin_lock_init(&buf->rb_lock); 862 spin_lock_init(&buf->rb_lock);
863 atomic_set(&buf->rb_credits, 1);
946 864
947 rc = ia->ri_ops->ro_init(r_xprt); 865 rc = ia->ri_ops->ro_init(r_xprt);
948 if (rc) 866 if (rc)
@@ -1259,7 +1177,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
1259 } 1177 }
1260 1178
1261 send_wr.next = NULL; 1179 send_wr.next = NULL;
1262 send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION; 1180 send_wr.wr_cqe = &req->rl_cqe;
1263 send_wr.sg_list = iov; 1181 send_wr.sg_list = iov;
1264 send_wr.num_sge = req->rl_niovs; 1182 send_wr.num_sge = req->rl_niovs;
1265 send_wr.opcode = IB_WR_SEND; 1183 send_wr.opcode = IB_WR_SEND;
@@ -1297,7 +1215,7 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
1297 int rc; 1215 int rc;
1298 1216
1299 recv_wr.next = NULL; 1217 recv_wr.next = NULL;
1300 recv_wr.wr_id = (u64) (unsigned long) rep; 1218 recv_wr.wr_cqe = &rep->rr_cqe;
1301 recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; 1219 recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
1302 recv_wr.num_sge = 1; 1220 recv_wr.num_sge = 1;
1303 1221
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 38fe11b09875..2ebc743cb96f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -95,10 +95,6 @@ struct rpcrdma_ep {
95#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit) 95#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
96#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount) 96#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
97 97
98/* Force completion handler to ignore the signal
99 */
100#define RPCRDMA_IGNORE_COMPLETION (0ULL)
101
102/* Pre-allocate extra Work Requests for handling backward receives 98/* Pre-allocate extra Work Requests for handling backward receives
103 * and sends. This is a fixed value because the Work Queues are 99 * and sends. This is a fixed value because the Work Queues are
104 * allocated when the forward channel is set up. 100 * allocated when the forward channel is set up.
@@ -171,6 +167,7 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
171struct rpcrdma_buffer; 167struct rpcrdma_buffer;
172 168
173struct rpcrdma_rep { 169struct rpcrdma_rep {
170 struct ib_cqe rr_cqe;
174 unsigned int rr_len; 171 unsigned int rr_len;
175 struct ib_device *rr_device; 172 struct ib_device *rr_device;
176 struct rpcrdma_xprt *rr_rxprt; 173 struct rpcrdma_xprt *rr_rxprt;
@@ -204,11 +201,11 @@ struct rpcrdma_frmr {
204 struct scatterlist *sg; 201 struct scatterlist *sg;
205 int sg_nents; 202 int sg_nents;
206 struct ib_mr *fr_mr; 203 struct ib_mr *fr_mr;
204 struct ib_cqe fr_cqe;
207 enum rpcrdma_frmr_state fr_state; 205 enum rpcrdma_frmr_state fr_state;
206 struct completion fr_linv_done;
208 struct work_struct fr_work; 207 struct work_struct fr_work;
209 struct rpcrdma_xprt *fr_xprt; 208 struct rpcrdma_xprt *fr_xprt;
210 bool fr_waiter;
211 struct completion fr_linv_done;;
212 union { 209 union {
213 struct ib_reg_wr fr_regwr; 210 struct ib_reg_wr fr_regwr;
214 struct ib_send_wr fr_invwr; 211 struct ib_send_wr fr_invwr;
@@ -224,8 +221,7 @@ struct rpcrdma_mw {
224 union { 221 union {
225 struct rpcrdma_fmr fmr; 222 struct rpcrdma_fmr fmr;
226 struct rpcrdma_frmr frmr; 223 struct rpcrdma_frmr frmr;
227 } r; 224 };
228 void (*mw_sendcompletion)(struct ib_wc *);
229 struct list_head mw_list; 225 struct list_head mw_list;
230 struct list_head mw_all; 226 struct list_head mw_all;
231}; 227};
@@ -281,6 +277,7 @@ struct rpcrdma_req {
281 struct rpcrdma_regbuf *rl_sendbuf; 277 struct rpcrdma_regbuf *rl_sendbuf;
282 struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; 278 struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
283 279
280 struct ib_cqe rl_cqe;
284 struct list_head rl_all; 281 struct list_head rl_all;
285 bool rl_backchannel; 282 bool rl_backchannel;
286}; 283};
@@ -311,6 +308,7 @@ struct rpcrdma_buffer {
311 struct list_head rb_send_bufs; 308 struct list_head rb_send_bufs;
312 struct list_head rb_recv_bufs; 309 struct list_head rb_recv_bufs;
313 u32 rb_max_requests; 310 u32 rb_max_requests;
311 atomic_t rb_credits; /* most recent credit grant */
314 312
315 u32 rb_bc_srv_max_requests; 313 u32 rb_bc_srv_max_requests;
316 spinlock_t rb_reqslock; /* protect rb_allreqs */ 314 spinlock_t rb_reqslock; /* protect rb_allreqs */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0c2944fb9ae0..347cdc99ed09 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1973,8 +1973,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1973 1973
1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, 1974 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1975 NLM_F_MULTI, TIPC_NL_LINK_GET); 1975 NLM_F_MULTI, TIPC_NL_LINK_GET);
1976 if (!hdr) 1976 if (!hdr) {
1977 tipc_bcast_unlock(net);
1977 return -EMSGSIZE; 1978 return -EMSGSIZE;
1979 }
1978 1980
1979 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); 1981 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1980 if (!attrs) 1982 if (!attrs)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa97d9649a28..9d7a16fc5ca4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -346,12 +346,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
346 skb_queue_head_init(&n->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
347 for (i = 0; i < MAX_BEARERS; i++) 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock); 348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
351 if (n->addr < temp_node->addr)
352 break;
353 }
354 list_add_tail_rcu(&n->list, &temp_node->list);
355 n->state = SELF_DOWN_PEER_LEAVING; 349 n->state = SELF_DOWN_PEER_LEAVING;
356 n->signature = INVALID_NODE_SIG; 350 n->signature = INVALID_NODE_SIG;
357 n->active_links[0] = INVALID_BEARER_ID; 351 n->active_links[0] = INVALID_BEARER_ID;
@@ -372,6 +366,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
372 tipc_node_get(n); 366 tipc_node_get(n);
373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n); 367 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
374 n->keepalive_intv = U32_MAX; 368 n->keepalive_intv = U32_MAX;
369 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
370 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
371 if (n->addr < temp_node->addr)
372 break;
373 }
374 list_add_tail_rcu(&n->list, &temp_node->list);
375exit: 375exit:
376 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
377 return n; 377 return n;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 49d5093eb055..f75f847e688d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1496 UNIXCB(skb).fp = NULL; 1496 UNIXCB(skb).fp = NULL;
1497 1497
1498 for (i = scm->fp->count-1; i >= 0; i--) 1498 for (i = scm->fp->count-1; i >= 0; i--)
1499 unix_notinflight(scm->fp->fp[i]); 1499 unix_notinflight(scm->fp->user, scm->fp->fp[i]);
1500} 1500}
1501 1501
1502static void unix_destruct_scm(struct sk_buff *skb) 1502static void unix_destruct_scm(struct sk_buff *skb)
@@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1561 return -ENOMEM; 1561 return -ENOMEM;
1562 1562
1563 for (i = scm->fp->count - 1; i >= 0; i--) 1563 for (i = scm->fp->count - 1; i >= 0; i--)
1564 unix_inflight(scm->fp->fp[i]); 1564 unix_inflight(scm->fp->user, scm->fp->fp[i]);
1565 return max_level; 1565 return max_level;
1566} 1566}
1567 1567
@@ -1781,7 +1781,12 @@ restart_locked:
1781 goto out_unlock; 1781 goto out_unlock;
1782 } 1782 }
1783 1783
1784 if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { 1784 /* other == sk && unix_peer(other) != sk if
1785 * - unix_peer(sk) == NULL, destination address bound to sk
1786 * - unix_peer(sk) == sk by time of get but disconnected before lock
1787 */
1788 if (other != sk &&
1789 unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
1785 if (timeo) { 1790 if (timeo) {
1786 timeo = unix_wait_for_peer(other, timeo); 1791 timeo = unix_wait_for_peer(other, timeo);
1787 1792
@@ -2277,13 +2282,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2277 size_t size = state->size; 2282 size_t size = state->size;
2278 unsigned int last_len; 2283 unsigned int last_len;
2279 2284
2280 err = -EINVAL; 2285 if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
2281 if (sk->sk_state != TCP_ESTABLISHED) 2286 err = -EINVAL;
2282 goto out; 2287 goto out;
2288 }
2283 2289
2284 err = -EOPNOTSUPP; 2290 if (unlikely(flags & MSG_OOB)) {
2285 if (flags & MSG_OOB) 2291 err = -EOPNOTSUPP;
2286 goto out; 2292 goto out;
2293 }
2287 2294
2288 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); 2295 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2289 timeo = sock_rcvtimeo(sk, noblock); 2296 timeo = sock_rcvtimeo(sk, noblock);
@@ -2305,6 +2312,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
2305 bool drop_skb; 2312 bool drop_skb;
2306 struct sk_buff *skb, *last; 2313 struct sk_buff *skb, *last;
2307 2314
2315redo:
2308 unix_state_lock(sk); 2316 unix_state_lock(sk);
2309 if (sock_flag(sk, SOCK_DEAD)) { 2317 if (sock_flag(sk, SOCK_DEAD)) {
2310 err = -ECONNRESET; 2318 err = -ECONNRESET;
@@ -2329,9 +2337,11 @@ again:
2329 goto unlock; 2337 goto unlock;
2330 2338
2331 unix_state_unlock(sk); 2339 unix_state_unlock(sk);
2332 err = -EAGAIN; 2340 if (!timeo) {
2333 if (!timeo) 2341 err = -EAGAIN;
2334 break; 2342 break;
2343 }
2344
2335 mutex_unlock(&u->readlock); 2345 mutex_unlock(&u->readlock);
2336 2346
2337 timeo = unix_stream_data_wait(sk, timeo, last, 2347 timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2344,7 +2354,7 @@ again:
2344 } 2354 }
2345 2355
2346 mutex_lock(&u->readlock); 2356 mutex_lock(&u->readlock);
2347 continue; 2357 goto redo;
2348unlock: 2358unlock:
2349 unix_state_unlock(sk); 2359 unix_state_unlock(sk);
2350 break; 2360 break;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c512f64d5287..4d9679701a6d 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -220,7 +220,7 @@ done:
220 return skb->len; 220 return skb->len;
221} 221}
222 222
223static struct sock *unix_lookup_by_ino(int ino) 223static struct sock *unix_lookup_by_ino(unsigned int ino)
224{ 224{
225 int i; 225 int i;
226 struct sock *sk; 226 struct sock *sk;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 8fcdc2283af5..6a0d48525fcf 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
116 * descriptor if it is for an AF_UNIX socket. 116 * descriptor if it is for an AF_UNIX socket.
117 */ 117 */
118 118
119void unix_inflight(struct file *fp) 119void unix_inflight(struct user_struct *user, struct file *fp)
120{ 120{
121 struct sock *s = unix_get_socket(fp); 121 struct sock *s = unix_get_socket(fp);
122 122
@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
133 } 133 }
134 unix_tot_inflight++; 134 unix_tot_inflight++;
135 } 135 }
136 fp->f_cred->user->unix_inflight++; 136 user->unix_inflight++;
137 spin_unlock(&unix_gc_lock); 137 spin_unlock(&unix_gc_lock);
138} 138}
139 139
140void unix_notinflight(struct file *fp) 140void unix_notinflight(struct user_struct *user, struct file *fp)
141{ 141{
142 struct sock *s = unix_get_socket(fp); 142 struct sock *s = unix_get_socket(fp);
143 143
@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
152 list_del_init(&u->link); 152 list_del_init(&u->link);
153 unix_tot_inflight--; 153 unix_tot_inflight--;
154 } 154 }
155 fp->f_cred->user->unix_inflight--; 155 user->unix_inflight--;
156 spin_unlock(&unix_gc_lock); 156 spin_unlock(&unix_gc_lock);
157} 157}
158 158
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7fd1220fbfa0..bbe65dcb9738 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1557,8 +1557,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1557 if (err < 0) 1557 if (err < 0)
1558 goto out; 1558 goto out;
1559 1559
1560 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1561
1562 while (total_written < len) { 1560 while (total_written < len) {
1563 ssize_t written; 1561 ssize_t written;
1564 1562
@@ -1578,7 +1576,9 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1578 goto out_wait; 1576 goto out_wait;
1579 1577
1580 release_sock(sk); 1578 release_sock(sk);
1579 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1581 timeout = schedule_timeout(timeout); 1580 timeout = schedule_timeout(timeout);
1581 finish_wait(sk_sleep(sk), &wait);
1582 lock_sock(sk); 1582 lock_sock(sk);
1583 if (signal_pending(current)) { 1583 if (signal_pending(current)) {
1584 err = sock_intr_errno(timeout); 1584 err = sock_intr_errno(timeout);
@@ -1588,8 +1588,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1588 goto out_wait; 1588 goto out_wait;
1589 } 1589 }
1590 1590
1591 prepare_to_wait(sk_sleep(sk), &wait,
1592 TASK_INTERRUPTIBLE);
1593 } 1591 }
1594 1592
1595 /* These checks occur both as part of and after the loop 1593 /* These checks occur both as part of and after the loop
@@ -1635,7 +1633,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1635out_wait: 1633out_wait:
1636 if (total_written > 0) 1634 if (total_written > 0)
1637 err = total_written; 1635 err = total_written;
1638 finish_wait(sk_sleep(sk), &wait);
1639out: 1636out:
1640 release_sock(sk); 1637 release_sock(sk);
1641 return err; 1638 return err;
@@ -1716,7 +1713,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1716 if (err < 0) 1713 if (err < 0)
1717 goto out; 1714 goto out;
1718 1715
1719 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1720 1716
1721 while (1) { 1717 while (1) {
1722 s64 ready = vsock_stream_has_data(vsk); 1718 s64 ready = vsock_stream_has_data(vsk);
@@ -1727,7 +1723,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1727 */ 1723 */
1728 1724
1729 err = -ENOMEM; 1725 err = -ENOMEM;
1730 goto out_wait; 1726 goto out;
1731 } else if (ready > 0) { 1727 } else if (ready > 0) {
1732 ssize_t read; 1728 ssize_t read;
1733 1729
@@ -1750,7 +1746,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1750 vsk, target, read, 1746 vsk, target, read,
1751 !(flags & MSG_PEEK), &recv_data); 1747 !(flags & MSG_PEEK), &recv_data);
1752 if (err < 0) 1748 if (err < 0)
1753 goto out_wait; 1749 goto out;
1754 1750
1755 if (read >= target || flags & MSG_PEEK) 1751 if (read >= target || flags & MSG_PEEK)
1756 break; 1752 break;
@@ -1773,7 +1769,9 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1773 break; 1769 break;
1774 1770
1775 release_sock(sk); 1771 release_sock(sk);
1772 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1776 timeout = schedule_timeout(timeout); 1773 timeout = schedule_timeout(timeout);
1774 finish_wait(sk_sleep(sk), &wait);
1777 lock_sock(sk); 1775 lock_sock(sk);
1778 1776
1779 if (signal_pending(current)) { 1777 if (signal_pending(current)) {
@@ -1783,9 +1781,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1783 err = -EAGAIN; 1781 err = -EAGAIN;
1784 break; 1782 break;
1785 } 1783 }
1786
1787 prepare_to_wait(sk_sleep(sk), &wait,
1788 TASK_INTERRUPTIBLE);
1789 } 1784 }
1790 } 1785 }
1791 1786
@@ -1816,8 +1811,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1816 err = copied; 1811 err = copied;
1817 } 1812 }
1818 1813
1819out_wait:
1820 finish_wait(sk_sleep(sk), &wait);
1821out: 1814out:
1822 release_sock(sk); 1815 release_sock(sk);
1823 return err; 1816 return err;
diff --git a/scripts/prune-kernel b/scripts/prune-kernel
new file mode 100755
index 000000000000..ab5034e1d081
--- /dev/null
+++ b/scripts/prune-kernel
@@ -0,0 +1,20 @@
1#!/bin/bash
2
3# because I use CONFIG_LOCALVERSION_AUTO, not the same version again and
4# again, /boot and /lib/modules/ eventually fill up.
5# Dumb script to purge that stuff:
6
7for f in "$@"
8do
9 if rpm -qf "/lib/modules/$f" >/dev/null; then
10 echo "keeping $f (installed from rpm)"
11 elif [ $(uname -r) = "$f" ]; then
12 echo "keeping $f (running kernel) "
13 else
14 echo "removing $f"
15 rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f"
16 rm -f "/boot/vmlinuz-$f" "/boot/config-$f"
17 rm -rf "/lib/modules/$f"
18 new-kernel-pkg --remove $f
19 fi
20done
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f7160253f17f..e6ea9d4b1de9 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -23,6 +23,7 @@
23#include <linux/integrity.h> 23#include <linux/integrity.h>
24#include <linux/evm.h> 24#include <linux/evm.h>
25#include <crypto/hash.h> 25#include <crypto/hash.h>
26#include <crypto/algapi.h>
26#include "evm.h" 27#include "evm.h"
27 28
28int evm_initialized; 29int evm_initialized;
@@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
148 xattr_value_len, calc.digest); 149 xattr_value_len, calc.digest);
149 if (rc) 150 if (rc)
150 break; 151 break;
151 rc = memcmp(xattr_data->digest, calc.digest, 152 rc = crypto_memneq(xattr_data->digest, calc.digest,
152 sizeof(calc.digest)); 153 sizeof(calc.digest));
153 if (rc) 154 if (rc)
154 rc = -EINVAL; 155 rc = -EINVAL;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f8110cfd80ff..f1ab71504e1d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3249,7 +3249,7 @@ static int selinux_inode_listsecurity(struct inode *inode, char *buffer, size_t
3249 3249
3250static void selinux_inode_getsecid(struct inode *inode, u32 *secid) 3250static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
3251{ 3251{
3252 struct inode_security_struct *isec = inode_security(inode); 3252 struct inode_security_struct *isec = inode_security_novalidate(inode);
3253 *secid = isec->sid; 3253 *secid = isec->sid;
3254} 3254}
3255 3255
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 2bbb41822d8e..8495b9368190 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -83,6 +83,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
83 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 83 { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
84 { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 84 { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
85 { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, 85 { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
86 { SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
86}; 87};
87 88
88static struct nlmsg_perm nlmsg_xfrm_perms[] = 89static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index fadd3eb8e8bb..9106d8e2300e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
74static DEFINE_RWLOCK(snd_pcm_link_rwlock); 74static DEFINE_RWLOCK(snd_pcm_link_rwlock);
75static DECLARE_RWSEM(snd_pcm_link_rwsem); 75static DECLARE_RWSEM(snd_pcm_link_rwsem);
76 76
77/* Writer in rwsem may block readers even during its waiting in queue,
78 * and this may lead to a deadlock when the code path takes read sem
79 * twice (e.g. one in snd_pcm_action_nonatomic() and another in
80 * snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
81 * spin until it gets the lock.
82 */
83static inline void down_write_nonblock(struct rw_semaphore *lock)
84{
85 while (!down_write_trylock(lock))
86 cond_resched();
87}
88
77/** 89/**
78 * snd_pcm_stream_lock - Lock the PCM stream 90 * snd_pcm_stream_lock - Lock the PCM stream
79 * @substream: PCM substream 91 * @substream: PCM substream
@@ -1813,7 +1825,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
1813 res = -ENOMEM; 1825 res = -ENOMEM;
1814 goto _nolock; 1826 goto _nolock;
1815 } 1827 }
1816 down_write(&snd_pcm_link_rwsem); 1828 down_write_nonblock(&snd_pcm_link_rwsem);
1817 write_lock_irq(&snd_pcm_link_rwlock); 1829 write_lock_irq(&snd_pcm_link_rwlock);
1818 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN || 1830 if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
1819 substream->runtime->status->state != substream1->runtime->status->state || 1831 substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
1860 struct snd_pcm_substream *s; 1872 struct snd_pcm_substream *s;
1861 int res = 0; 1873 int res = 0;
1862 1874
1863 down_write(&snd_pcm_link_rwsem); 1875 down_write_nonblock(&snd_pcm_link_rwsem);
1864 write_lock_irq(&snd_pcm_link_rwlock); 1876 write_lock_irq(&snd_pcm_link_rwlock);
1865 if (!snd_pcm_stream_linked(substream)) { 1877 if (!snd_pcm_stream_linked(substream)) {
1866 res = -EALREADY; 1878 res = -EALREADY;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 801076687bb1..c850345c43b5 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
383 383
384 if (snd_BUG_ON(!pool)) 384 if (snd_BUG_ON(!pool))
385 return -EINVAL; 385 return -EINVAL;
386 if (pool->ptr) /* should be atomic? */
387 return 0;
388 386
389 pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size); 387 cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
390 if (!pool->ptr) 388 if (!cellptr)
391 return -ENOMEM; 389 return -ENOMEM;
392 390
393 /* add new cells to the free cell list */ 391 /* add new cells to the free cell list */
394 spin_lock_irqsave(&pool->lock, flags); 392 spin_lock_irqsave(&pool->lock, flags);
393 if (pool->ptr) {
394 spin_unlock_irqrestore(&pool->lock, flags);
395 vfree(cellptr);
396 return 0;
397 }
398
399 pool->ptr = cellptr;
395 pool->free = NULL; 400 pool->free = NULL;
396 401
397 for (cell = 0; cell < pool->size; cell++) { 402 for (cell = 0; cell < pool->size; cell++) {
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 921fb2bd8fad..fe686ee41c6d 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
535 bool is_src, bool ack) 535 bool is_src, bool ack)
536{ 536{
537 struct snd_seq_port_subs_info *grp; 537 struct snd_seq_port_subs_info *grp;
538 struct list_head *list;
539 bool empty;
538 540
539 grp = is_src ? &port->c_src : &port->c_dest; 541 grp = is_src ? &port->c_src : &port->c_dest;
542 list = is_src ? &subs->src_list : &subs->dest_list;
540 down_write(&grp->list_mutex); 543 down_write(&grp->list_mutex);
541 write_lock_irq(&grp->list_lock); 544 write_lock_irq(&grp->list_lock);
542 if (is_src) 545 empty = list_empty(list);
543 list_del(&subs->src_list); 546 if (!empty)
544 else 547 list_del_init(list);
545 list_del(&subs->dest_list);
546 grp->exclusive = 0; 548 grp->exclusive = 0;
547 write_unlock_irq(&grp->list_lock); 549 write_unlock_irq(&grp->list_lock);
548 up_write(&grp->list_mutex); 550 up_write(&grp->list_mutex);
549 551
550 unsubscribe_port(client, port, grp, &subs->info, ack); 552 if (!empty)
553 unsubscribe_port(client, port, grp, &subs->info, ack);
551} 554}
552 555
553/* connect two ports */ 556/* connect two ports */
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 9b513a05765a..dca817fc7894 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
422 spin_lock_irqsave(&timer->lock, flags); 422 spin_lock_irqsave(&timer->lock, flags);
423 list_for_each_entry(ts, &ti->slave_active_head, active_list) 423 list_for_each_entry(ts, &ti->slave_active_head, active_list)
424 if (ts->ccallback) 424 if (ts->ccallback)
425 ts->ccallback(ti, event + 100, &tstamp, resolution); 425 ts->ccallback(ts, event + 100, &tstamp, resolution);
426 spin_unlock_irqrestore(&timer->lock, flags); 426 spin_unlock_irqrestore(&timer->lock, flags);
427} 427}
428 428
@@ -518,9 +518,13 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
518 spin_unlock_irqrestore(&slave_active_lock, flags); 518 spin_unlock_irqrestore(&slave_active_lock, flags);
519 return -EBUSY; 519 return -EBUSY;
520 } 520 }
521 if (timeri->timer)
522 spin_lock(&timeri->timer->lock);
521 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; 523 timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
522 list_del_init(&timeri->ack_list); 524 list_del_init(&timeri->ack_list);
523 list_del_init(&timeri->active_list); 525 list_del_init(&timeri->active_list);
526 if (timeri->timer)
527 spin_unlock(&timeri->timer->lock);
524 spin_unlock_irqrestore(&slave_active_lock, flags); 528 spin_unlock_irqrestore(&slave_active_lock, flags);
525 goto __end; 529 goto __end;
526 } 530 }
@@ -1929,6 +1933,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1929{ 1933{
1930 struct snd_timer_user *tu; 1934 struct snd_timer_user *tu;
1931 long result = 0, unit; 1935 long result = 0, unit;
1936 int qhead;
1932 int err = 0; 1937 int err = 0;
1933 1938
1934 tu = file->private_data; 1939 tu = file->private_data;
@@ -1940,7 +1945,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1940 1945
1941 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { 1946 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
1942 err = -EAGAIN; 1947 err = -EAGAIN;
1943 break; 1948 goto _error;
1944 } 1949 }
1945 1950
1946 set_current_state(TASK_INTERRUPTIBLE); 1951 set_current_state(TASK_INTERRUPTIBLE);
@@ -1955,42 +1960,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1955 1960
1956 if (tu->disconnected) { 1961 if (tu->disconnected) {
1957 err = -ENODEV; 1962 err = -ENODEV;
1958 break; 1963 goto _error;
1959 } 1964 }
1960 if (signal_pending(current)) { 1965 if (signal_pending(current)) {
1961 err = -ERESTARTSYS; 1966 err = -ERESTARTSYS;
1962 break; 1967 goto _error;
1963 } 1968 }
1964 } 1969 }
1965 1970
1971 qhead = tu->qhead++;
1972 tu->qhead %= tu->queue_size;
1966 spin_unlock_irq(&tu->qlock); 1973 spin_unlock_irq(&tu->qlock);
1967 if (err < 0)
1968 goto _error;
1969 1974
1970 if (tu->tread) { 1975 if (tu->tread) {
1971 if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], 1976 if (copy_to_user(buffer, &tu->tqueue[qhead],
1972 sizeof(struct snd_timer_tread))) { 1977 sizeof(struct snd_timer_tread)))
1973 err = -EFAULT; 1978 err = -EFAULT;
1974 goto _error;
1975 }
1976 } else { 1979 } else {
1977 if (copy_to_user(buffer, &tu->queue[tu->qhead++], 1980 if (copy_to_user(buffer, &tu->queue[qhead],
1978 sizeof(struct snd_timer_read))) { 1981 sizeof(struct snd_timer_read)))
1979 err = -EFAULT; 1982 err = -EFAULT;
1980 goto _error;
1981 }
1982 } 1983 }
1983 1984
1984 tu->qhead %= tu->queue_size;
1985
1986 result += unit;
1987 buffer += unit;
1988
1989 spin_lock_irq(&tu->qlock); 1985 spin_lock_irq(&tu->qlock);
1990 tu->qused--; 1986 tu->qused--;
1987 if (err < 0)
1988 goto _error;
1989 result += unit;
1990 buffer += unit;
1991 } 1991 }
1992 spin_unlock_irq(&tu->qlock);
1993 _error: 1992 _error:
1993 spin_unlock_irq(&tu->qlock);
1994 return result > 0 ? result : err; 1994 return result > 0 ? result : err;
1995} 1995}
1996 1996
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index bde33308f0d6..c0f8f613f1f1 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
87module_param(fake_buffer, bool, 0444); 87module_param(fake_buffer, bool, 0444);
88MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); 88MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
89#ifdef CONFIG_HIGH_RES_TIMERS 89#ifdef CONFIG_HIGH_RES_TIMERS
90module_param(hrtimer, bool, 0444); 90module_param(hrtimer, bool, 0644);
91MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); 91MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
92#endif 92#endif
93 93
@@ -109,6 +109,9 @@ struct dummy_timer_ops {
109 snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); 109 snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
110}; 110};
111 111
112#define get_dummy_ops(substream) \
113 (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
114
112struct dummy_model { 115struct dummy_model {
113 const char *name; 116 const char *name;
114 int (*playback_constraints)(struct snd_pcm_runtime *runtime); 117 int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@ struct snd_dummy {
137 int iobox; 140 int iobox;
138 struct snd_kcontrol *cd_volume_ctl; 141 struct snd_kcontrol *cd_volume_ctl;
139 struct snd_kcontrol *cd_switch_ctl; 142 struct snd_kcontrol *cd_switch_ctl;
140 const struct dummy_timer_ops *timer_ops;
141}; 143};
142 144
143/* 145/*
@@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = {
231 */ 233 */
232 234
233struct dummy_systimer_pcm { 235struct dummy_systimer_pcm {
236 /* ops must be the first item */
237 const struct dummy_timer_ops *timer_ops;
234 spinlock_t lock; 238 spinlock_t lock;
235 struct timer_list timer; 239 struct timer_list timer;
236 unsigned long base_time; 240 unsigned long base_time;
@@ -366,6 +370,8 @@ static const struct dummy_timer_ops dummy_systimer_ops = {
366 */ 370 */
367 371
368struct dummy_hrtimer_pcm { 372struct dummy_hrtimer_pcm {
373 /* ops must be the first item */
374 const struct dummy_timer_ops *timer_ops;
369 ktime_t base_time; 375 ktime_t base_time;
370 ktime_t period_time; 376 ktime_t period_time;
371 atomic_t running; 377 atomic_t running;
@@ -492,31 +498,25 @@ static const struct dummy_timer_ops dummy_hrtimer_ops = {
492 498
493static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 499static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
494{ 500{
495 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
496
497 switch (cmd) { 501 switch (cmd) {
498 case SNDRV_PCM_TRIGGER_START: 502 case SNDRV_PCM_TRIGGER_START:
499 case SNDRV_PCM_TRIGGER_RESUME: 503 case SNDRV_PCM_TRIGGER_RESUME:
500 return dummy->timer_ops->start(substream); 504 return get_dummy_ops(substream)->start(substream);
501 case SNDRV_PCM_TRIGGER_STOP: 505 case SNDRV_PCM_TRIGGER_STOP:
502 case SNDRV_PCM_TRIGGER_SUSPEND: 506 case SNDRV_PCM_TRIGGER_SUSPEND:
503 return dummy->timer_ops->stop(substream); 507 return get_dummy_ops(substream)->stop(substream);
504 } 508 }
505 return -EINVAL; 509 return -EINVAL;
506} 510}
507 511
508static int dummy_pcm_prepare(struct snd_pcm_substream *substream) 512static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
509{ 513{
510 struct snd_dummy *dummy = snd_pcm_substream_chip(substream); 514 return get_dummy_ops(substream)->prepare(substream);
511
512 return dummy->timer_ops->prepare(substream);
513} 515}
514 516
515static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream) 517static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
516{ 518{
517 struct snd_dummy *dummy = snd_pcm_substream_chip(substream); 519 return get_dummy_ops(substream)->pointer(substream);
518
519 return dummy->timer_ops->pointer(substream);
520} 520}
521 521
522static struct snd_pcm_hardware dummy_pcm_hardware = { 522static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
562 struct snd_dummy *dummy = snd_pcm_substream_chip(substream); 562 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
563 struct dummy_model *model = dummy->model; 563 struct dummy_model *model = dummy->model;
564 struct snd_pcm_runtime *runtime = substream->runtime; 564 struct snd_pcm_runtime *runtime = substream->runtime;
565 const struct dummy_timer_ops *ops;
565 int err; 566 int err;
566 567
567 dummy->timer_ops = &dummy_systimer_ops; 568 ops = &dummy_systimer_ops;
568#ifdef CONFIG_HIGH_RES_TIMERS 569#ifdef CONFIG_HIGH_RES_TIMERS
569 if (hrtimer) 570 if (hrtimer)
570 dummy->timer_ops = &dummy_hrtimer_ops; 571 ops = &dummy_hrtimer_ops;
571#endif 572#endif
572 573
573 err = dummy->timer_ops->create(substream); 574 err = ops->create(substream);
574 if (err < 0) 575 if (err < 0)
575 return err; 576 return err;
577 get_dummy_ops(substream) = ops;
576 578
577 runtime->hw = dummy->pcm_hw; 579 runtime->hw = dummy->pcm_hw;
578 if (substream->pcm->device & 1) { 580 if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
594 err = model->capture_constraints(substream->runtime); 596 err = model->capture_constraints(substream->runtime);
595 } 597 }
596 if (err < 0) { 598 if (err < 0) {
597 dummy->timer_ops->free(substream); 599 get_dummy_ops(substream)->free(substream);
598 return err; 600 return err;
599 } 601 }
600 return 0; 602 return 0;
@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
602 604
603static int dummy_pcm_close(struct snd_pcm_substream *substream) 605static int dummy_pcm_close(struct snd_pcm_substream *substream)
604{ 606{
605 struct snd_dummy *dummy = snd_pcm_substream_chip(substream); 607 get_dummy_ops(substream)->free(substream);
606 dummy->timer_ops->free(substream);
607 return 0; 608 return 0;
608} 609}
609 610
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index b02a5e8cad44..0ac92aba5bc1 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -63,7 +63,7 @@ struct amdtp_dot {
63#define BYTE_PER_SAMPLE (4) 63#define BYTE_PER_SAMPLE (4)
64#define MAGIC_DOT_BYTE (2) 64#define MAGIC_DOT_BYTE (2)
65#define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE) 65#define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE)
66static const u8 dot_scrt(const u8 idx, const unsigned int off) 66static u8 dot_scrt(const u8 idx, const unsigned int off)
67{ 67{
68 /* 68 /*
69 * the length of the added pattern only depends on the lower nibble 69 * the length of the added pattern only depends on the lower nibble
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 904ce0329fa1..040a96d1ba8e 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -230,6 +230,7 @@ int snd_tscm_transaction_register(struct snd_tscm *tscm)
230 return err; 230 return err;
231error: 231error:
232 fw_core_remove_address_handler(&tscm->async_handler); 232 fw_core_remove_address_handler(&tscm->async_handler);
233 tscm->async_handler.callback_data = NULL;
233 return err; 234 return err;
234} 235}
235 236
@@ -276,6 +277,9 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
276 __be32 reg; 277 __be32 reg;
277 unsigned int i; 278 unsigned int i;
278 279
280 if (tscm->async_handler.callback_data == NULL)
281 return;
282
279 /* Turn off FireWire LED. */ 283 /* Turn off FireWire LED. */
280 reg = cpu_to_be32(0x0000008e); 284 reg = cpu_to_be32(0x0000008e);
281 snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST, 285 snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST,
@@ -297,6 +301,8 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
297 &reg, sizeof(reg), 0); 301 &reg, sizeof(reg), 0);
298 302
299 fw_core_remove_address_handler(&tscm->async_handler); 303 fw_core_remove_address_handler(&tscm->async_handler);
304 tscm->async_handler.callback_data = NULL;
305
300 for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++) 306 for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++)
301 snd_fw_async_midi_port_destroy(&tscm->out_ports[i]); 307 snd_fw_async_midi_port_destroy(&tscm->out_ports[i]);
302} 308}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index ee0bc1839508..e281c338e562 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -21,7 +21,6 @@ static struct snd_tscm_spec model_specs[] = {
21 .pcm_playback_analog_channels = 8, 21 .pcm_playback_analog_channels = 8,
22 .midi_capture_ports = 4, 22 .midi_capture_ports = 4,
23 .midi_playback_ports = 4, 23 .midi_playback_ports = 4,
24 .is_controller = true,
25 }, 24 },
26 { 25 {
27 .name = "FW-1082", 26 .name = "FW-1082",
@@ -31,9 +30,16 @@ static struct snd_tscm_spec model_specs[] = {
31 .pcm_playback_analog_channels = 2, 30 .pcm_playback_analog_channels = 2,
32 .midi_capture_ports = 2, 31 .midi_capture_ports = 2,
33 .midi_playback_ports = 2, 32 .midi_playback_ports = 2,
34 .is_controller = true,
35 }, 33 },
36 /* FW-1804 may be supported. */ 34 {
35 .name = "FW-1804",
36 .has_adat = true,
37 .has_spdif = true,
38 .pcm_capture_analog_channels = 8,
39 .pcm_playback_analog_channels = 2,
40 .midi_capture_ports = 2,
41 .midi_playback_ports = 4,
42 },
37}; 43};
38 44
39static int identify_model(struct snd_tscm *tscm) 45static int identify_model(struct snd_tscm *tscm)
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 2d028d2bd3bd..30ab77e924f7 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -39,7 +39,6 @@ struct snd_tscm_spec {
39 unsigned int pcm_playback_analog_channels; 39 unsigned int pcm_playback_analog_channels;
40 unsigned int midi_capture_ports; 40 unsigned int midi_capture_ports;
41 unsigned int midi_playback_ports; 41 unsigned int midi_playback_ports;
42 bool is_controller;
43}; 42};
44 43
45#define TSCM_MIDI_IN_PORT_MAX 4 44#define TSCM_MIDI_IN_PORT_MAX 4
@@ -72,9 +71,6 @@ struct snd_tscm {
72 struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX]; 71 struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
73 u8 running_status[TSCM_MIDI_OUT_PORT_MAX]; 72 u8 running_status[TSCM_MIDI_OUT_PORT_MAX];
74 bool on_sysex[TSCM_MIDI_OUT_PORT_MAX]; 73 bool on_sysex[TSCM_MIDI_OUT_PORT_MAX];
75
76 /* For control messages. */
77 struct snd_firewire_tascam_status *status;
78}; 74};
79 75
80#define TSCM_ADDR_BASE 0xffff00000000ull 76#define TSCM_ADDR_BASE 0xffff00000000ull
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index b5a17cb510a0..8c486235c905 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -426,18 +426,22 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_stop_chip);
426 * @bus: HD-audio core bus 426 * @bus: HD-audio core bus
427 * @status: INTSTS register value 427 * @status: INTSTS register value
428 * @ask: callback to be called for woken streams 428 * @ask: callback to be called for woken streams
429 *
430 * Returns the bits of handled streams, or zero if no stream is handled.
429 */ 431 */
430void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status, 432int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
431 void (*ack)(struct hdac_bus *, 433 void (*ack)(struct hdac_bus *,
432 struct hdac_stream *)) 434 struct hdac_stream *))
433{ 435{
434 struct hdac_stream *azx_dev; 436 struct hdac_stream *azx_dev;
435 u8 sd_status; 437 u8 sd_status;
438 int handled = 0;
436 439
437 list_for_each_entry(azx_dev, &bus->stream_list, list) { 440 list_for_each_entry(azx_dev, &bus->stream_list, list) {
438 if (status & azx_dev->sd_int_sta_mask) { 441 if (status & azx_dev->sd_int_sta_mask) {
439 sd_status = snd_hdac_stream_readb(azx_dev, SD_STS); 442 sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
440 snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); 443 snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
444 handled |= 1 << azx_dev->index;
441 if (!azx_dev->substream || !azx_dev->running || 445 if (!azx_dev->substream || !azx_dev->running ||
442 !(sd_status & SD_INT_COMPLETE)) 446 !(sd_status & SD_INT_COMPLETE))
443 continue; 447 continue;
@@ -445,6 +449,7 @@ void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
445 ack(bus, azx_dev); 449 ack(bus, azx_dev);
446 } 450 }
447 } 451 }
452 return handled;
448} 453}
449EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq); 454EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);
450 455
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 37cf9cee9835..27de8015717d 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -930,6 +930,8 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
930 struct azx *chip = dev_id; 930 struct azx *chip = dev_id;
931 struct hdac_bus *bus = azx_bus(chip); 931 struct hdac_bus *bus = azx_bus(chip);
932 u32 status; 932 u32 status;
933 bool active, handled = false;
934 int repeat = 0; /* count for avoiding endless loop */
933 935
934#ifdef CONFIG_PM 936#ifdef CONFIG_PM
935 if (azx_has_pm_runtime(chip)) 937 if (azx_has_pm_runtime(chip))
@@ -939,33 +941,36 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
939 941
940 spin_lock(&bus->reg_lock); 942 spin_lock(&bus->reg_lock);
941 943
942 if (chip->disabled) { 944 if (chip->disabled)
943 spin_unlock(&bus->reg_lock); 945 goto unlock;
944 return IRQ_NONE;
945 }
946
947 status = azx_readl(chip, INTSTS);
948 if (status == 0 || status == 0xffffffff) {
949 spin_unlock(&bus->reg_lock);
950 return IRQ_NONE;
951 }
952 946
953 snd_hdac_bus_handle_stream_irq(bus, status, stream_update); 947 do {
948 status = azx_readl(chip, INTSTS);
949 if (status == 0 || status == 0xffffffff)
950 break;
954 951
955 /* clear rirb int */ 952 handled = true;
956 status = azx_readb(chip, RIRBSTS); 953 active = false;
957 if (status & RIRB_INT_MASK) { 954 if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
958 if (status & RIRB_INT_RESPONSE) { 955 active = true;
959 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) 956
960 udelay(80); 957 /* clear rirb int */
961 snd_hdac_bus_update_rirb(bus); 958 status = azx_readb(chip, RIRBSTS);
959 if (status & RIRB_INT_MASK) {
960 active = true;
961 if (status & RIRB_INT_RESPONSE) {
962 if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
963 udelay(80);
964 snd_hdac_bus_update_rirb(bus);
965 }
966 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
962 } 967 }
963 azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); 968 } while (active && ++repeat < 10);
964 }
965 969
970 unlock:
966 spin_unlock(&bus->reg_lock); 971 spin_unlock(&bus->reg_lock);
967 972
968 return IRQ_HANDLED; 973 return IRQ_RETVAL(handled);
969} 974}
970EXPORT_SYMBOL_GPL(azx_interrupt); 975EXPORT_SYMBOL_GPL(azx_interrupt);
971 976
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 30c8efe0f80a..7ca5b89f088a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4028,9 +4028,9 @@ static void pin_power_callback(struct hda_codec *codec,
4028 struct hda_jack_callback *jack, 4028 struct hda_jack_callback *jack,
4029 bool on) 4029 bool on)
4030{ 4030{
4031 if (jack && jack->tbl->nid) 4031 if (jack && jack->nid)
4032 sync_power_state_change(codec, 4032 sync_power_state_change(codec,
4033 set_pin_power_jack(codec, jack->tbl->nid, on)); 4033 set_pin_power_jack(codec, jack->nid, on));
4034} 4034}
4035 4035
4036/* callback only doing power up -- called at first */ 4036/* callback only doing power up -- called at first */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4045dca3d699..e5240cb3749f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -363,7 +363,10 @@ enum {
363 ((pci)->device == 0x0d0c) || \ 363 ((pci)->device == 0x0d0c) || \
364 ((pci)->device == 0x160c)) 364 ((pci)->device == 0x160c))
365 365
366#define IS_BROXTON(pci) ((pci)->device == 0x5a98) 366#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
367#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
368#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
369#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
367 370
368static char *driver_short_names[] = { 371static char *driver_short_names[] = {
369 [AZX_DRIVER_ICH] = "HDA Intel", 372 [AZX_DRIVER_ICH] = "HDA Intel",
@@ -540,13 +543,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
540 543
541 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 544 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
542 snd_hdac_set_codec_wakeup(bus, true); 545 snd_hdac_set_codec_wakeup(bus, true);
543 if (IS_BROXTON(pci)) { 546 if (IS_SKL_PLUS(pci)) {
544 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); 547 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
545 val = val & ~INTEL_HDA_CGCTL_MISCBDCGE; 548 val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
546 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); 549 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
547 } 550 }
548 azx_init_chip(chip, full_reset); 551 azx_init_chip(chip, full_reset);
549 if (IS_BROXTON(pci)) { 552 if (IS_SKL_PLUS(pci)) {
550 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); 553 pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
551 val = val | INTEL_HDA_CGCTL_MISCBDCGE; 554 val = val | INTEL_HDA_CGCTL_MISCBDCGE;
552 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); 555 pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -555,7 +558,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
555 snd_hdac_set_codec_wakeup(bus, false); 558 snd_hdac_set_codec_wakeup(bus, false);
556 559
557 /* reduce dma latency to avoid noise */ 560 /* reduce dma latency to avoid noise */
558 if (IS_BROXTON(pci)) 561 if (IS_BXT(pci))
559 bxt_reduce_dma_latency(chip); 562 bxt_reduce_dma_latency(chip);
560} 563}
561 564
@@ -977,11 +980,6 @@ static int azx_resume(struct device *dev)
977/* put codec down to D3 at hibernation for Intel SKL+; 980/* put codec down to D3 at hibernation for Intel SKL+;
978 * otherwise BIOS may still access the codec and screw up the driver 981 * otherwise BIOS may still access the codec and screw up the driver
979 */ 982 */
980#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
981#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
982#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
983#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
984
985static int azx_freeze_noirq(struct device *dev) 983static int azx_freeze_noirq(struct device *dev)
986{ 984{
987 struct pci_dev *pci = to_pci_dev(dev); 985 struct pci_dev *pci = to_pci_dev(dev);
@@ -2168,10 +2166,10 @@ static void azx_remove(struct pci_dev *pci)
2168 struct hda_intel *hda; 2166 struct hda_intel *hda;
2169 2167
2170 if (card) { 2168 if (card) {
2171 /* flush the pending probing work */ 2169 /* cancel the pending probing work */
2172 chip = card->private_data; 2170 chip = card->private_data;
2173 hda = container_of(chip, struct hda_intel, chip); 2171 hda = container_of(chip, struct hda_intel, chip);
2174 flush_work(&hda->probe_work); 2172 cancel_work_sync(&hda->probe_work);
2175 2173
2176 snd_card_free(card); 2174 snd_card_free(card);
2177 } 2175 }
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index c945e257d368..a33234e04d4f 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
259 if (!callback) 259 if (!callback)
260 return ERR_PTR(-ENOMEM); 260 return ERR_PTR(-ENOMEM);
261 callback->func = func; 261 callback->func = func;
262 callback->tbl = jack; 262 callback->nid = jack->nid;
263 callback->next = jack->callback; 263 callback->next = jack->callback;
264 jack->callback = callback; 264 jack->callback = callback;
265 } 265 }
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 858708a044f5..e9814c0168ea 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -21,7 +21,7 @@ struct hda_jack_callback;
21typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *); 21typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
22 22
23struct hda_jack_callback { 23struct hda_jack_callback {
24 struct hda_jack_tbl *tbl; 24 hda_nid_t nid;
25 hda_jack_callback_fn func; 25 hda_jack_callback_fn func;
26 unsigned int private_data; /* arbitrary data */ 26 unsigned int private_data; /* arbitrary data */
27 struct hda_jack_callback *next; 27 struct hda_jack_callback *next;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4ef2259f88ca..9ceb2bc36e68 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
4427static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) 4427static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
4428{ 4428{
4429 struct ca0132_spec *spec = codec->spec; 4429 struct ca0132_spec *spec = codec->spec;
4430 struct hda_jack_tbl *tbl;
4430 4431
4431 /* Delay enabling the HP amp, to let the mic-detection 4432 /* Delay enabling the HP amp, to let the mic-detection
4432 * state machine run. 4433 * state machine run.
4433 */ 4434 */
4434 cancel_delayed_work_sync(&spec->unsol_hp_work); 4435 cancel_delayed_work_sync(&spec->unsol_hp_work);
4435 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); 4436 schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
4436 cb->tbl->block_report = 1; 4437 tbl = snd_hda_jack_tbl_get(codec, cb->nid);
4438 if (tbl)
4439 tbl->block_report = 1;
4437} 4440}
4438 4441
4439static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) 4442static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1f52b55d77c9..8ee78dbd4c60 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -448,7 +448,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
448 eld = &per_pin->sink_eld; 448 eld = &per_pin->sink_eld;
449 449
450 mutex_lock(&per_pin->lock); 450 mutex_lock(&per_pin->lock);
451 if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) { 451 if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
452 eld->eld_size > ELD_MAX_SIZE) {
452 mutex_unlock(&per_pin->lock); 453 mutex_unlock(&per_pin->lock);
453 snd_BUG(); 454 snd_BUG();
454 return -EINVAL; 455 return -EINVAL;
@@ -1193,7 +1194,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
1193static void jack_callback(struct hda_codec *codec, 1194static void jack_callback(struct hda_codec *codec,
1194 struct hda_jack_callback *jack) 1195 struct hda_jack_callback *jack)
1195{ 1196{
1196 check_presence_and_report(codec, jack->tbl->nid); 1197 check_presence_and_report(codec, jack->nid);
1197} 1198}
1198 1199
1199static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) 1200static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 21992fb7035d..1f357cd72d9c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -282,7 +282,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
282 uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); 282 uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
283 if (!uctl) 283 if (!uctl)
284 return; 284 return;
285 val = snd_hda_codec_read(codec, jack->tbl->nid, 0, 285 val = snd_hda_codec_read(codec, jack->nid, 0,
286 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0); 286 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
287 val &= HDA_AMP_VOLMASK; 287 val &= HDA_AMP_VOLMASK;
288 uctl->value.integer.value[0] = val; 288 uctl->value.integer.value[0] = val;
@@ -1787,7 +1787,6 @@ enum {
1787 ALC882_FIXUP_NO_PRIMARY_HP, 1787 ALC882_FIXUP_NO_PRIMARY_HP,
1788 ALC887_FIXUP_ASUS_BASS, 1788 ALC887_FIXUP_ASUS_BASS,
1789 ALC887_FIXUP_BASS_CHMAP, 1789 ALC887_FIXUP_BASS_CHMAP,
1790 ALC882_FIXUP_DISABLE_AAMIX,
1791}; 1790};
1792 1791
1793static void alc889_fixup_coef(struct hda_codec *codec, 1792static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1949,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
1949 1948
1950static void alc_fixup_bass_chmap(struct hda_codec *codec, 1949static void alc_fixup_bass_chmap(struct hda_codec *codec,
1951 const struct hda_fixup *fix, int action); 1950 const struct hda_fixup *fix, int action);
1952static void alc_fixup_disable_aamix(struct hda_codec *codec,
1953 const struct hda_fixup *fix, int action);
1954 1951
1955static const struct hda_fixup alc882_fixups[] = { 1952static const struct hda_fixup alc882_fixups[] = {
1956 [ALC882_FIXUP_ABIT_AW9D_MAX] = { 1953 [ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2188,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = {
2188 .type = HDA_FIXUP_FUNC, 2185 .type = HDA_FIXUP_FUNC,
2189 .v.func = alc_fixup_bass_chmap, 2186 .v.func = alc_fixup_bass_chmap,
2190 }, 2187 },
2191 [ALC882_FIXUP_DISABLE_AAMIX] = {
2192 .type = HDA_FIXUP_FUNC,
2193 .v.func = alc_fixup_disable_aamix,
2194 },
2195}; 2188};
2196 2189
2197static const struct snd_pci_quirk alc882_fixup_tbl[] = { 2190static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2230,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2230 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), 2223 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
2231 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), 2224 SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
2232 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), 2225 SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
2226 SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
2233 2227
2234 /* All Apple entries are in codec SSIDs */ 2228 /* All Apple entries are in codec SSIDs */
2235 SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), 2229 SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2259,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2259 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2253 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
2260 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2254 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2261 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), 2255 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
2262 SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
2263 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 2256 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
2264 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2257 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2265 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), 2258 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -3808,6 +3801,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3808 3801
3809static void alc_headset_mode_default(struct hda_codec *codec) 3802static void alc_headset_mode_default(struct hda_codec *codec)
3810{ 3803{
3804 static struct coef_fw coef0225[] = {
3805 UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
3806 {}
3807 };
3811 static struct coef_fw coef0255[] = { 3808 static struct coef_fw coef0255[] = {
3812 WRITE_COEF(0x45, 0xc089), 3809 WRITE_COEF(0x45, 0xc089),
3813 WRITE_COEF(0x45, 0xc489), 3810 WRITE_COEF(0x45, 0xc489),
@@ -3849,6 +3846,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3849 }; 3846 };
3850 3847
3851 switch (codec->core.vendor_id) { 3848 switch (codec->core.vendor_id) {
3849 case 0x10ec0225:
3850 alc_process_coef_fw(codec, coef0225);
3851 break;
3852 case 0x10ec0255: 3852 case 0x10ec0255:
3853 case 0x10ec0256: 3853 case 0x10ec0256:
3854 alc_process_coef_fw(codec, coef0255); 3854 alc_process_coef_fw(codec, coef0255);
@@ -4756,6 +4756,9 @@ enum {
4756 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE, 4756 ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
4757 ALC293_FIXUP_LENOVO_SPK_NOISE, 4757 ALC293_FIXUP_LENOVO_SPK_NOISE,
4758 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 4758 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
4759 ALC255_FIXUP_DELL_SPK_NOISE,
4760 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
4761 ALC280_FIXUP_HP_HEADSET_MIC,
4759}; 4762};
4760 4763
4761static const struct hda_fixup alc269_fixups[] = { 4764static const struct hda_fixup alc269_fixups[] = {
@@ -5375,6 +5378,29 @@ static const struct hda_fixup alc269_fixups[] = {
5375 .type = HDA_FIXUP_FUNC, 5378 .type = HDA_FIXUP_FUNC,
5376 .v.func = alc233_fixup_lenovo_line2_mic_hotkey, 5379 .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
5377 }, 5380 },
5381 [ALC255_FIXUP_DELL_SPK_NOISE] = {
5382 .type = HDA_FIXUP_FUNC,
5383 .v.func = alc_fixup_disable_aamix,
5384 .chained = true,
5385 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
5386 },
5387 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
5388 .type = HDA_FIXUP_VERBS,
5389 .v.verbs = (const struct hda_verb[]) {
5390 /* Disable pass-through path for FRONT 14h */
5391 { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
5392 { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
5393 {}
5394 },
5395 .chained = true,
5396 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
5397 },
5398 [ALC280_FIXUP_HP_HEADSET_MIC] = {
5399 .type = HDA_FIXUP_FUNC,
5400 .v.func = alc_fixup_disable_aamix,
5401 .chained = true,
5402 .chain_id = ALC269_FIXUP_HEADSET_MIC,
5403 },
5378}; 5404};
5379 5405
5380static const struct snd_pci_quirk alc269_fixup_tbl[] = { 5406static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5417,6 +5443,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5417 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5443 SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5418 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK), 5444 SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
5419 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), 5445 SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
5446 SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
5420 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5447 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5421 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5448 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5422 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5449 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5477,6 +5504,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5477 SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5504 SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5478 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5505 SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5479 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5506 SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5507 SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
5480 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5508 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5481 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5509 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5482 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5510 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5645,10 +5673,10 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5645 {0x21, 0x03211020} 5673 {0x21, 0x03211020}
5646 5674
5647static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { 5675static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5648 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 5676 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5649 ALC225_STANDARD_PINS, 5677 ALC225_STANDARD_PINS,
5650 {0x14, 0x901701a0}), 5678 {0x14, 0x901701a0}),
5651 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, 5679 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5652 ALC225_STANDARD_PINS, 5680 ALC225_STANDARD_PINS,
5653 {0x14, 0x901701b0}), 5681 {0x14, 0x901701b0}),
5654 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, 5682 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 2c7c5eb8b1e9..37b70f8e878f 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
493 if (!spec->num_pwrs) 493 if (!spec->num_pwrs)
494 return; 494 return;
495 495
496 if (jack && jack->tbl->nid) { 496 if (jack && jack->nid) {
497 stac_toggle_power_map(codec, jack->tbl->nid, 497 stac_toggle_power_map(codec, jack->nid,
498 snd_hda_jack_detect(codec, jack->tbl->nid), 498 snd_hda_jack_detect(codec, jack->nid),
499 true); 499 true);
500 return; 500 return;
501 } 501 }
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 3191e0a7d273..d1fb035f44db 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -635,6 +635,7 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
635 SNDRV_PCM_HW_PARAM_PERIODS); 635 SNDRV_PCM_HW_PARAM_PERIODS);
636 if (ret < 0) { 636 if (ret < 0) {
637 dev_err(prtd->platform->dev, "set integer constraint failed\n"); 637 dev_err(prtd->platform->dev, "set integer constraint failed\n");
638 kfree(adata);
638 return ret; 639 return ret;
639 } 640 }
640 641
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 33143fe1de0b..91785318b283 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1929,6 +1929,25 @@ static struct {
1929 { 1000000, 13500000, 0, 1 }, 1929 { 1000000, 13500000, 0, 1 },
1930}; 1930};
1931 1931
1932static const unsigned int pseudo_fref_max[ARIZONA_FLL_MAX_FRATIO] = {
1933 13500000,
1934 6144000,
1935 6144000,
1936 3072000,
1937 3072000,
1938 2822400,
1939 2822400,
1940 1536000,
1941 1536000,
1942 1536000,
1943 1536000,
1944 1536000,
1945 1536000,
1946 1536000,
1947 1536000,
1948 768000,
1949};
1950
1932static struct { 1951static struct {
1933 unsigned int min; 1952 unsigned int min;
1934 unsigned int max; 1953 unsigned int max;
@@ -2042,16 +2061,32 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
2042 /* Adjust FRATIO/refdiv to avoid integer mode if possible */ 2061 /* Adjust FRATIO/refdiv to avoid integer mode if possible */
2043 refdiv = cfg->refdiv; 2062 refdiv = cfg->refdiv;
2044 2063
2064 arizona_fll_dbg(fll, "pseudo: initial ratio=%u fref=%u refdiv=%u\n",
2065 init_ratio, Fref, refdiv);
2066
2045 while (div <= ARIZONA_FLL_MAX_REFDIV) { 2067 while (div <= ARIZONA_FLL_MAX_REFDIV) {
2046 for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO; 2068 for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO;
2047 ratio++) { 2069 ratio++) {
2048 if ((ARIZONA_FLL_VCO_CORNER / 2) / 2070 if ((ARIZONA_FLL_VCO_CORNER / 2) /
2049 (fll->vco_mult * ratio) < Fref) 2071 (fll->vco_mult * ratio) < Fref) {
2072 arizona_fll_dbg(fll, "pseudo: hit VCO corner\n");
2050 break; 2073 break;
2074 }
2075
2076 if (Fref > pseudo_fref_max[ratio - 1]) {
2077 arizona_fll_dbg(fll,
2078 "pseudo: exceeded max fref(%u) for ratio=%u\n",
2079 pseudo_fref_max[ratio - 1],
2080 ratio);
2081 break;
2082 }
2051 2083
2052 if (target % (ratio * Fref)) { 2084 if (target % (ratio * Fref)) {
2053 cfg->refdiv = refdiv; 2085 cfg->refdiv = refdiv;
2054 cfg->fratio = ratio - 1; 2086 cfg->fratio = ratio - 1;
2087 arizona_fll_dbg(fll,
2088 "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
2089 Fref, refdiv, div, ratio);
2055 return ratio; 2090 return ratio;
2056 } 2091 }
2057 } 2092 }
@@ -2060,6 +2095,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
2060 if (target % (ratio * Fref)) { 2095 if (target % (ratio * Fref)) {
2061 cfg->refdiv = refdiv; 2096 cfg->refdiv = refdiv;
2062 cfg->fratio = ratio - 1; 2097 cfg->fratio = ratio - 1;
2098 arizona_fll_dbg(fll,
2099 "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
2100 Fref, refdiv, div, ratio);
2063 return ratio; 2101 return ratio;
2064 } 2102 }
2065 } 2103 }
@@ -2068,6 +2106,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
2068 Fref /= 2; 2106 Fref /= 2;
2069 refdiv++; 2107 refdiv++;
2070 init_ratio = arizona_find_fratio(Fref, NULL); 2108 init_ratio = arizona_find_fratio(Fref, NULL);
2109 arizona_fll_dbg(fll,
2110 "pseudo: change fref=%u refdiv=%d(%d) ratio=%u\n",
2111 Fref, refdiv, div, init_ratio);
2071 } 2112 }
2072 2113
2073 arizona_fll_warn(fll, "Falling back to integer mode operation\n"); 2114 arizona_fll_warn(fll, "Falling back to integer mode operation\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index bc08f0c5a5f6..1bd31644a782 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -266,6 +266,8 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
266 } else { 266 } else {
267 *mic = false; 267 *mic = false;
268 regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20); 268 regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20);
269 regmap_update_bits(rt286->regmap,
270 RT286_CBJ_CTRL1, 0x0400, 0x0000);
269 } 271 }
270 } else { 272 } else {
271 regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf); 273 regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
@@ -470,24 +472,6 @@ static int rt286_set_dmic1_event(struct snd_soc_dapm_widget *w,
470 return 0; 472 return 0;
471} 473}
472 474
473static int rt286_vref_event(struct snd_soc_dapm_widget *w,
474 struct snd_kcontrol *kcontrol, int event)
475{
476 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
477
478 switch (event) {
479 case SND_SOC_DAPM_PRE_PMU:
480 snd_soc_update_bits(codec,
481 RT286_CBJ_CTRL1, 0x0400, 0x0000);
482 mdelay(50);
483 break;
484 default:
485 return 0;
486 }
487
488 return 0;
489}
490
491static int rt286_ldo2_event(struct snd_soc_dapm_widget *w, 475static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
492 struct snd_kcontrol *kcontrol, int event) 476 struct snd_kcontrol *kcontrol, int event)
493{ 477{
@@ -536,7 +520,7 @@ static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = {
536 SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1, 520 SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1,
537 12, 1, NULL, 0), 521 12, 1, NULL, 0),
538 SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1, 522 SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1,
539 0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU), 523 0, 1, NULL, 0),
540 SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2, 524 SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2,
541 2, 0, NULL, 0), 525 2, 0, NULL, 0),
542 SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1, 526 SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1,
@@ -911,8 +895,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec,
911 case SND_SOC_BIAS_ON: 895 case SND_SOC_BIAS_ON:
912 mdelay(10); 896 mdelay(10);
913 snd_soc_update_bits(codec, 897 snd_soc_update_bits(codec,
914 RT286_CBJ_CTRL1, 0x0400, 0x0400);
915 snd_soc_update_bits(codec,
916 RT286_DC_GAIN, 0x200, 0x0); 898 RT286_DC_GAIN, 0x200, 0x0);
917 899
918 break; 900 break;
@@ -920,8 +902,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec,
920 case SND_SOC_BIAS_STANDBY: 902 case SND_SOC_BIAS_STANDBY:
921 snd_soc_write(codec, 903 snd_soc_write(codec,
922 RT286_SET_AUDIO_POWER, AC_PWRST_D3); 904 RT286_SET_AUDIO_POWER, AC_PWRST_D3);
923 snd_soc_update_bits(codec,
924 RT286_CBJ_CTRL1, 0x0400, 0x0000);
925 break; 905 break;
926 906
927 default: 907 default:
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index c61d38b585fb..93e8c9017633 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -776,7 +776,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
776 776
777 /* IN1/IN2 Control */ 777 /* IN1/IN2 Control */
778 SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1, 778 SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
779 RT5645_BST_SFT1, 8, 0, bst_tlv), 779 RT5645_BST_SFT1, 12, 0, bst_tlv),
780 SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL, 780 SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
781 RT5645_BST_SFT2, 8, 0, bst_tlv), 781 RT5645_BST_SFT2, 8, 0, bst_tlv),
782 782
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 820d8fa62b5e..fb8ea05c0de1 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3985,7 +3985,6 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
3985 if (rt5659 == NULL) 3985 if (rt5659 == NULL)
3986 return -ENOMEM; 3986 return -ENOMEM;
3987 3987
3988 rt5659->i2c = i2c;
3989 i2c_set_clientdata(i2c, rt5659); 3988 i2c_set_clientdata(i2c, rt5659);
3990 3989
3991 if (pdata) 3990 if (pdata)
@@ -4157,24 +4156,17 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
4157 4156
4158 INIT_DELAYED_WORK(&rt5659->jack_detect_work, rt5659_jack_detect_work); 4157 INIT_DELAYED_WORK(&rt5659->jack_detect_work, rt5659_jack_detect_work);
4159 4158
4160 if (rt5659->i2c->irq) { 4159 if (i2c->irq) {
4161 ret = request_threaded_irq(rt5659->i2c->irq, NULL, rt5659_irq, 4160 ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
4162 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING 4161 rt5659_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
4163 | IRQF_ONESHOT, "rt5659", rt5659); 4162 | IRQF_ONESHOT, "rt5659", rt5659);
4164 if (ret) 4163 if (ret)
4165 dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret); 4164 dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
4166 4165
4167 } 4166 }
4168 4167
4169 ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659, 4168 return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
4170 rt5659_dai, ARRAY_SIZE(rt5659_dai)); 4169 rt5659_dai, ARRAY_SIZE(rt5659_dai));
4171
4172 if (ret) {
4173 if (rt5659->i2c->irq)
4174 free_irq(rt5659->i2c->irq, rt5659);
4175 }
4176
4177 return 0;
4178} 4170}
4179 4171
4180static int rt5659_i2c_remove(struct i2c_client *i2c) 4172static int rt5659_i2c_remove(struct i2c_client *i2c)
@@ -4191,24 +4183,29 @@ void rt5659_i2c_shutdown(struct i2c_client *client)
4191 regmap_write(rt5659->regmap, RT5659_RESET, 0); 4183 regmap_write(rt5659->regmap, RT5659_RESET, 0);
4192} 4184}
4193 4185
4186#ifdef CONFIG_OF
4194static const struct of_device_id rt5659_of_match[] = { 4187static const struct of_device_id rt5659_of_match[] = {
4195 { .compatible = "realtek,rt5658", }, 4188 { .compatible = "realtek,rt5658", },
4196 { .compatible = "realtek,rt5659", }, 4189 { .compatible = "realtek,rt5659", },
4197 {}, 4190 { },
4198}; 4191};
4192MODULE_DEVICE_TABLE(of, rt5659_of_match);
4193#endif
4199 4194
4195#ifdef CONFIG_ACPI
4200static struct acpi_device_id rt5659_acpi_match[] = { 4196static struct acpi_device_id rt5659_acpi_match[] = {
4201 { "10EC5658", 0}, 4197 { "10EC5658", 0, },
4202 { "10EC5659", 0}, 4198 { "10EC5659", 0, },
4203 { }, 4199 { },
4204}; 4200};
4205MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match); 4201MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match);
4202#endif
4206 4203
4207struct i2c_driver rt5659_i2c_driver = { 4204struct i2c_driver rt5659_i2c_driver = {
4208 .driver = { 4205 .driver = {
4209 .name = "rt5659", 4206 .name = "rt5659",
4210 .owner = THIS_MODULE, 4207 .owner = THIS_MODULE,
4211 .of_match_table = rt5659_of_match, 4208 .of_match_table = of_match_ptr(rt5659_of_match),
4212 .acpi_match_table = ACPI_PTR(rt5659_acpi_match), 4209 .acpi_match_table = ACPI_PTR(rt5659_acpi_match),
4213 }, 4210 },
4214 .probe = rt5659_i2c_probe, 4211 .probe = rt5659_i2c_probe,
diff --git a/sound/soc/codecs/rt5659.h b/sound/soc/codecs/rt5659.h
index 8f07ee903eaa..d31c9e5bcec8 100644
--- a/sound/soc/codecs/rt5659.h
+++ b/sound/soc/codecs/rt5659.h
@@ -1792,7 +1792,6 @@ struct rt5659_priv {
1792 struct snd_soc_codec *codec; 1792 struct snd_soc_codec *codec;
1793 struct rt5659_platform_data pdata; 1793 struct rt5659_platform_data pdata;
1794 struct regmap *regmap; 1794 struct regmap *regmap;
1795 struct i2c_client *i2c;
1796 struct gpio_desc *gpiod_ldo1_en; 1795 struct gpio_desc *gpiod_ldo1_en;
1797 struct gpio_desc *gpiod_reset; 1796 struct gpio_desc *gpiod_reset;
1798 struct snd_soc_jack *hs_jack; 1797 struct snd_soc_jack *hs_jack;
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
index 21ca3a5e9f66..d374c18d4db7 100644
--- a/sound/soc/codecs/sigmadsp-i2c.c
+++ b/sound/soc/codecs/sigmadsp-i2c.c
@@ -31,7 +31,10 @@ static int sigmadsp_write_i2c(void *control_data,
31 31
32 kfree(buf); 32 kfree(buf);
33 33
34 return ret; 34 if (ret < 0)
35 return ret;
36
37 return 0;
35} 38}
36 39
37static int sigmadsp_read_i2c(void *control_data, 40static int sigmadsp_read_i2c(void *control_data,
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6088d30962a9..97c0f1e23886 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2382,6 +2382,7 @@ error:
2382 2382
2383static int wm5110_remove(struct platform_device *pdev) 2383static int wm5110_remove(struct platform_device *pdev)
2384{ 2384{
2385 snd_soc_unregister_platform(&pdev->dev);
2385 snd_soc_unregister_codec(&pdev->dev); 2386 snd_soc_unregister_codec(&pdev->dev);
2386 pm_runtime_disable(&pdev->dev); 2387 pm_runtime_disable(&pdev->dev);
2387 2388
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff237726775a..d7f444f87460 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -240,13 +240,13 @@ SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
240SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, 240SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
241 7, 1, 1), 241 7, 1, 1),
242 242
243SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", 243SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
244 WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), 244 WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
245SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", 245SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
246 WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv), 246 WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
247SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", 247SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
248 WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv), 248 WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
249SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", 249SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
250 WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv), 250 WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
251SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume", 251SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
252 WM8960_RINPATH, 4, 3, 0, micboost_tlv), 252 WM8960_RINPATH, 4, 3, 0, micboost_tlv),
@@ -643,29 +643,31 @@ static int wm8960_configure_clocking(struct snd_soc_codec *codec)
643 return -EINVAL; 643 return -EINVAL;
644 } 644 }
645 645
646 /* check if the sysclk frequency is available. */ 646 if (wm8960->clk_id != WM8960_SYSCLK_PLL) {
647 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { 647 /* check if the sysclk frequency is available. */
648 if (sysclk_divs[i] == -1) 648 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
649 continue; 649 if (sysclk_divs[i] == -1)
650 sysclk = freq_out / sysclk_divs[i]; 650 continue;
651 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) { 651 sysclk = freq_out / sysclk_divs[i];
652 if (sysclk == dac_divs[j] * lrclk) { 652 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
653 if (sysclk != dac_divs[j] * lrclk)
654 continue;
653 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) 655 for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
654 if (sysclk == bclk * bclk_divs[k] / 10) 656 if (sysclk == bclk * bclk_divs[k] / 10)
655 break; 657 break;
656 if (k != ARRAY_SIZE(bclk_divs)) 658 if (k != ARRAY_SIZE(bclk_divs))
657 break; 659 break;
658 } 660 }
661 if (j != ARRAY_SIZE(dac_divs))
662 break;
659 } 663 }
660 if (j != ARRAY_SIZE(dac_divs))
661 break;
662 }
663 664
664 if (i != ARRAY_SIZE(sysclk_divs)) { 665 if (i != ARRAY_SIZE(sysclk_divs)) {
665 goto configure_clock; 666 goto configure_clock;
666 } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) { 667 } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
667 dev_err(codec->dev, "failed to configure clock\n"); 668 dev_err(codec->dev, "failed to configure clock\n");
668 return -EINVAL; 669 return -EINVAL;
670 }
669 } 671 }
670 /* get a available pll out frequency and set pll */ 672 /* get a available pll out frequency and set pll */
671 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { 673 for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index ce664c239be3..bff258d7bcea 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -645,6 +645,8 @@ static int dw_i2s_probe(struct platform_device *pdev)
645 645
646 dev->dev = &pdev->dev; 646 dev->dev = &pdev->dev;
647 647
648 dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
649 dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
648 if (pdata) { 650 if (pdata) {
649 dev->capability = pdata->cap; 651 dev->capability = pdata->cap;
650 clk_id = NULL; 652 clk_id = NULL;
@@ -652,9 +654,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
652 if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) { 654 if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) {
653 dev->i2s_reg_comp1 = pdata->i2s_reg_comp1; 655 dev->i2s_reg_comp1 = pdata->i2s_reg_comp1;
654 dev->i2s_reg_comp2 = pdata->i2s_reg_comp2; 656 dev->i2s_reg_comp2 = pdata->i2s_reg_comp2;
655 } else {
656 dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
657 dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
658 } 657 }
659 ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata); 658 ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata);
660 } else { 659 } else {
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 40dfd8a36484..ed8de1035cda 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -112,20 +112,6 @@ struct fsl_ssi_rxtx_reg_val {
112 struct fsl_ssi_reg_val tx; 112 struct fsl_ssi_reg_val tx;
113}; 113};
114 114
115static const struct reg_default fsl_ssi_reg_defaults[] = {
116 {CCSR_SSI_SCR, 0x00000000},
117 {CCSR_SSI_SIER, 0x00003003},
118 {CCSR_SSI_STCR, 0x00000200},
119 {CCSR_SSI_SRCR, 0x00000200},
120 {CCSR_SSI_STCCR, 0x00040000},
121 {CCSR_SSI_SRCCR, 0x00040000},
122 {CCSR_SSI_SACNT, 0x00000000},
123 {CCSR_SSI_STMSK, 0x00000000},
124 {CCSR_SSI_SRMSK, 0x00000000},
125 {CCSR_SSI_SACCEN, 0x00000000},
126 {CCSR_SSI_SACCDIS, 0x00000000},
127};
128
129static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg) 115static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
130{ 116{
131 switch (reg) { 117 switch (reg) {
@@ -190,8 +176,7 @@ static const struct regmap_config fsl_ssi_regconfig = {
190 .val_bits = 32, 176 .val_bits = 32,
191 .reg_stride = 4, 177 .reg_stride = 4,
192 .val_format_endian = REGMAP_ENDIAN_NATIVE, 178 .val_format_endian = REGMAP_ENDIAN_NATIVE,
193 .reg_defaults = fsl_ssi_reg_defaults, 179 .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
194 .num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults),
195 .readable_reg = fsl_ssi_readable_reg, 180 .readable_reg = fsl_ssi_readable_reg,
196 .volatile_reg = fsl_ssi_volatile_reg, 181 .volatile_reg = fsl_ssi_volatile_reg,
197 .precious_reg = fsl_ssi_precious_reg, 182 .precious_reg = fsl_ssi_precious_reg,
@@ -201,6 +186,7 @@ static const struct regmap_config fsl_ssi_regconfig = {
201 186
202struct fsl_ssi_soc_data { 187struct fsl_ssi_soc_data {
203 bool imx; 188 bool imx;
189 bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */
204 bool offline_config; 190 bool offline_config;
205 u32 sisr_write_mask; 191 u32 sisr_write_mask;
206}; 192};
@@ -303,6 +289,7 @@ static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
303 289
304static struct fsl_ssi_soc_data fsl_ssi_imx21 = { 290static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
305 .imx = true, 291 .imx = true,
292 .imx21regs = true,
306 .offline_config = true, 293 .offline_config = true,
307 .sisr_write_mask = 0, 294 .sisr_write_mask = 0,
308}; 295};
@@ -586,8 +573,12 @@ static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
586 */ 573 */
587 regmap_write(regs, CCSR_SSI_SACNT, 574 regmap_write(regs, CCSR_SSI_SACNT,
588 CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV); 575 CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
589 regmap_write(regs, CCSR_SSI_SACCDIS, 0xff); 576
590 regmap_write(regs, CCSR_SSI_SACCEN, 0x300); 577 /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
578 if (!ssi_private->soc->imx21regs) {
579 regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
580 regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
581 }
591 582
592 /* 583 /*
593 * Enable SSI, Transmit and Receive. AC97 has to communicate with the 584 * Enable SSI, Transmit and Receive. AC97 has to communicate with the
@@ -1397,6 +1388,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1397 struct resource *res; 1388 struct resource *res;
1398 void __iomem *iomem; 1389 void __iomem *iomem;
1399 char name[64]; 1390 char name[64];
1391 struct regmap_config regconfig = fsl_ssi_regconfig;
1400 1392
1401 of_id = of_match_device(fsl_ssi_ids, &pdev->dev); 1393 of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
1402 if (!of_id || !of_id->data) 1394 if (!of_id || !of_id->data)
@@ -1444,15 +1436,25 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1444 return PTR_ERR(iomem); 1436 return PTR_ERR(iomem);
1445 ssi_private->ssi_phys = res->start; 1437 ssi_private->ssi_phys = res->start;
1446 1438
1439 if (ssi_private->soc->imx21regs) {
1440 /*
1441 * According to datasheet imx21-class SSI
1442 * don't have SACC{ST,EN,DIS} regs.
1443 */
1444 regconfig.max_register = CCSR_SSI_SRMSK;
1445 regconfig.num_reg_defaults_raw =
1446 CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
1447 }
1448
1447 ret = of_property_match_string(np, "clock-names", "ipg"); 1449 ret = of_property_match_string(np, "clock-names", "ipg");
1448 if (ret < 0) { 1450 if (ret < 0) {
1449 ssi_private->has_ipg_clk_name = false; 1451 ssi_private->has_ipg_clk_name = false;
1450 ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem, 1452 ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
1451 &fsl_ssi_regconfig); 1453 &regconfig);
1452 } else { 1454 } else {
1453 ssi_private->has_ipg_clk_name = true; 1455 ssi_private->has_ipg_clk_name = true;
1454 ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev, 1456 ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
1455 "ipg", iomem, &fsl_ssi_regconfig); 1457 "ipg", iomem, &regconfig);
1456 } 1458 }
1457 if (IS_ERR(ssi_private->regs)) { 1459 if (IS_ERR(ssi_private->regs)) {
1458 dev_err(&pdev->dev, "Failed to init register map\n"); 1460 dev_err(&pdev->dev, "Failed to init register map\n");
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index a407e833c612..fb896b2c9ba3 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -72,8 +72,6 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
72 goto end; 72 goto end;
73 } 73 }
74 74
75 platform_set_drvdata(pdev, data);
76
77end: 75end:
78 of_node_put(spdif_np); 76 of_node_put(spdif_np);
79 77
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 1ded8811598e..2389ab47e25f 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -99,7 +99,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
99 if (ret && ret != -ENOTSUPP) 99 if (ret && ret != -ENOTSUPP)
100 goto err; 100 goto err;
101 } 101 }
102 102 return 0;
103err: 103err:
104 return ret; 104 return ret;
105} 105}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 803f95e40679..7d7c872c280d 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -30,11 +30,15 @@ config SND_SST_IPC_ACPI
30config SND_SOC_INTEL_SST 30config SND_SOC_INTEL_SST
31 tristate 31 tristate
32 select SND_SOC_INTEL_SST_ACPI if ACPI 32 select SND_SOC_INTEL_SST_ACPI if ACPI
33 select SND_SOC_INTEL_SST_MATCH if ACPI
33 depends on (X86 || COMPILE_TEST) 34 depends on (X86 || COMPILE_TEST)
34 35
35config SND_SOC_INTEL_SST_ACPI 36config SND_SOC_INTEL_SST_ACPI
36 tristate 37 tristate
37 38
39config SND_SOC_INTEL_SST_MATCH
40 tristate
41
38config SND_SOC_INTEL_HASWELL 42config SND_SOC_INTEL_HASWELL
39 tristate 43 tristate
40 44
@@ -57,7 +61,7 @@ config SND_SOC_INTEL_HASWELL_MACH
57config SND_SOC_INTEL_BYT_RT5640_MACH 61config SND_SOC_INTEL_BYT_RT5640_MACH
58 tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec" 62 tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
59 depends on X86_INTEL_LPSS && I2C 63 depends on X86_INTEL_LPSS && I2C
60 depends on DW_DMAC_CORE=y && (SND_SOC_INTEL_BYTCR_RT5640_MACH = n) 64 depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
61 select SND_SOC_INTEL_SST 65 select SND_SOC_INTEL_SST
62 select SND_SOC_INTEL_BAYTRAIL 66 select SND_SOC_INTEL_BAYTRAIL
63 select SND_SOC_RT5640 67 select SND_SOC_RT5640
@@ -69,7 +73,7 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
69config SND_SOC_INTEL_BYT_MAX98090_MACH 73config SND_SOC_INTEL_BYT_MAX98090_MACH
70 tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec" 74 tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
71 depends on X86_INTEL_LPSS && I2C 75 depends on X86_INTEL_LPSS && I2C
72 depends on DW_DMAC_CORE=y 76 depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
73 select SND_SOC_INTEL_SST 77 select SND_SOC_INTEL_SST
74 select SND_SOC_INTEL_BAYTRAIL 78 select SND_SOC_INTEL_BAYTRAIL
75 select SND_SOC_MAX98090 79 select SND_SOC_MAX98090
@@ -97,6 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
97 select SND_SOC_RT5640 101 select SND_SOC_RT5640
98 select SND_SST_MFLD_PLATFORM 102 select SND_SST_MFLD_PLATFORM
99 select SND_SST_IPC_ACPI 103 select SND_SST_IPC_ACPI
104 select SND_SOC_INTEL_SST_MATCH if ACPI
100 help 105 help
101 This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR 106 This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
102 platforms with RT5640 audio codec. 107 platforms with RT5640 audio codec.
@@ -109,6 +114,7 @@ config SND_SOC_INTEL_BYTCR_RT5651_MACH
109 select SND_SOC_RT5651 114 select SND_SOC_RT5651
110 select SND_SST_MFLD_PLATFORM 115 select SND_SST_MFLD_PLATFORM
111 select SND_SST_IPC_ACPI 116 select SND_SST_IPC_ACPI
117 select SND_SOC_INTEL_SST_MATCH if ACPI
112 help 118 help
113 This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR 119 This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
114 platforms with RT5651 audio codec. 120 platforms with RT5651 audio codec.
@@ -121,6 +127,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
121 select SND_SOC_RT5670 127 select SND_SOC_RT5670
122 select SND_SST_MFLD_PLATFORM 128 select SND_SST_MFLD_PLATFORM
123 select SND_SST_IPC_ACPI 129 select SND_SST_IPC_ACPI
130 select SND_SOC_INTEL_SST_MATCH if ACPI
124 help 131 help
125 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell 132 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
126 platforms with RT5672 audio codec. 133 platforms with RT5672 audio codec.
@@ -133,6 +140,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
133 select SND_SOC_RT5645 140 select SND_SOC_RT5645
134 select SND_SST_MFLD_PLATFORM 141 select SND_SST_MFLD_PLATFORM
135 select SND_SST_IPC_ACPI 142 select SND_SST_IPC_ACPI
143 select SND_SOC_INTEL_SST_MATCH if ACPI
136 help 144 help
137 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell 145 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
138 platforms with RT5645/5650 audio codec. 146 platforms with RT5645/5650 audio codec.
@@ -145,6 +153,7 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
145 select SND_SOC_TS3A227E 153 select SND_SOC_TS3A227E
146 select SND_SST_MFLD_PLATFORM 154 select SND_SST_MFLD_PLATFORM
147 select SND_SST_IPC_ACPI 155 select SND_SST_IPC_ACPI
156 select SND_SOC_INTEL_SST_MATCH if ACPI
148 help 157 help
149 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell 158 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
150 platforms with MAX98090 audio codec it also can support TI jack chip as aux device. 159 platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 55c33dc76ce4..52ed434cbca6 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -528,6 +528,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
528 .ops = &sst_compr_dai_ops, 528 .ops = &sst_compr_dai_ops,
529 .playback = { 529 .playback = {
530 .stream_name = "Compress Playback", 530 .stream_name = "Compress Playback",
531 .channels_min = 1,
531 }, 532 },
532}, 533},
533/* BE CPU Dais */ 534/* BE CPU Dais */
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 7396ddb427d8..2cbcbe412661 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -212,7 +212,10 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
212{ 212{
213 struct snd_interval *channels = hw_param_interval(params, 213 struct snd_interval *channels = hw_param_interval(params,
214 SNDRV_PCM_HW_PARAM_CHANNELS); 214 SNDRV_PCM_HW_PARAM_CHANNELS);
215 channels->min = channels->max = 4; 215 if (params_channels(params) == 2)
216 channels->min = channels->max = 2;
217 else
218 channels->min = channels->max = 4;
216 219
217 return 0; 220 return 0;
218} 221}
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 668fdeee195e..fbbb25c2ceed 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -1,13 +1,10 @@
1snd-soc-sst-dsp-objs := sst-dsp.o 1snd-soc-sst-dsp-objs := sst-dsp.o
2ifneq ($(CONFIG_SND_SST_IPC_ACPI),) 2snd-soc-sst-acpi-objs := sst-acpi.o
3snd-soc-sst-acpi-objs := sst-match-acpi.o 3snd-soc-sst-match-objs := sst-match-acpi.o
4else
5snd-soc-sst-acpi-objs := sst-acpi.o sst-match-acpi.o
6endif
7
8snd-soc-sst-ipc-objs := sst-ipc.o 4snd-soc-sst-ipc-objs := sst-ipc.o
9 5
10snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o 6snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
11 7
12obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o 8obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
13obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o 9obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
10obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index 7a85c576dad3..2c5eda14d510 100644
--- a/sound/soc/intel/common/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -215,6 +215,7 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
215 .dma_size = SST_LPT_DSP_DMA_SIZE, 215 .dma_size = SST_LPT_DSP_DMA_SIZE,
216}; 216};
217 217
218#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
218static struct sst_acpi_mach baytrail_machines[] = { 219static struct sst_acpi_mach baytrail_machines[] = {
219 { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL }, 220 { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
220 { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL }, 221 { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
@@ -231,11 +232,14 @@ static struct sst_acpi_desc sst_acpi_baytrail_desc = {
231 .sst_id = SST_DEV_ID_BYT, 232 .sst_id = SST_DEV_ID_BYT,
232 .resindex_dma_base = -1, 233 .resindex_dma_base = -1,
233}; 234};
235#endif
234 236
235static const struct acpi_device_id sst_acpi_match[] = { 237static const struct acpi_device_id sst_acpi_match[] = {
236 { "INT33C8", (unsigned long)&sst_acpi_haswell_desc }, 238 { "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
237 { "INT3438", (unsigned long)&sst_acpi_broadwell_desc }, 239 { "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
240#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
238 { "80860F28", (unsigned long)&sst_acpi_baytrail_desc }, 241 { "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
242#endif
239 { } 243 { }
240}; 244};
241MODULE_DEVICE_TABLE(acpi, sst_acpi_match); 245MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index dd077e116d25..3b4539d21492 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -41,3 +41,6 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
41 return NULL; 41 return NULL;
42} 42}
43EXPORT_SYMBOL_GPL(sst_acpi_find_machine); 43EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
44
45MODULE_LICENSE("GPL v2");
46MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index de6dac496a0d..4629372d7c8e 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -688,14 +688,14 @@ int skl_unbind_modules(struct skl_sst *ctx,
688 /* get src queue index */ 688 /* get src queue index */
689 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max); 689 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
690 if (src_index < 0) 690 if (src_index < 0)
691 return -EINVAL; 691 return 0;
692 692
693 msg.src_queue = src_index; 693 msg.src_queue = src_index;
694 694
695 /* get dst queue index */ 695 /* get dst queue index */
696 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max); 696 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
697 if (dst_index < 0) 697 if (dst_index < 0)
698 return -EINVAL; 698 return 0;
699 699
700 msg.dst_queue = dst_index; 700 msg.dst_queue = dst_index;
701 701
@@ -747,7 +747,7 @@ int skl_bind_modules(struct skl_sst *ctx,
747 747
748 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg); 748 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
749 749
750 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE && 750 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
751 dst_mcfg->m_state < SKL_MODULE_INIT_DONE) 751 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
752 return 0; 752 return 0;
753 753
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index f3553258091a..b6e6b61d10ec 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -863,6 +863,7 @@ static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
863 else 863 else
864 delay += hstream->bufsize; 864 delay += hstream->bufsize;
865 } 865 }
866 delay = (hstream->bufsize == delay) ? 0 : delay;
866 867
867 if (delay >= hstream->period_bytes) { 868 if (delay >= hstream->period_bytes) {
868 dev_info(bus->dev, 869 dev_info(bus->dev,
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 4624556f486d..a294fee431f0 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -54,12 +54,9 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
54 54
55/* 55/*
56 * Each pipelines needs memory to be allocated. Check if we have free memory 56 * Each pipelines needs memory to be allocated. Check if we have free memory
57 * from available pool. Then only add this to pool 57 * from available pool.
58 * This is freed when pipe is deleted
59 * Note: DSP does actual memory management we only keep track for complete
60 * pool
61 */ 58 */
62static bool skl_tplg_alloc_pipe_mem(struct skl *skl, 59static bool skl_is_pipe_mem_avail(struct skl *skl,
63 struct skl_module_cfg *mconfig) 60 struct skl_module_cfg *mconfig)
64{ 61{
65 struct skl_sst *ctx = skl->skl_sst; 62 struct skl_sst *ctx = skl->skl_sst;
@@ -74,10 +71,20 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
74 "exceeds ppl memory available %d mem %d\n", 71 "exceeds ppl memory available %d mem %d\n",
75 skl->resource.max_mem, skl->resource.mem); 72 skl->resource.max_mem, skl->resource.mem);
76 return false; 73 return false;
74 } else {
75 return true;
77 } 76 }
77}
78 78
79/*
80 * Add the mem to the mem pool. This is freed when pipe is deleted.
81 * Note: DSP does actual memory management we only keep track for complete
82 * pool
83 */
84static void skl_tplg_alloc_pipe_mem(struct skl *skl,
85 struct skl_module_cfg *mconfig)
86{
79 skl->resource.mem += mconfig->pipe->memory_pages; 87 skl->resource.mem += mconfig->pipe->memory_pages;
80 return true;
81} 88}
82 89
83/* 90/*
@@ -85,10 +92,10 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
85 * quantified in MCPS (Million Clocks Per Second) required for module/pipe 92 * quantified in MCPS (Million Clocks Per Second) required for module/pipe
86 * 93 *
87 * Each pipelines needs mcps to be allocated. Check if we have mcps for this 94 * Each pipelines needs mcps to be allocated. Check if we have mcps for this
88 * pipe. This adds the mcps to driver counter 95 * pipe.
89 * This is removed on pipeline delete
90 */ 96 */
91static bool skl_tplg_alloc_pipe_mcps(struct skl *skl, 97
98static bool skl_is_pipe_mcps_avail(struct skl *skl,
92 struct skl_module_cfg *mconfig) 99 struct skl_module_cfg *mconfig)
93{ 100{
94 struct skl_sst *ctx = skl->skl_sst; 101 struct skl_sst *ctx = skl->skl_sst;
@@ -98,13 +105,18 @@ static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
98 "%s: module_id %d instance %d\n", __func__, 105 "%s: module_id %d instance %d\n", __func__,
99 mconfig->id.module_id, mconfig->id.instance_id); 106 mconfig->id.module_id, mconfig->id.instance_id);
100 dev_err(ctx->dev, 107 dev_err(ctx->dev,
101 "exceeds ppl memory available %d > mem %d\n", 108 "exceeds ppl mcps available %d > mem %d\n",
102 skl->resource.max_mcps, skl->resource.mcps); 109 skl->resource.max_mcps, skl->resource.mcps);
103 return false; 110 return false;
111 } else {
112 return true;
104 } 113 }
114}
105 115
116static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
117 struct skl_module_cfg *mconfig)
118{
106 skl->resource.mcps += mconfig->mcps; 119 skl->resource.mcps += mconfig->mcps;
107 return true;
108} 120}
109 121
110/* 122/*
@@ -411,7 +423,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
411 mconfig = w->priv; 423 mconfig = w->priv;
412 424
413 /* check resource available */ 425 /* check resource available */
414 if (!skl_tplg_alloc_pipe_mcps(skl, mconfig)) 426 if (!skl_is_pipe_mcps_avail(skl, mconfig))
415 return -ENOMEM; 427 return -ENOMEM;
416 428
417 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { 429 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
@@ -435,6 +447,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
435 ret = skl_tplg_set_module_params(w, ctx); 447 ret = skl_tplg_set_module_params(w, ctx);
436 if (ret < 0) 448 if (ret < 0)
437 return ret; 449 return ret;
450 skl_tplg_alloc_pipe_mcps(skl, mconfig);
438 } 451 }
439 452
440 return 0; 453 return 0;
@@ -477,10 +490,10 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
477 struct skl_sst *ctx = skl->skl_sst; 490 struct skl_sst *ctx = skl->skl_sst;
478 491
479 /* check resource available */ 492 /* check resource available */
480 if (!skl_tplg_alloc_pipe_mcps(skl, mconfig)) 493 if (!skl_is_pipe_mcps_avail(skl, mconfig))
481 return -EBUSY; 494 return -EBUSY;
482 495
483 if (!skl_tplg_alloc_pipe_mem(skl, mconfig)) 496 if (!skl_is_pipe_mem_avail(skl, mconfig))
484 return -ENOMEM; 497 return -ENOMEM;
485 498
486 /* 499 /*
@@ -526,11 +539,15 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
526 src_module = dst_module; 539 src_module = dst_module;
527 } 540 }
528 541
542 skl_tplg_alloc_pipe_mem(skl, mconfig);
543 skl_tplg_alloc_pipe_mcps(skl, mconfig);
544
529 return 0; 545 return 0;
530} 546}
531 547
532static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, 548static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
533 struct skl *skl, 549 struct skl *skl,
550 struct snd_soc_dapm_widget *src_w,
534 struct skl_module_cfg *src_mconfig) 551 struct skl_module_cfg *src_mconfig)
535{ 552{
536 struct snd_soc_dapm_path *p; 553 struct snd_soc_dapm_path *p;
@@ -547,6 +564,10 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
547 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name); 564 dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
548 565
549 next_sink = p->sink; 566 next_sink = p->sink;
567
568 if (!is_skl_dsp_widget_type(p->sink))
569 return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
570
550 /* 571 /*
551 * here we will check widgets in sink pipelines, so that 572 * here we will check widgets in sink pipelines, so that
552 * can be any widgets type and we are only interested if 573 * can be any widgets type and we are only interested if
@@ -576,7 +597,7 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
576 } 597 }
577 598
578 if (!sink) 599 if (!sink)
579 return skl_tplg_bind_sinks(next_sink, skl, src_mconfig); 600 return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
580 601
581 return 0; 602 return 0;
582} 603}
@@ -605,7 +626,7 @@ static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
605 * if sink is not started, start sink pipe first, then start 626 * if sink is not started, start sink pipe first, then start
606 * this pipe 627 * this pipe
607 */ 628 */
608 ret = skl_tplg_bind_sinks(w, skl, src_mconfig); 629 ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
609 if (ret) 630 if (ret)
610 return ret; 631 return ret;
611 632
@@ -773,10 +794,7 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
773 continue; 794 continue;
774 } 795 }
775 796
776 ret = skl_unbind_modules(ctx, src_module, dst_module); 797 skl_unbind_modules(ctx, src_module, dst_module);
777 if (ret < 0)
778 return ret;
779
780 src_module = dst_module; 798 src_module = dst_module;
781 } 799 }
782 800
@@ -814,9 +832,6 @@ static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
814 * This is a connecter and if path is found that means 832 * This is a connecter and if path is found that means
815 * unbind between source and sink has not happened yet 833 * unbind between source and sink has not happened yet
816 */ 834 */
817 ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
818 if (ret < 0)
819 return ret;
820 ret = skl_unbind_modules(ctx, src_mconfig, 835 ret = skl_unbind_modules(ctx, src_mconfig,
821 sink_mconfig); 836 sink_mconfig);
822 } 837 }
@@ -842,6 +857,12 @@ static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
842 case SND_SOC_DAPM_PRE_PMU: 857 case SND_SOC_DAPM_PRE_PMU:
843 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); 858 return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
844 859
860 case SND_SOC_DAPM_POST_PMU:
861 return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
862
863 case SND_SOC_DAPM_PRE_PMD:
864 return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
865
845 case SND_SOC_DAPM_POST_PMD: 866 case SND_SOC_DAPM_POST_PMD:
846 return skl_tplg_mixer_dapm_post_pmd_event(w, skl); 867 return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
847 } 868 }
@@ -916,6 +937,13 @@ static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
916 skl_get_module_params(skl->skl_sst, (u32 *)bc->params, 937 skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
917 bc->max, bc->param_id, mconfig); 938 bc->max, bc->param_id, mconfig);
918 939
940 /* decrement size for TLV header */
941 size -= 2 * sizeof(u32);
942
943 /* check size as we don't want to send kernel data */
944 if (size > bc->max)
945 size = bc->max;
946
919 if (bc->params) { 947 if (bc->params) {
920 if (copy_to_user(data, &bc->param_id, sizeof(u32))) 948 if (copy_to_user(data, &bc->param_id, sizeof(u32)))
921 return -EFAULT; 949 return -EFAULT;
@@ -1510,6 +1538,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
1510 &skl_tplg_ops, fw, 0); 1538 &skl_tplg_ops, fw, 0);
1511 if (ret < 0) { 1539 if (ret < 0) {
1512 dev_err(bus->dev, "tplg component load failed%d\n", ret); 1540 dev_err(bus->dev, "tplg component load failed%d\n", ret);
1541 release_firmware(fw);
1513 return -EINVAL; 1542 return -EINVAL;
1514 } 1543 }
1515 1544
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 443a15de94b5..092705e73db4 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -614,8 +614,6 @@ static int skl_probe(struct pci_dev *pci,
614 goto out_unregister; 614 goto out_unregister;
615 615
616 /*configure PM */ 616 /*configure PM */
617 pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
618 pm_runtime_use_autosuspend(bus->dev);
619 pm_runtime_put_noidle(bus->dev); 617 pm_runtime_put_noidle(bus->dev);
620 pm_runtime_allow(bus->dev); 618 pm_runtime_allow(bus->dev);
621 619
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 15c04e2eae34..976967675387 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -9,7 +9,7 @@ config SND_SOC_MEDIATEK
9 9
10config SND_SOC_MT8173_MAX98090 10config SND_SOC_MT8173_MAX98090
11 tristate "ASoC Audio driver for MT8173 with MAX98090 codec" 11 tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
12 depends on SND_SOC_MEDIATEK 12 depends on SND_SOC_MEDIATEK && I2C
13 select SND_SOC_MAX98090 13 select SND_SOC_MAX98090
14 help 14 help
15 This adds ASoC driver for Mediatek MT8173 boards 15 This adds ASoC driver for Mediatek MT8173 boards
@@ -19,7 +19,7 @@ config SND_SOC_MT8173_MAX98090
19 19
20config SND_SOC_MT8173_RT5650_RT5676 20config SND_SOC_MT8173_RT5650_RT5676
21 tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs" 21 tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
22 depends on SND_SOC_MEDIATEK 22 depends on SND_SOC_MEDIATEK && I2C
23 select SND_SOC_RT5645 23 select SND_SOC_RT5645
24 select SND_SOC_RT5677 24 select SND_SOC_RT5677
25 help 25 help
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index c866ade28ad0..a6c7b8d87cd2 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -381,9 +381,19 @@ static int mxs_saif_startup(struct snd_pcm_substream *substream,
381 __raw_writel(BM_SAIF_CTRL_CLKGATE, 381 __raw_writel(BM_SAIF_CTRL_CLKGATE,
382 saif->base + SAIF_CTRL + MXS_CLR_ADDR); 382 saif->base + SAIF_CTRL + MXS_CLR_ADDR);
383 383
384 clk_prepare(saif->clk);
385
384 return 0; 386 return 0;
385} 387}
386 388
389static void mxs_saif_shutdown(struct snd_pcm_substream *substream,
390 struct snd_soc_dai *cpu_dai)
391{
392 struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
393
394 clk_unprepare(saif->clk);
395}
396
387/* 397/*
388 * Should only be called when port is inactive. 398 * Should only be called when port is inactive.
389 * although can be called multiple times by upper layers. 399 * although can be called multiple times by upper layers.
@@ -424,8 +434,6 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
424 return ret; 434 return ret;
425 } 435 }
426 436
427 /* prepare clk in hw_param, enable in trigger */
428 clk_prepare(saif->clk);
429 if (saif != master_saif) { 437 if (saif != master_saif) {
430 /* 438 /*
431 * Set an initial clock rate for the saif internal logic to work 439 * Set an initial clock rate for the saif internal logic to work
@@ -611,6 +619,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
611 619
612static const struct snd_soc_dai_ops mxs_saif_dai_ops = { 620static const struct snd_soc_dai_ops mxs_saif_dai_ops = {
613 .startup = mxs_saif_startup, 621 .startup = mxs_saif_startup,
622 .shutdown = mxs_saif_shutdown,
614 .trigger = mxs_saif_trigger, 623 .trigger = mxs_saif_trigger,
615 .prepare = mxs_saif_prepare, 624 .prepare = mxs_saif_prepare,
616 .hw_params = mxs_saif_hw_params, 625 .hw_params = mxs_saif_hw_params,
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 79688aa1941a..4aeb8e1a7160 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -440,18 +440,18 @@ static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
440} 440}
441 441
442static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream, 442static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
443 struct snd_soc_pcm_runtime *soc_runtime) 443 struct snd_soc_pcm_runtime *rt)
444{ 444{
445 struct snd_dma_buffer *buf = &substream->dma_buffer; 445 struct snd_dma_buffer *buf = &substream->dma_buffer;
446 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; 446 size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
447 447
448 buf->dev.type = SNDRV_DMA_TYPE_DEV; 448 buf->dev.type = SNDRV_DMA_TYPE_DEV;
449 buf->dev.dev = soc_runtime->dev; 449 buf->dev.dev = rt->platform->dev;
450 buf->private_data = NULL; 450 buf->private_data = NULL;
451 buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr, 451 buf->area = dma_alloc_coherent(rt->platform->dev, size, &buf->addr,
452 GFP_KERNEL); 452 GFP_KERNEL);
453 if (!buf->area) { 453 if (!buf->area) {
454 dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n", 454 dev_err(rt->platform->dev, "%s: Could not allocate DMA buffer\n",
455 __func__); 455 __func__);
456 return -ENOMEM; 456 return -ENOMEM;
457 } 457 }
@@ -461,12 +461,12 @@ static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
461} 461}
462 462
463static void lpass_platform_free_buffer(struct snd_pcm_substream *substream, 463static void lpass_platform_free_buffer(struct snd_pcm_substream *substream,
464 struct snd_soc_pcm_runtime *soc_runtime) 464 struct snd_soc_pcm_runtime *rt)
465{ 465{
466 struct snd_dma_buffer *buf = &substream->dma_buffer; 466 struct snd_dma_buffer *buf = &substream->dma_buffer;
467 467
468 if (buf->area) { 468 if (buf->area) {
469 dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area, 469 dma_free_coherent(rt->dev, buf->bytes, buf->area,
470 buf->addr); 470 buf->addr);
471 } 471 }
472 buf->area = NULL; 472 buf->area = NULL;
@@ -499,9 +499,6 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
499 499
500 snd_soc_pcm_set_drvdata(soc_runtime, data); 500 snd_soc_pcm_set_drvdata(soc_runtime, data);
501 501
502 soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32);
503 soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask;
504
505 ret = lpass_platform_alloc_buffer(substream, soc_runtime); 502 ret = lpass_platform_alloc_buffer(substream, soc_runtime);
506 if (ret) 503 if (ret)
507 return ret; 504 return ret;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5a2812fa8946..0d3707987900 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -310,7 +310,7 @@ struct dapm_kcontrol_data {
310}; 310};
311 311
312static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, 312static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
313 struct snd_kcontrol *kcontrol) 313 struct snd_kcontrol *kcontrol, const char *ctrl_name)
314{ 314{
315 struct dapm_kcontrol_data *data; 315 struct dapm_kcontrol_data *data;
316 struct soc_mixer_control *mc; 316 struct soc_mixer_control *mc;
@@ -333,7 +333,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
333 if (mc->autodisable) { 333 if (mc->autodisable) {
334 struct snd_soc_dapm_widget template; 334 struct snd_soc_dapm_widget template;
335 335
336 name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, 336 name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
337 "Autodisable"); 337 "Autodisable");
338 if (!name) { 338 if (!name) {
339 ret = -ENOMEM; 339 ret = -ENOMEM;
@@ -371,7 +371,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
371 if (e->autodisable) { 371 if (e->autodisable) {
372 struct snd_soc_dapm_widget template; 372 struct snd_soc_dapm_widget template;
373 373
374 name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, 374 name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
375 "Autodisable"); 375 "Autodisable");
376 if (!name) { 376 if (!name) {
377 ret = -ENOMEM; 377 ret = -ENOMEM;
@@ -871,7 +871,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
871 871
872 kcontrol->private_free = dapm_kcontrol_free; 872 kcontrol->private_free = dapm_kcontrol_free;
873 873
874 ret = dapm_kcontrol_data_alloc(w, kcontrol); 874 ret = dapm_kcontrol_data_alloc(w, kcontrol, name);
875 if (ret) { 875 if (ret) {
876 snd_ctl_free_one(kcontrol); 876 snd_ctl_free_one(kcontrol);
877 goto exit_free; 877 goto exit_free;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e898b427be7e..1af4f23697a7 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1810,7 +1810,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
1810 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && 1810 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
1811 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && 1811 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
1812 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) && 1812 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
1813 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) 1813 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
1814 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
1814 continue; 1815 continue;
1815 1816
1816 dev_dbg(be->dev, "ASoC: hw_free BE %s\n", 1817 dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index cc39f63299ef..007cf5831121 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -2455,7 +2455,6 @@ int snd_usbmidi_create(struct snd_card *card,
2455 else 2455 else
2456 err = snd_usbmidi_create_endpoints(umidi, endpoints); 2456 err = snd_usbmidi_create_endpoints(umidi, endpoints);
2457 if (err < 0) { 2457 if (err < 0) {
2458 snd_usbmidi_free(umidi);
2459 return err; 2458 return err;
2460 } 2459 }
2461 2460
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 81a2eb77ba7f..05d815851be1 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2068,6 +2068,15 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
2068 err = -ENOMEM; 2068 err = -ENOMEM;
2069 goto err_free_queues; 2069 goto err_free_queues;
2070 } 2070 }
2071
2072 /*
2073 * Since this thread will not be kept in any rbtree not in a
2074 * list, initialize its list node so that at thread__put() the
2075 * current thread lifetime assuption is kept and we don't segfault
2076 * at list_del_init().
2077 */
2078 INIT_LIST_HEAD(&pt->unknown_thread->node);
2079
2071 err = thread__set_comm(pt->unknown_thread, "unknown", 0); 2080 err = thread__set_comm(pt->unknown_thread, "unknown", 0);
2072 if (err) 2081 if (err)
2073 goto err_delete_thread; 2082 goto err_delete_thread;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4f7b0efdde2f..813d9b272c81 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err,
399{ 399{
400 char help[BUFSIZ]; 400 char help[BUFSIZ];
401 401
402 if (!e)
403 return;
404
402 /* 405 /*
403 * We get error directly from syscall errno ( > 0), 406 * We get error directly from syscall errno ( > 0),
404 * or from encoded pointer's error ( < 0). 407 * or from encoded pointer's error ( < 0).
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2be10fb27172..4ce5c5e18f48 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,8 +686,9 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
686 pf->fb_ops = NULL; 686 pf->fb_ops = NULL;
687#if _ELFUTILS_PREREQ(0, 142) 687#if _ELFUTILS_PREREQ(0, 142)
688 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && 688 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
689 pf->cfi != NULL) { 689 (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
690 if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || 690 if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
691 (dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) ||
691 dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { 692 dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
692 pr_warning("Failed to get call frame on 0x%jx\n", 693 pr_warning("Failed to get call frame on 0x%jx\n",
693 (uintmax_t)pf->addr); 694 (uintmax_t)pf->addr);
@@ -1015,8 +1016,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
1015 return DWARF_CB_OK; 1016 return DWARF_CB_OK;
1016} 1017}
1017 1018
1018/* Find probe points from debuginfo */ 1019static int debuginfo__find_probe_location(struct debuginfo *dbg,
1019static int debuginfo__find_probes(struct debuginfo *dbg,
1020 struct probe_finder *pf) 1020 struct probe_finder *pf)
1021{ 1021{
1022 struct perf_probe_point *pp = &pf->pev->point; 1022 struct perf_probe_point *pp = &pf->pev->point;
@@ -1025,27 +1025,6 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
1025 Dwarf_Die *diep; 1025 Dwarf_Die *diep;
1026 int ret = 0; 1026 int ret = 0;
1027 1027
1028#if _ELFUTILS_PREREQ(0, 142)
1029 Elf *elf;
1030 GElf_Ehdr ehdr;
1031 GElf_Shdr shdr;
1032
1033 /* Get the call frame information from this dwarf */
1034 elf = dwarf_getelf(dbg->dbg);
1035 if (elf == NULL)
1036 return -EINVAL;
1037
1038 if (gelf_getehdr(elf, &ehdr) == NULL)
1039 return -EINVAL;
1040
1041 if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
1042 shdr.sh_type == SHT_PROGBITS) {
1043 pf->cfi = dwarf_getcfi_elf(elf);
1044 } else {
1045 pf->cfi = dwarf_getcfi(dbg->dbg);
1046 }
1047#endif
1048
1049 off = 0; 1028 off = 0;
1050 pf->lcache = intlist__new(NULL); 1029 pf->lcache = intlist__new(NULL);
1051 if (!pf->lcache) 1030 if (!pf->lcache)
@@ -1108,6 +1087,39 @@ found:
1108 return ret; 1087 return ret;
1109} 1088}
1110 1089
1090/* Find probe points from debuginfo */
1091static int debuginfo__find_probes(struct debuginfo *dbg,
1092 struct probe_finder *pf)
1093{
1094 int ret = 0;
1095
1096#if _ELFUTILS_PREREQ(0, 142)
1097 Elf *elf;
1098 GElf_Ehdr ehdr;
1099 GElf_Shdr shdr;
1100
1101 if (pf->cfi_eh || pf->cfi_dbg)
1102 return debuginfo__find_probe_location(dbg, pf);
1103
1104 /* Get the call frame information from this dwarf */
1105 elf = dwarf_getelf(dbg->dbg);
1106 if (elf == NULL)
1107 return -EINVAL;
1108
1109 if (gelf_getehdr(elf, &ehdr) == NULL)
1110 return -EINVAL;
1111
1112 if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
1113 shdr.sh_type == SHT_PROGBITS)
1114 pf->cfi_eh = dwarf_getcfi_elf(elf);
1115
1116 pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
1117#endif
1118
1119 ret = debuginfo__find_probe_location(dbg, pf);
1120 return ret;
1121}
1122
1111struct local_vars_finder { 1123struct local_vars_finder {
1112 struct probe_finder *pf; 1124 struct probe_finder *pf;
1113 struct perf_probe_arg *args; 1125 struct perf_probe_arg *args;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index bed82716e1b4..0aec7704e395 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -76,7 +76,10 @@ struct probe_finder {
76 76
77 /* For variable searching */ 77 /* For variable searching */
78#if _ELFUTILS_PREREQ(0, 142) 78#if _ELFUTILS_PREREQ(0, 142)
79 Dwarf_CFI *cfi; /* Call Frame Information */ 79 /* Call Frame Information from .eh_frame */
80 Dwarf_CFI *cfi_eh;
81 /* Call Frame Information from .debug_frame */
82 Dwarf_CFI *cfi_dbg;
80#endif 83#endif
81 Dwarf_Op *fb_ops; /* Frame base attribute */ 84 Dwarf_Op *fb_ops; /* Frame base attribute */
82 struct perf_probe_arg *pvar; /* Current target variable */ 85 struct perf_probe_arg *pvar; /* Current target variable */
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2b58edccd56f..afb0c45eba34 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -311,6 +311,16 @@ int perf_stat_process_counter(struct perf_stat_config *config,
311 311
312 aggr->val = aggr->ena = aggr->run = 0; 312 aggr->val = aggr->ena = aggr->run = 0;
313 313
314 /*
315 * We calculate counter's data every interval,
316 * and the display code shows ps->res_stats
317 * avg value. We need to zero the stats for
318 * interval mode, otherwise overall avg running
319 * averages will be shown for each interval.
320 */
321 if (config->interval)
322 init_stats(ps->res_stats);
323
314 if (counter->per_pkg) 324 if (counter->per_pkg)
315 zero_per_pkg(counter); 325 zero_per_pkg(counter);
316 326
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 90bd2ea41032..b3281dcd4a5d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -217,13 +217,16 @@ static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
217 return rc; 217 return rc;
218} 218}
219 219
220#define NFIT_TEST_ARS_RECORDS 4
221
220static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd, 222static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
221 unsigned int buf_len) 223 unsigned int buf_len)
222{ 224{
223 if (buf_len < sizeof(*nd_cmd)) 225 if (buf_len < sizeof(*nd_cmd))
224 return -EINVAL; 226 return -EINVAL;
225 227
226 nd_cmd->max_ars_out = 256; 228 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
229 + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
227 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16; 230 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
228 231
229 return 0; 232 return 0;
@@ -246,7 +249,8 @@ static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
246 if (buf_len < sizeof(*nd_cmd)) 249 if (buf_len < sizeof(*nd_cmd))
247 return -EINVAL; 250 return -EINVAL;
248 251
249 nd_cmd->out_length = 256; 252 nd_cmd->out_length = sizeof(struct nd_cmd_ars_status);
253 /* TODO: emit error records */
250 nd_cmd->num_records = 0; 254 nd_cmd->num_records = 0;
251 nd_cmd->address = 0; 255 nd_cmd->address = 0;
252 nd_cmd->length = -1ULL; 256 nd_cmd->length = -1ULL;
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdcc016b..057278448515 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -88,7 +88,11 @@ test_delete()
88 exit 1 88 exit 1
89 fi 89 fi
90 90
91 rm $file 91 rm $file 2>/dev/null
92 if [ $? -ne 0 ]; then
93 chattr -i $file
94 rm $file
95 fi
92 96
93 if [ -e $file ]; then 97 if [ -e $file ]; then
94 echo "$file couldn't be deleted" >&2 98 echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@ test_zero_size_delete()
111 exit 1 115 exit 1
112 fi 116 fi
113 117
118 chattr -i $file
114 printf "$attrs" > $file 119 printf "$attrs" > $file
115 120
116 if [ -e $file ]; then 121 if [ -e $file ]; then
@@ -141,7 +146,11 @@ test_valid_filenames()
141 echo "$file could not be created" >&2 146 echo "$file could not be created" >&2
142 ret=1 147 ret=1
143 else 148 else
144 rm $file 149 rm $file 2>/dev/null
150 if [ $? -ne 0 ]; then
151 chattr -i $file
152 rm $file
153 fi
145 fi 154 fi
146 done 155 done
147 156
@@ -174,7 +183,11 @@ test_invalid_filenames()
174 183
175 if [ -e $file ]; then 184 if [ -e $file ]; then
176 echo "Creating $file should have failed" >&2 185 echo "Creating $file should have failed" >&2
177 rm $file 186 rm $file 2>/dev/null
187 if [ $? -ne 0 ]; then
188 chattr -i $file
189 rm $file
190 fi
178 ret=1 191 ret=1
179 fi 192 fi
180 done 193 done
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
index 8c0764407b3c..4af74f733036 100644
--- a/tools/testing/selftests/efivarfs/open-unlink.c
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -1,10 +1,68 @@
1#include <errno.h>
1#include <stdio.h> 2#include <stdio.h>
2#include <stdint.h> 3#include <stdint.h>
3#include <stdlib.h> 4#include <stdlib.h>
4#include <unistd.h> 5#include <unistd.h>
6#include <sys/ioctl.h>
5#include <sys/types.h> 7#include <sys/types.h>
6#include <sys/stat.h> 8#include <sys/stat.h>
7#include <fcntl.h> 9#include <fcntl.h>
10#include <linux/fs.h>
11
12static int set_immutable(const char *path, int immutable)
13{
14 unsigned int flags;
15 int fd;
16 int rc;
17 int error;
18
19 fd = open(path, O_RDONLY);
20 if (fd < 0)
21 return fd;
22
23 rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
24 if (rc < 0) {
25 error = errno;
26 close(fd);
27 errno = error;
28 return rc;
29 }
30
31 if (immutable)
32 flags |= FS_IMMUTABLE_FL;
33 else
34 flags &= ~FS_IMMUTABLE_FL;
35
36 rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
37 error = errno;
38 close(fd);
39 errno = error;
40 return rc;
41}
42
43static int get_immutable(const char *path)
44{
45 unsigned int flags;
46 int fd;
47 int rc;
48 int error;
49
50 fd = open(path, O_RDONLY);
51 if (fd < 0)
52 return fd;
53
54 rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
55 if (rc < 0) {
56 error = errno;
57 close(fd);
58 errno = error;
59 return rc;
60 }
61 close(fd);
62 if (flags & FS_IMMUTABLE_FL)
63 return 1;
64 return 0;
65}
8 66
9int main(int argc, char **argv) 67int main(int argc, char **argv)
10{ 68{
@@ -27,7 +85,7 @@ int main(int argc, char **argv)
27 buf[4] = 0; 85 buf[4] = 0;
28 86
29 /* create a test variable */ 87 /* create a test variable */
30 fd = open(path, O_WRONLY | O_CREAT); 88 fd = open(path, O_WRONLY | O_CREAT, 0600);
31 if (fd < 0) { 89 if (fd < 0) {
32 perror("open(O_WRONLY)"); 90 perror("open(O_WRONLY)");
33 return EXIT_FAILURE; 91 return EXIT_FAILURE;
@@ -41,6 +99,18 @@ int main(int argc, char **argv)
41 99
42 close(fd); 100 close(fd);
43 101
102 rc = get_immutable(path);
103 if (rc < 0) {
104 perror("ioctl(FS_IOC_GETFLAGS)");
105 return EXIT_FAILURE;
106 } else if (rc) {
107 rc = set_immutable(path, 0);
108 if (rc < 0) {
109 perror("ioctl(FS_IOC_SETFLAGS)");
110 return EXIT_FAILURE;
111 }
112 }
113
44 fd = open(path, O_RDONLY); 114 fd = open(path, O_RDONLY);
45 if (fd < 0) { 115 if (fd < 0) {
46 perror("open"); 116 perror("open");
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
index 773e276ff90b..1e1abe0ad354 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -39,28 +39,23 @@ instance_slam() {
39} 39}
40 40
41instance_slam & 41instance_slam &
42x=`jobs -l` 42p1=$!
43p1=`echo $x | cut -d' ' -f2`
44echo $p1 43echo $p1
45 44
46instance_slam & 45instance_slam &
47x=`jobs -l | tail -1` 46p2=$!
48p2=`echo $x | cut -d' ' -f2`
49echo $p2 47echo $p2
50 48
51instance_slam & 49instance_slam &
52x=`jobs -l | tail -1` 50p3=$!
53p3=`echo $x | cut -d' ' -f2`
54echo $p3 51echo $p3
55 52
56instance_slam & 53instance_slam &
57x=`jobs -l | tail -1` 54p4=$!
58p4=`echo $x | cut -d' ' -f2`
59echo $p4 55echo $p4
60 56
61instance_slam & 57instance_slam &
62x=`jobs -l | tail -1` 58p5=$!
63p5=`echo $x | cut -d' ' -f2`
64echo $p5 59echo $p5
65 60
66ls -lR >/dev/null 61ls -lR >/dev/null
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca185c471..ea6064696fe4 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -143,7 +143,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
143 * Check if there was a change in the timer state (should we raise or lower 143 * Check if there was a change in the timer state (should we raise or lower
144 * the line level to the GIC). 144 * the line level to the GIC).
145 */ 145 */
146static void kvm_timer_update_state(struct kvm_vcpu *vcpu) 146static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
147{ 147{
148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; 148 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
149 149
@@ -154,10 +154,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
154 * until we call this function from kvm_timer_flush_hwstate. 154 * until we call this function from kvm_timer_flush_hwstate.
155 */ 155 */
156 if (!vgic_initialized(vcpu->kvm)) 156 if (!vgic_initialized(vcpu->kvm))
157 return; 157 return -ENODEV;
158 158
159 if (kvm_timer_should_fire(vcpu) != timer->irq.level) 159 if (kvm_timer_should_fire(vcpu) != timer->irq.level)
160 kvm_timer_update_irq(vcpu, !timer->irq.level); 160 kvm_timer_update_irq(vcpu, !timer->irq.level);
161
162 return 0;
161} 163}
162 164
163/* 165/*
@@ -218,7 +220,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
218 bool phys_active; 220 bool phys_active;
219 int ret; 221 int ret;
220 222
221 kvm_timer_update_state(vcpu); 223 if (kvm_timer_update_state(vcpu))
224 return;
222 225
223 /* 226 /*
224 * If we enter the guest with the virtual input level to the VGIC 227 * If we enter the guest with the virtual input level to the VGIC
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 043032c6a5a4..00429b392c61 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
1875static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs) 1875static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
1876{ 1876{
1877 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1877 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1878 1878 int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
1879 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8; 1879 int sz = nr_longs * sizeof(unsigned long);
1880 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL); 1880 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
1881 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL); 1881 vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
1882 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL); 1882 vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 353159922456..db2dd3335c6a 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -172,7 +172,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
172 * do alloc nowait since if we are going to sleep anyway we 172 * do alloc nowait since if we are going to sleep anyway we
173 * may as well sleep faulting in page 173 * may as well sleep faulting in page
174 */ 174 */
175 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT); 175 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
176 if (!work) 176 if (!work)
177 return 0; 177 return 0;
178 178